diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 9eb6c8e4..6b01249e 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -21,8 +21,8 @@ jobs:
       - name: Test
         run: go test -v ./...
-      - uses: dominikh/staticcheck-action@v1.3.0
+      - uses: dominikh/staticcheck-action@v1.3.1
         with:
-          version: "2023.1"
+          version: "latest"
           # go is already installed by actions/setup-go with the correct version from go.mod
           install-go: false
diff --git a/go.mod b/go.mod
index 48812ff5..8887786c 100644
--- a/go.mod
+++ b/go.mod
@@ -1,80 +1,91 @@
 module github.com/justwatchcom/sql_exporter
 
-go 1.21
+go 1.24
 
 require (
-	cloud.google.com/go/cloudsqlconn v1.12.1
-	github.com/ClickHouse/clickhouse-go/v2 v2.17.1
-	github.com/aws/aws-sdk-go v1.50.6
+	cloud.google.com/go/cloudsqlconn v1.16.1
+	github.com/ClickHouse/clickhouse-go/v2 v2.34.0
+	github.com/aws/aws-sdk-go v1.55.7
 	github.com/cenkalti/backoff v2.2.1+incompatible
 	github.com/go-kit/log v0.2.1
-	github.com/go-sql-driver/mysql v1.8.1
+	github.com/go-sql-driver/mysql v1.9.2
 	github.com/gobwas/glob v0.2.3
 	github.com/jmoiron/sqlx v1.4.0
 	github.com/lib/pq v1.10.9
 	github.com/microsoft/go-mssqldb v1.8.0
-	github.com/prometheus/client_golang v1.20.5
-	github.com/prometheus/common v0.60.0
+	github.com/prometheus/client_golang v1.22.0
+	github.com/prometheus/common v0.63.0
 	github.com/robfig/cron/v3 v3.0.1
-	github.com/segmentio/go-athena v0.0.0-20230626212750-5fac08ed8dab
-	github.com/snowflakedb/gosnowflake v1.7.2
+	github.com/segmentio/go-athena v0.1.0
+	github.com/snowflakedb/gosnowflake v1.13.2
 	github.com/vertica/vertica-sql-go v1.3.3
-	go.uber.org/automaxprocs v1.5.3
-	google.golang.org/api v0.197.0
+	go.uber.org/automaxprocs v1.6.0
+	google.golang.org/api v0.230.0
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
-	cloud.google.com/go/auth v0.9.3 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
-	cloud.google.com/go/compute/metadata v0.5.0 // indirect
+	cloud.google.com/go/auth v0.16.1 // indirect
+	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
+	cloud.google.com/go/compute/metadata v0.6.0 // indirect
 	filippo.io/edwards25519 v1.1.0 // indirect
 	github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
 	github.com/99designs/keyring v1.2.2 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 // indirect
-	github.com/ClickHouse/ch-go v0.61.2 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
+	github.com/BurntSushi/toml v1.5.0 // indirect
+	github.com/ClickHouse/ch-go v0.65.1 // indirect
 	github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect
-	github.com/andybalholm/brotli v1.1.0 // indirect
+	github.com/andybalholm/brotli v1.1.1 // indirect
+	github.com/apache/arrow-go/v18 v18.2.0 // indirect
 	github.com/apache/arrow/go/v14 v14.0.2 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 // indirect
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 // indirect
-	github.com/aws/smithy-go v1.19.0 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.73 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/service/athena v1.50.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
+	github.com/aws/smithy-go v1.22.3 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/danieljoos/wincred v1.2.1 // indirect
-	github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
-	github.com/elastic/go-sysinfo v1.11.2 // indirect
-	github.com/elastic/go-windows v1.0.1 // indirect
+	github.com/danieljoos/wincred v1.2.2 // indirect
+	github.com/dvsekhvalnov/jose2go v1.8.0 // indirect
+	github.com/elastic/go-sysinfo v1.15.3 // indirect
+	github.com/elastic/go-windows v1.0.2 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect
-	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.9 // indirect
 	github.com/go-faster/city v1.0.1 // indirect
 	github.com/go-faster/errors v0.7.1 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/goccy/go-json v0.10.5 // indirect
 	github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
 	github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
 	github.com/golang-sql/sqlexp v0.1.0 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/google/flatbuffers v23.5.26+incompatible // indirect
-	github.com/google/s2a-go v0.1.8 // indirect
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
+	github.com/google/flatbuffers v25.2.10+incompatible // indirect
+	github.com/google/s2a-go v0.1.9 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
-	github.com/googleapis/gax-go/v2 v2.13.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+	github.com/googleapis/gax-go/v2 v2.14.1 // indirect
 	github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect
 	github.com/hashicorp/go-uuid v1.0.3 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
@@ -93,40 +104,41 @@ require (
 	github.com/jcmturner/rpc/v2 v2.0.3 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
 	github.com/mtibben/percent v0.2.1 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/paulmach/orb v0.11.1 // indirect
-	github.com/pierrec/lz4/v4 v4.1.21 // indirect
+	github.com/pierrec/lz4/v4 v4.1.22 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/segmentio/asm v1.2.0 // indirect
-	github.com/shopspring/decimal v1.3.1 // indirect
+	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/zeebo/xxh3 v1.0.2 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
-	go.opentelemetry.io/otel v1.29.0 // indirect
-	go.opentelemetry.io/otel/metric v1.29.0 // indirect
-	go.opentelemetry.io/otel/trace v1.29.0 // indirect
-	golang.org/x/crypto v0.31.0 // indirect
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-	golang.org/x/mod v0.17.0 // indirect
-	golang.org/x/net v0.33.0 // indirect
-	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
-	golang.org/x/time v0.6.0 // indirect
-	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
-	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	google.golang.org/grpc v1.66.2 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
+	go.opentelemetry.io/otel v1.35.0 // indirect
+	go.opentelemetry.io/otel/metric v1.35.0 // indirect
+	go.opentelemetry.io/otel/trace v1.35.0 // indirect
+	golang.org/x/crypto v0.37.0 // indirect
+	golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
+	golang.org/x/mod v0.24.0 // indirect
+	golang.org/x/net v0.39.0 // indirect
+	golang.org/x/oauth2 v0.29.0 // indirect
+	golang.org/x/sync v0.13.0 // indirect
+	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/term v0.31.0 // indirect
+	golang.org/x/text v0.24.0 // indirect
+	golang.org/x/time v0.11.0 // indirect
+	golang.org/x/tools v0.32.0 // indirect
+	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
+
google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect + google.golang.org/grpc v1.72.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.1 // indirect ) diff --git a/go.sum b/go.sum index 759172f8..5a1a0637 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,20 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= +cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/cloudsqlconn v1.12.1 h1:crfkYy4TsYXx+n/wELlPthDYaAmDo2olHLv9DmRuJzY= cloud.google.com/go/cloudsqlconn v1.12.1/go.mod h1:Y8x/9e+QsjJNkvOj9mdJ8/ixhE95Ab2H/vsyy0mXWNc= +cloud.google.com/go/cloudsqlconn v1.16.1 h1:0DaCn076Qr7H5TsHm65k/fzjAv4dNUQB2fq9ULzmXMU= +cloud.google.com/go/cloudsqlconn v1.16.1/go.mod h1:T6G4pmABtHqRyTF8M+oUe1KELh6Rs4PVSngAZDBz3zA= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= @@ -15,79 +23,144 @@ github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XB github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g= github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod 
h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/ClickHouse/ch-go v0.61.2 h1:8+8eKO2VgxoRa0yLJpWwkqJxi/jrtP5Z+J6eZdPfwdc= github.com/ClickHouse/ch-go v0.61.2/go.mod h1:ZSVIE1A7mGJNcJeBvVF1v5bo12n0Wmnw30RhnPCpLzg= +github.com/ClickHouse/ch-go v0.65.1 h1:SLuxmLl5Mjj44/XbINsK2HFvzqup0s6rwKLFH347ZhU= +github.com/ClickHouse/ch-go v0.65.1/go.mod h1:bsodgURwmrkvkBe5jw1qnGDgyITsYErfONKAHn05nv4= +github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/ClickHouse/clickhouse-go/v2 v2.17.1 h1:ZCmAYWpu75IyEi7+Yrs/uaAjiCGY5wfW5kXo64exkX4= github.com/ClickHouse/clickhouse-go/v2 v2.17.1/go.mod h1:rkGTvFDTLqLIm0ma+13xmcCfr/08Gvs7KmFt1tgiWHQ= +github.com/ClickHouse/clickhouse-go/v2 v2.34.0 h1:Y4rqkdrRHgExvC4o/NTbLdY5LFQ3LHS77/RNFxFX3Co= +github.com/ClickHouse/clickhouse-go/v2 v2.34.0/go.mod h1:yioSINoRLVZkLyDzdMXPLRIqhDvel8iLBlwh6Iefso8= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/andybalholm/brotli v1.1.0 
h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/apache/arrow-go/v18 v18.2.0 h1:QhWqpgZMKfWOniGPhbUxrHohWnooGURqL2R2Gg4SO1Q= +github.com/apache/arrow-go/v18 v18.2.0/go.mod h1:Ic/01WSwGJWRrdAZcxjBZ5hbApNJ28K96jGYaxzzGUc= github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw= github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY= github.com/aws/aws-sdk-go v1.50.6 h1:FaXvNwHG3Ri1paUEW16Ahk9zLVqSAdqa1M3phjZR35Q= github.com/aws/aws-sdk-go v1.50.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= +github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= +github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= +github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= +github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 h1:2MUXyGW6dVaQz6aqycpbdLIH1NMcUI6kW6vQ0RabGYg= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15/go.mod h1:aHbhbR6WEQgHAiRj41EQ2W47yOYwNtIkWTXmcAtYqj8= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.73 h1:I91eIdOJMVK9oNiH2jvhp/AxMW+Gff8Rb5VjVHMhcJU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.73/go.mod h1:vq7/m7dahFXcdzWVOvvjasDI9RcsD3RsTfHmDundJYg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 
h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M= github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= +github.com/aws/aws-sdk-go-v2/service/athena v1.50.4 h1:QWhxjrA0r+FQnDAATdGqLXUvYW0MdUIvCBK89BN3OfU= +github.com/aws/aws-sdk-go-v2/service/athena v1.50.4/go.mod h1:xsG8Y2fMenmHTdukyknTUO1uQhEZ/entaNHvPmD1klE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 h1:L0ai8WICYHozIKK+OtPzVJBugL7culcuM4E4JOpIEm8= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10/go.mod h1:byqfyxJBshFk0fF9YmK0M0ugIO8OWjzH2T3bPG4eGuA= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI= 
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 h1:5XNlsBsEvBZBMO6p82y+sqpWg8j5aBCe+5C2GBFgqBQ= github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2 h1:tWUG+4wZqdMl/znThEk9tcCy8tTMxq8dW0JTgamohrY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k= +github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= +github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -100,6 +173,8 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod 
h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -107,18 +182,26 @@ github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m3 github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.0.4+incompatible h1:JNNkBctYKurkw6FrHfKqY0nKIDf5nrbxjVBtS+cdcok= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/dvsekhvalnov/jose2go v1.8.0 h1:LqkkVKAlHFfH9LOEl5fe4p/zL02OhWE7pCufMBG2jLA= +github.com/dvsekhvalnov/jose2go v1.8.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/elastic/go-sysinfo v1.8.1/go.mod h1:JfllUnzoQV/JRYymbH3dO1yggI3mV2oTKSXsDHM+uIM= github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn0jg4= github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= +github.com/elastic/go-sysinfo v1.15.3 h1:W+RnmhKFkqPTCRoFq2VCTmsT4p/fwpo+3gKNQsn1XU0= +github.com/elastic/go-sysinfo v1.15.3/go.mod h1:K/cNrqYTDrSoMh2oDkYEMS2+a72GRxMvNP+GC+vRIlo= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/elastic/go-windows v1.0.2 h1:yoLLsAsV5cfg9FLhZ9EXZ2n2sQFKeDYrHenkcivY4vI= +github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -129,6 +212,8 @@ github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9 github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= +github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod 
h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= @@ -146,11 +231,15 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= @@ -159,6 +248,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= @@ -167,6 +258,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -181,6 +274,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/snappy v0.0.1/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= +github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -191,16 +286,23 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= @@ -256,6 +358,7 @@ github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod 
h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= @@ -288,8 +391,12 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -327,11 +434,14 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -343,19 +453,26 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= +github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -365,16 +482,22 @@ github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/go-athena v0.0.0-20230626212750-5fac08ed8dab h1:C56AWluoKnwfg31uBj91htttC26hyp3D4lDpnt2vJco= github.com/segmentio/go-athena v0.0.0-20230626212750-5fac08ed8dab/go.mod h1:umGD11uSGUY8Vd0lbo1jJUEAk4FxVV3YE5wRSXx1Lbk= +github.com/segmentio/go-athena v0.1.0 h1:uKI2bWV0z8aXP7yPogrSazzeFORw91KGv7z5rnVP+B4= +github.com/segmentio/go-athena v0.1.0/go.mod h1:o8DalJ3YiiIPnmBbhrEcl4dtY2NeWSNyhBQb1dgIuqU= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/snowflakedb/gosnowflake v1.7.2 h1:HRSwva8YXC64WUppfmHcMNVVzSE1+EwXXaJxgS0EkTo= github.com/snowflakedb/gosnowflake v1.7.2/go.mod h1:03tW856vc3ceM4rJuj7KO4dzqN7qoezTm+xw7aPIIFo= +github.com/snowflakedb/gosnowflake v1.13.2 h1:78ovPH3fcxeHkVbOI1o98NuVWJmu0Lax10nlIRYnbjM= +github.com/snowflakedb/gosnowflake 
v1.13.2/go.mod h1:tNf6gX01sgKaQUBo3De+KctsK6EwUtwmwp9wf6eYKPU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -392,12 +515,14 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -410,20 +535,32 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod 
h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -445,9 +582,13 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -459,6 +600,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -477,9 +620,13 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.33.0 
h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -490,6 +637,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -512,12 +661,16 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -527,8 +680,12 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.21.0 
h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -546,6 +703,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -554,20 +713,29 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= +google.golang.org/api v0.230.0 h1:2u1hni3E+UXAXrONrrkfWpi/V6cyKVAbfGVeGtC3OxM= +google.golang.org/api v0.230.0/go.mod h1:aqvtoMk7YkiXx+6U12arQFExiRV9D/ekvMCwCd/TksQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -575,6 +743,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= +google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -588,6 +758,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/job.go b/job.go index 6852e6ed..340a86bc 100644 --- a/job.go +++ b/job.go @@ -11,15 +11,15 @@ import ( "time" _ "github.com/ClickHouse/clickhouse-go/v2" // register the ClickHouse driver - "github.com/cenkalti/backoff" - _ "github.com/microsoft/go-mssqldb" // register the MS-SQL driver - _ "github.com/microsoft/go-mssqldb/integratedauth/krb5" // Register integrated auth for MS-SQL + "github.com/cenkalti/backoff/v5" 
"github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/go-sql-driver/mysql" // register the MySQL driver "github.com/gobwas/glob" "github.com/jmoiron/sqlx" - _ "github.com/lib/pq" // register the PostgreSQL driver + _ "github.com/lib/pq" // register the PostgreSQL driver + _ "github.com/microsoft/go-mssqldb" // register the MS-SQL driver + _ "github.com/microsoft/go-mssqldb/integratedauth/krb5" // Register integrated auth for MS-SQL "github.com/prometheus/client_golang/prometheus" _ "github.com/segmentio/go-athena" // register the AWS Athena driver "github.com/snowflakedb/gosnowflake" diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 5584c350..fd6f0623 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,161 @@ # Changelog +## [0.16.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.0...auth/v0.16.1) (2025-04-23) + + +### Bug Fixes + +* **auth:** Clone detectopts before assigning TokenBindingType ([#11881](https://github.com/googleapis/google-cloud-go/issues/11881)) ([2167b02](https://github.com/googleapis/google-cloud-go/commit/2167b020fdc43b517c2b6ecca264a10e357ea035)) + +## [0.16.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.15.0...auth/v0.16.0) (2025-04-14) + + +### Features + +* **auth/credentials:** Return X.509 certificate chain as subject token ([#11948](https://github.com/googleapis/google-cloud-go/issues/11948)) ([d445a3f](https://github.com/googleapis/google-cloud-go/commit/d445a3f66272ffd5c39c4939af9bebad4582631c)), refs [#11757](https://github.com/googleapis/google-cloud-go/issues/11757) +* **auth:** Configure DirectPath bound credentials from AllowedHardBoundTokens ([#11665](https://github.com/googleapis/google-cloud-go/issues/11665)) ([0fc40bc](https://github.com/googleapis/google-cloud-go/commit/0fc40bcf4e4673704df0973e9fa65957395d7bb4)) + + +### Bug Fixes + +* **auth:** Allow non-default SA credentials for DP ([#11828](https://github.com/googleapis/google-cloud-go/issues/11828)) ([3a996b4](https://github.com/googleapis/google-cloud-go/commit/3a996b4129e6d0a34dfda6671f535d5aefb26a82)) +* **auth:** Restore calling DialContext ([#11930](https://github.com/googleapis/google-cloud-go/issues/11930)) ([9ec9a29](https://github.com/googleapis/google-cloud-go/commit/9ec9a29494e93197edbaf45aba28984801e9770a)), refs [#11118](https://github.com/googleapis/google-cloud-go/issues/11118) + +## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19) + + +### Features + +* **auth:** Add hard-bound token request to compute token provider. 
([#11588](https://github.com/googleapis/google-cloud-go/issues/11588)) ([0e608bb](https://github.com/googleapis/google-cloud-go/commit/0e608bb5ac3d694c8ad36ca4340071d3a2c78699)) + +## [0.14.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.0...auth/v0.14.1) (2025-01-24) + + +### Documentation + +* **auth:** Add warning about externally-provided credentials ([#11462](https://github.com/googleapis/google-cloud-go/issues/11462)) ([49fb6ff](https://github.com/googleapis/google-cloud-go/commit/49fb6ff4d754895f82c9c4d502fc7547d3b5a941)) + +## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08) + + +### Features + +* **auth:** Add universe domain support to idtoken ([#11059](https://github.com/googleapis/google-cloud-go/issues/11059)) ([72add7e](https://github.com/googleapis/google-cloud-go/commit/72add7e9f8f455af695e8ef79212a4bd3122fb3a)) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) +* **auth:** Fix copy of delegates in impersonate.NewIDTokenCredentials ([#11386](https://github.com/googleapis/google-cloud-go/issues/11386)) ([ff7ef8e](https://github.com/googleapis/google-cloud-go/commit/ff7ef8e7ade7171bce3e4f30ff10a2e9f6c27ca0)), refs [#11379](https://github.com/googleapis/google-cloud-go/issues/11379) +* **auth:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + +## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13) + + +### Features + +* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d)) +* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f)) + + +### Bug Fixes + +* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90)) + +## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10) + + +### Bug Fixes + +* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1)) + +## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04) + + +### Features + +* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005) + + +### Bug Fixes + +* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) 
[#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188) + +## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21) + + +### Features + +* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344)) + +## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12) + + +### Bug Fixes + +* **auth:** Restore use of grpc.Dial ([#11118](https://github.com/googleapis/google-cloud-go/issues/11118)) ([2456b94](https://github.com/googleapis/google-cloud-go/commit/2456b943b7b8aaabd4d8bfb7572c0f477ae0db45)), refs [#7556](https://github.com/googleapis/google-cloud-go/issues/7556) + +## [0.10.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.0...auth/v0.10.1) (2024-11-06) + + +### Bug Fixes + +* **auth:** Restore Application Default Credentials support to idtoken ([#11083](https://github.com/googleapis/google-cloud-go/issues/11083)) ([8771f2e](https://github.com/googleapis/google-cloud-go/commit/8771f2ea9807ab822083808e0678392edff3b4f2)) +* **auth:** Skip impersonate universe domain check if empty ([#11086](https://github.com/googleapis/google-cloud-go/issues/11086)) ([87159c1](https://github.com/googleapis/google-cloud-go/commit/87159c1059d4a18d1367ce62746a838a94964ab6)) + +## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30) + + +### Features + +* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b)) + +## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22) + + +### Bug Fixes + +* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844) +* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114)) + +## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09) + + +### Bug Fixes + +* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962) +* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287)) + +## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01) + + +### Bug Fixes + +* **auth:** Restore support for non-default service 
accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907) + +## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30) + + +### Bug Fixes + +* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8)) + +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + ## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md index 36de276a..6fe4f076 100644 --- a/vendor/cloud.google.com/go/auth/README.md +++ b/vendor/cloud.google.com/go/auth/README.md @@ -1,4 +1,40 @@ -# auth +# Google Auth Library for Go -This module is currently EXPERIMENTAL and under active development. It is not -yet intended to be used. +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. +- To create a authenticated HTTP client please see examples in the + [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport) + package. +- To create a authenticated gRPC connection please see examples in the + [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport) + package. +- To create an ID token please see examples in the + [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken) + package. + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. 
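As a concrete illustration of the usage notes above, a minimal sketch of detecting default credentials with this module is shown below. `credentials.DetectDefault` and the `DetectOptions.Scopes` field appear in the vendored detect.go later in this patch; the scope string, the `Token` call on the returned credentials, and the surrounding program are illustrative assumptions, not part of the diff.

``` go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	ctx := context.Background()
	// DetectDefault walks the standard Application Default Credentials
	// detection chain, as described in the vendored detect.go in this patch.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatalf("detect credentials: %v", err)
	}
	// Token is assumed to be exposed via the credentials' token provider.
	tok, err := creds.Token(ctx)
	if err != nil {
		log.Fatalf("fetch token: %v", err)
	}
	fmt.Println("token expires at:", tok.Expiry)
}
```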
+ +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index bc37ea85..cd5e9886 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -24,6 +24,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "strings" @@ -32,6 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -227,9 +229,7 @@ type CredentialsOptions struct { UniverseDomainProvider CredentialsPropertyProvider } -// NewCredentials returns new [Credentials] from the provided options. Most users -// will want to build this object a function from the -// [cloud.google.com/go/auth/credentials] package. +// NewCredentials returns new [Credentials] from the provided options. func NewCredentials(opts *CredentialsOptions) *Credentials { creds := &Credentials{ TokenProvider: opts.TokenProvider, @@ -242,8 +242,8 @@ func NewCredentials(opts *CredentialsOptions) *Credentials { return creds } -// CachedTokenProviderOptions provided options for configuring a -// CachedTokenProvider. +// CachedTokenProviderOptions provides options for configuring a cached +// [TokenProvider]. type CachedTokenProviderOptions struct { // DisableAutoRefresh makes the TokenProvider always return the same token, // even if it is expired. The default is false. Optional. @@ -253,7 +253,7 @@ type CachedTokenProviderOptions struct { // seconds. Optional. ExpireEarly time.Duration // DisableAsyncRefresh configures a synchronous workflow that refreshes - // stale tokens while blocking. The default is false. Optional. + // tokens in a blocking manner. The default is false. Optional. DisableAsyncRefresh bool } @@ -280,12 +280,7 @@ func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { // NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned // by the underlying provider. By default it will refresh tokens asynchronously -// (non-blocking mode) within a window that starts 3 minutes and 45 seconds -// before they expire. The asynchronous (non-blocking) refresh can be changed to -// a synchronous (blocking) refresh using the -// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry -// duration can be configured using the CachedTokenProviderOptions.ExpireEarly -// option. +// a few minutes before they expire. func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { if ctp, ok := tp.(*cachedTokenProvider); ok { return ctp @@ -328,7 +323,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err defer c.mu.Unlock() return c.cachedToken, nil case stale: - c.tokenAsync(ctx) + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) // Return the stale token immediately to not block customer requests to Cloud services. 
c.mu.Lock() defer c.mu.Unlock() @@ -343,13 +340,14 @@ func (c *cachedTokenProvider) tokenState() tokenState { c.mu.Lock() defer c.mu.Unlock() t := c.cachedToken + now := timeNow() if t == nil || t.Value == "" { return invalid } else if t.Expiry.IsZero() { return fresh - } else if timeNow().After(t.Expiry.Round(0)) { + } else if now.After(t.Expiry.Round(0)) { return invalid - } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) { return stale } return fresh @@ -494,6 +492,11 @@ type Options2LO struct { // UseIDToken requests that the token returned be an ID token if one is // returned from the server. Optional. UseIDToken bool + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *Options2LO) client() *http.Client { @@ -524,12 +527,13 @@ func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { if err := opts.validate(); err != nil { return nil, err } - return tokenProvider2LO{opts: opts, Client: opts.client()}, nil + return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil } type tokenProvider2LO struct { opts *Options2LO Client *http.Client + logger *slog.Logger } func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { @@ -564,10 +568,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) resp, body, err := internal.DoRequest(tp.Client, req) if err != nil { return nil, fmt.Errorf("auth: cannot fetch token: %w", err) } + tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index 6f70fa35..e4a8078f 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,8 +37,12 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. -func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ +func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider { + return auth.NewCachedTokenProvider(&computeProvider{ + scopes: opts.Scopes, + client: client, + tokenBindingType: opts.TokenBindingType, + }, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, DisableAsyncRefresh: opts.DisableAsyncRefresh, }) @@ -46,7 +50,9 @@ func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { // computeProvider fetches tokens from the google cloud metadata service. 
type computeProvider struct { - scopes []string + scopes []string + client *metadata.Client + tokenBindingType TokenBindingType } type metadataTokenResp struct { @@ -55,17 +61,27 @@ type metadataTokenResp struct { TokenType string `json:"token_type"` } -func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { +func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) { tokenURI, err := url.Parse(computeTokenURI) if err != nil { return nil, err } - if len(cs.scopes) > 0 { + hasScopes := len(cs.scopes) > 0 + if hasScopes || cs.tokenBindingType != NoBinding { v := url.Values{} - v.Set("scopes", strings.Join(cs.scopes, ",")) + if hasScopes { + v.Set("scopes", strings.Join(cs.scopes, ",")) + } + switch cs.tokenBindingType { + case MTLSHardBinding: + v.Set("transport", "mtls") + v.Set("binding-enforcement", "on") + case ALTSHardBinding: + v.Set("transport", "alts") + } tokenURI.RawQuery = v.Encode() } - tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) + tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String()) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index 010afc37..d8f7d961 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -19,6 +19,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "os" "time" @@ -27,6 +28,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/compute/metadata" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -49,6 +51,23 @@ var ( allowOnGCECheck = true ) +// TokenBindingType specifies the type of binding used when requesting a token +// whether to request a hard-bound token using mTLS or an instance identity +// bound token using ALTS. +type TokenBindingType int + +const ( + // NoBinding specifies that requested tokens are not required to have a + // binding. This is the default option. + NoBinding TokenBindingType = iota + // MTLSHardBinding specifies that a hard-bound token should be requested + // using an mTLS with S2A channel. + MTLSHardBinding + // ALTSHardBinding specifies that an instance identity bound token should + // be requested using an ALTS channel. + ALTSHardBinding +) + // OnGCE reports whether this process is running in Google Cloud. func OnGCE() bool { // TODO(codyoss): once all libs use this auth lib move metadata check here @@ -96,12 +115,17 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { } if OnGCE() { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: opts.logger(), + }) return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts), + TokenProvider: computeTokenProvider(opts, metadataClient), ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { - return metadata.ProjectIDWithContext(ctx) + return metadataClient.ProjectIDWithContext(ctx) }), - UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{ + MetadataClient: metadataClient, + }, }), nil } @@ -114,6 +138,10 @@ type DetectOptions struct { // https://www.googleapis.com/auth/cloud-platform. Required if Audience is // not provided. 
Scopes []string + // TokenBindingType specifies the type of binding used when requesting a + // token whether to request a hard-bound token using mTLS or an instance + // identity bound token using ALTS. Optional. + TokenBindingType TokenBindingType // Audience that credentials tokens should have. Only applicable for 2LO // flows with service accounts. If specified, scopes should not be provided. Audience string @@ -142,10 +170,26 @@ type DetectOptions struct { // CredentialsFile overrides detection logic and sources a credential file // from the provided filepath. If provided, CredentialsJSON must not be. // Optional. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). CredentialsFile string // CredentialsJSON overrides detection logic and uses the JSON bytes as the // source for the credential. If provided, CredentialsFile must not be. // Optional. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). CredentialsJSON []byte // UseSelfSignedJWT directs service account based credentials to create a // self-signed JWT with the private key found in the file, skipping any @@ -158,6 +202,11 @@ type DetectOptions struct { // The default value is "googleapis.com". This option is ignored for // authentication flows that do not support universe domain. Optional. UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *DetectOptions) validate() error { @@ -193,6 +242,10 @@ func (o *DetectOptions) client() *http.Client { return internal.DefaultClient() } +func (o *DetectOptions) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { b, err := os.ReadFile(filename) if err != nil { @@ -253,6 +306,7 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { AuthURL: c.AuthURI, TokenURL: c.TokenURI, Client: opts.client(), + Logger: opts.logger(), EarlyTokenExpiry: opts.EarlyTokenRefresh, AuthHandlerOpts: handleOpts, // TODO(codyoss): refactor this out. 
We need to add in auto-detection diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index cf56b025..e5243e6c 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -124,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. + opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, @@ -135,6 +141,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) TokenURL: f.TokenURL, Subject: opts.Subject, Client: opts.client(), + Logger: opts.logger(), } if opts2LO.TokenURL == "" { opts2LO.TokenURL = jwtTokenURL @@ -153,6 +160,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) EarlyTokenExpiry: opts.EarlyTokenRefresh, RefreshToken: f.RefreshToken, Client: opts.client(), + Logger: opts.logger(), } return auth.New3LOTokenProvider(opts3LO) } @@ -171,6 +179,7 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions Scopes: opts.scopes(), WorkforcePoolUserProject: f.WorkforcePoolUserProject, Client: opts.client(), + Logger: opts.logger(), IsDefaultClient: opts.Client == nil, } if f.ServiceAccountImpersonation != nil { @@ -189,6 +198,7 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU ClientSecret: f.ClientSecret, Scopes: opts.scopes(), Client: opts.client(), + Logger: opts.logger(), } return externalaccountuser.NewTokenProvider(externalOpts) } @@ -208,6 +218,7 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil Tp: tp, Delegates: f.Delegates, Client: opts.client(), + Logger: opts.logger(), }) } @@ -215,5 +226,6 @@ func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectO return gdch.NewTokenProvider(f, &gdch.Options{ STSAudience: opts.STSAudience, Client: opts.client(), + Logger: opts.logger(), }) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index a34f6b06..9ecd1f64 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -23,6 +23,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "os" @@ -32,6 +33,7 @@ import ( "time" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) var ( @@ -87,6 +89,7 @@ type awsSubjectProvider struct { reqOpts *RequestOptions Client *http.Client + logger *slog.Logger } func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -94,32 +97,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) if sp.RegionalCredVerificationURL == "" { sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL } - if sp.requestSigner == nil { 
- headers := make(map[string]string) - if sp.shouldUseMetadataServer() { - awsSessionToken, err := sp.getAWSSessionToken(ctx) - if err != nil { - return "", err - } - - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken - } - } - - awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) if err != nil { return "", err } - if sp.region, err = sp.getRegion(ctx, headers); err != nil { - return "", err - } - sp.requestSigner = &awsRequestSigner{ - RegionName: sp.region, - AwsSecurityCredentials: awsSecurityCredentials, + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken } } + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) @@ -194,10 +195,12 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e } req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) + sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) } @@ -227,10 +230,12 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string] for name, value := range headers { req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) } @@ -285,10 +290,12 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context for name, value := range headers { req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return result, err } + sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) } @@ -310,10 +317,12 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws metadata role response", 
"response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go index 112186a9..f4f49f17 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "regexp" "strconv" @@ -28,6 +29,7 @@ import ( "cloud.google.com/go/auth/credentials/internal/impersonate" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -104,6 +106,11 @@ type Options struct { // This is important for X509 credentials which should create a new client if the default was used // but should respect a client explicitly passed in by the user. IsDefaultClient bool + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } // SubjectTokenProvider can be used to supply a subject token to exchange for a @@ -224,6 +231,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { return nil, err } opts.resolveTokenURL() + logger := internallog.New(opts.Logger) stp, err := newSubjectTokenProvider(opts) if err != nil { return nil, err @@ -238,6 +246,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { client: client, opts: opts, stp: stp, + logger: logger, } if opts.ServiceAccountImpersonationURL == "" { @@ -254,6 +263,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { Scopes: scopes, Tp: auth.NewCachedTokenProvider(tp, nil), TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + Logger: logger, }) if err != nil { return nil, err @@ -269,6 +279,7 @@ type subjectTokenProvider interface { // tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. 
type tokenProvider struct { client *http.Client + logger *slog.Logger opts *Options stp subjectTokenProvider } @@ -310,6 +321,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { Authentication: clientAuth, Headers: header, ExtraOpts: options, + Logger: tp.logger, }) if err != nil { return nil, err @@ -330,12 +342,14 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { // newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a // subjectTokenProvider func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { + logger := internallog.New(o.Logger) reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} if o.AwsSecurityCredentialsProvider != nil { return &awsSubjectProvider{ securityCredentialsProvider: o.AwsSecurityCredentialsProvider, TargetResource: o.Audience, reqOpts: reqOpts, + logger: logger, }, nil } else if o.SubjectTokenProvider != nil { return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil @@ -352,6 +366,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { CredVerificationURL: o.CredentialSource.URL, TargetResource: o.Audience, Client: o.Client, + logger: logger, } if o.CredentialSource.IMDSv2SessionTokenURL != "" { awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL @@ -362,7 +377,13 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { } else if o.CredentialSource.File != "" { return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil } else if o.CredentialSource.URL != "" { - return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil + return &urlSubjectProvider{ + URL: o.CredentialSource.URL, + Headers: o.CredentialSource.Headers, + Format: o.CredentialSource.Format, + Client: o.Client, + Logger: logger, + }, nil } else if o.CredentialSource.Executable != nil { ec := o.CredentialSource.Executable if ec.Command == "" { @@ -392,7 +413,10 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") } - return &x509Provider{}, nil + return &x509Provider{ + TrustChainPath: o.CredentialSource.Certificate.TrustChainPath, + ConfigFilePath: o.CredentialSource.Certificate.CertificateConfigLocation, + }, nil } return nil, errors.New("credentials: unable to parse credential source") } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index 0a020599..754ecf4f 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -19,10 +19,12 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -38,6 +40,7 @@ type urlSubjectProvider struct { Headers map[string]string Format *credsfile.Format Client *http.Client + Logger *slog.Logger } func (sp *urlSubjectProvider) subjectToken(ctx 
context.Context) (string, error) { @@ -49,10 +52,12 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) for key, val := range sp.Headers { req.Header.Add(key, val) } + sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) } + sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return "", fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go index 115df588..d86ca593 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -17,27 +17,184 @@ package externalaccount import ( "context" "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io/fs" "net/http" + "os" + "strings" "time" "cloud.google.com/go/auth/internal/transport/cert" ) -// x509Provider implements the subjectTokenProvider type for -// x509 workload identity credentials. Because x509 credentials -// rely on an mTLS connection to represent the 3rd party identity -// rather than a subject token, this provider will always return -// an empty string when a subject token is requested by the external account -// token provider. +// x509Provider implements the subjectTokenProvider type for x509 workload +// identity credentials. This provider retrieves and formats a JSON array +// containing the leaf certificate and trust chain (if provided) as +// base64-encoded strings. This JSON array serves as the subject token for +// mTLS authentication. type x509Provider struct { + // TrustChainPath is the path to the file containing the trust chain certificates. + // The file should contain one or more PEM-encoded certificates. + TrustChainPath string + // ConfigFilePath is the path to the configuration file containing the path + // to the leaf certificate file. + ConfigFilePath string } +const pemCertificateHeader = "-----BEGIN CERTIFICATE-----" + func (xp *x509Provider) providerType() string { return x509ProviderType } -func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { - return "", nil +// loadLeafCertificate loads and parses the leaf certificate from the specified +// configuration file. It retrieves the certificate path from the config file, +// reads the certificate file, and parses the certificate data. +func loadLeafCertificate(configFilePath string) (*x509.Certificate, error) { + // Get the path to the certificate file from the configuration file. + path, err := cert.GetCertificatePath(configFilePath) + if err != nil { + return nil, fmt.Errorf("failed to get certificate path from config file: %w", err) + } + leafCertBytes, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read leaf certificate file: %w", err) + } + // Parse the certificate bytes. + return parseCertificate(leafCertBytes) +} + +// encodeCert encodes a x509.Certificate to a base64 string. 
+func encodeCert(cert *x509.Certificate) string { + // cert.Raw contains the raw DER-encoded certificate. Encode the raw certificate bytes to base64. + return base64.StdEncoding.EncodeToString(cert.Raw) +} + +// parseCertificate parses a PEM-encoded certificate from the given byte slice. +func parseCertificate(certData []byte) (*x509.Certificate, error) { + if len(certData) == 0 { + return nil, errors.New("invalid certificate data: empty input") + } + // Decode the PEM-encoded data. + block, _ := pem.Decode(certData) + if block == nil { + return nil, errors.New("invalid PEM-encoded certificate data: no PEM block found") + } + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("invalid PEM-encoded certificate data: expected CERTIFICATE block type, got %s", block.Type) + } + // Parse the DER-encoded certificate. + certificate, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %w", err) + } + return certificate, nil +} + +// readTrustChain reads a file of PEM-encoded X.509 certificates and returns a slice of parsed certificates. +// It splits the file content into PEM certificate blocks and parses each one. +func readTrustChain(trustChainPath string) ([]*x509.Certificate, error) { + certificateTrustChain := []*x509.Certificate{} + + // If no trust chain path is provided, return an empty slice. + if trustChainPath == "" { + return certificateTrustChain, nil + } + + // Read the trust chain file. + trustChainData, err := os.ReadFile(trustChainPath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, fmt.Errorf("trust chain file not found: %w", err) + } + return nil, fmt.Errorf("failed to read trust chain file: %w", err) + } + + // Split the file content into PEM certificate blocks. + certBlocks := strings.Split(string(trustChainData), pemCertificateHeader) + + // Iterate over each certificate block. + for _, certBlock := range certBlocks { + // Trim whitespace from the block. + certBlock = strings.TrimSpace(certBlock) + + if certBlock != "" { + // Add the PEM header to the block. + certData := pemCertificateHeader + "\n" + certBlock + + // Parse the certificate data. + cert, err := parseCertificate([]byte(certData)) + if err != nil { + return nil, fmt.Errorf("error parsing certificate from trust chain file: %w", err) + } + + // Append the certificate to the trust chain. + certificateTrustChain = append(certificateTrustChain, cert) + } + } + + return certificateTrustChain, nil +} + +// subjectToken retrieves the X.509 subject token. It loads the leaf +// certificate and, if a trust chain path is configured, the trust chain +// certificates. It then constructs a JSON array containing the base64-encoded +// leaf certificate and each base64-encoded certificate in the trust chain. +// The leaf certificate must be at the top of the trust chain file. This JSON +// array is used as the subject token for mTLS authentication. +func (xp *x509Provider) subjectToken(context.Context) (string, error) { + // Load the leaf certificate. + leafCert, err := loadLeafCertificate(xp.ConfigFilePath) + if err != nil { + return "", fmt.Errorf("failed to load leaf certificate: %w", err) + } + + // Read the trust chain. + trustChain, err := readTrustChain(xp.TrustChainPath) + if err != nil { + return "", fmt.Errorf("failed to read trust chain: %w", err) + } + + // Initialize the certificate chain with the leaf certificate. 
+ certChain := []string{encodeCert(leafCert)} + + // If there is a trust chain, add certificates to the certificate chain. + if len(trustChain) > 0 { + firstCert := encodeCert(trustChain[0]) + + // If the first certificate in the trust chain is not the same as the leaf certificate, add it to the chain. + if firstCert != certChain[0] { + certChain = append(certChain, firstCert) + } + + // Iterate over the remaining certificates in the trust chain. + for i := 1; i < len(trustChain); i++ { + encoded := encodeCert(trustChain[i]) + + // Return an error if the current certificate is the same as the leaf certificate. + if encoded == certChain[0] { + return "", errors.New("the leaf certificate must be at the top of the trust chain file") + } + + // Add the current certificate to the chain. + certChain = append(certChain, encoded) + } + } + + // Convert the certificate chain to a JSON array of base64-encoded strings. + jsonChain, err := json.Marshal(certChain) + if err != nil { + return "", fmt.Errorf("failed to format certificate data: %w", err) + } + + // Return the JSON-formatted certificate chain. + return string(jsonChain), nil + } // createX509Client creates a new client that is configured with mTLS, using the diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go index 0d788547..ae39206e 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -17,12 +17,14 @@ package externalaccountuser import ( "context" "errors" + "log/slog" "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) // Options stores the configuration for fetching tokens with external authorized @@ -51,6 +53,8 @@ type Options struct { // Client for token request. Client *http.Client + // Logger for logging. 
+ Logger *slog.Logger } func (c *Options) validate() bool { @@ -90,6 +94,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { RefreshToken: opts.RefreshToken, Authentication: clientAuth, Headers: headers, + Logger: internallog.New(tp.o.Logger), }) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index 720045d3..c2d320fd 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -16,12 +16,13 @@ package gdch import ( "context" - "crypto/rsa" + "crypto" "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "os" @@ -32,6 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -51,6 +53,7 @@ var ( type Options struct { STSAudience string Client *http.Client + Logger *slog.Logger } // NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a @@ -62,7 +65,7 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok if o.STSAudience == "" { return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") } - pk, err := internal.ParseKey([]byte(f.PrivateKey)) + signer, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, err } @@ -75,10 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), tokenURL: f.TokenURL, aud: o.STSAudience, - pk: pk, + signer: signer, pkID: f.PrivateKeyID, certPool: certPool, client: o.Client, + logger: internallog.New(o.Logger), } return tp, nil } @@ -97,11 +101,12 @@ type gdchProvider struct { serviceIdentity string tokenURL string aud string - pk *rsa.PrivateKey + signer crypto.Signer pkID string certPool *x509.CertPool client *http.Client + logger *slog.Logger } func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { @@ -120,7 +125,7 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(g.pkID), } - payload, err := jwt.EncodeJWS(&h, &claims, g.pk) + payload, err := jwt.EncodeJWS(&h, &claims, g.signer) if err != nil { return nil, err } @@ -136,10 +141,12 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } + g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, &auth.Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go new file mode 100644 index 00000000..705462c1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go @@ -0,0 +1,105 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +var ( + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN" +) + +// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token]. +type IDTokenIAMOptions struct { + // Client is required. + Client *http.Client + // Logger is required. + Logger *slog.Logger + UniverseDomain auth.CredentialsPropertyProvider + ServiceAccountEmail string + GenerateIDTokenRequest +} + +// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC. +type GenerateIDTokenRequest struct { + Audience string `json:"audience"` + IncludeEmail bool `json:"includeEmail"` + // Delegates are the ordered, fully-qualified resource name for service + // accounts in a delegation chain. Each service account must be granted + // roles/iam.serviceAccountTokenCreator on the next service account in the + // chain. The delegates must have the following format: + // projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard + // character is required; replacing it with a project ID is invalid. + // Optional. + Delegates []string `json:"delegates,omitempty"` +} + +// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC. +type GenerateIDTokenResponse struct { + Token string `json:"token"` +} + +// Token call IAM generateIdToken with the configuration provided in [IDTokenIAMOptions]. 
+func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) { + universeDomain, err := o.UniverseDomain.GetProperty(ctx) + if err != nil { + return nil, err + } + endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1) + url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail)) + + bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes)) + resp, body, err := internal.DoRequest(o.Client, req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err) + } + o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var tokenResp GenerateIDTokenResponse + if err := json.Unmarshal(body, &tokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) + } + return &auth.Token{ + Value: tokenResp.Token, + // Generated ID tokens are good for one hour. + Expiry: time.Now().Add(1 * time.Hour), + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go index ed53afa5..b3a99261 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -20,11 +20,13 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -74,6 +76,11 @@ type Options struct { // Client configures the underlying client used to make network requests // when fetching tokens. Required. Client *http.Client + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *Options) validate() error { @@ -88,6 +95,7 @@ func (o *Options) validate() error { // Token performs the exchange to get a temporary service account token to allow access to GCP. 
func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + logger := internallog.New(o.Logger) lifetime := defaultTokenLifetime if o.TokenLifetimeSeconds != 0 { lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) @@ -109,10 +117,12 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { if err := setAuthHeader(ctx, o.Tp, req); err != nil { return nil, err } + logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b)) resp, body, err := internal.DoRequest(o.Client, req) if err != nil { return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) } + logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go index 768a9daf..e1d2b150 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "log/slog" "net/http" "net/url" "strconv" @@ -26,6 +27,7 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -40,6 +42,7 @@ const ( // Options stores the configuration for making an sts exchange request. type Options struct { Client *http.Client + Logger *slog.Logger Endpoint string Request *TokenRequest Authentication ClientAuthentication @@ -80,6 +83,7 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { opts.Authentication.InjectAuthentication(data, opts.Headers) encodedData := data.Encode() + logger := internallog.New(opts.Logger) req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) if err != nil { @@ -93,10 +97,12 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo } req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData))) resp, body, err := internal.DoRequest(opts.Client, req) if err != nil { return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) } + logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4..8d335cce 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -16,8 +16,10 @@ package credentials import ( "context" - "crypto/rsa" + "crypto" + "errors" "fmt" + "log/slog" "strings" "time" @@ -35,7 +37,10 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. 
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { - pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } + signer, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) } @@ -43,8 +48,9 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions email: f.ClientEmail, audience: opts.Audience, scopes: opts.scopes(), - pk: pk, + signer: signer, pkID: f.PrivateKeyID, + logger: opts.logger(), }, nil } @@ -52,8 +58,9 @@ type selfSignedTokenProvider struct { email string audience string scopes []string - pk *rsa.PrivateKey + signer crypto.Signer pkID string + logger *slog.Logger } func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { @@ -73,9 +80,10 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(tp.pkID), } - msg, err := jwt.EncodeJWS(h, c, tp.pk) + tok, err := jwt.EncodeJWS(h, c, tp.signer) if err != nil { return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) } - return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil + tp.logger.Debug("created self-signed JWT", "token", tok) + return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil } diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 30fedf95..5758e85b 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -20,12 +20,14 @@ import ( "crypto/tls" "errors" "fmt" + "log/slog" "net/http" "cloud.google.com/go/auth" detect "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" + "github.com/googleapis/gax-go/v2/internallog" ) // ClientCertProvider is a function that returns a TLS client certificate to be @@ -69,6 +71,11 @@ type Options struct { // configured for the client, which will be compared to the universe domain // that is separately configured for the credentials. UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -101,6 +108,10 @@ func (o *Options) client() *http.Client { return nil } +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func (o *Options) resolveDetectOptions() *detect.DetectOptions { io := o.InternalOptions // soft-clone these so we are not updating a ref the user holds and may reuse @@ -125,6 +136,9 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = detect.GoogleMTLSTokenURL } + if do.Logger == nil { + do.Logger = o.logger() + } return do } @@ -147,14 +161,21 @@ type InternalOptions struct { // service. DefaultScopes []string // SkipValidation bypasses validation on Options. 
It should only be used - // internally for clients that needs more control over their transport. + // internally for clients that need more control over their transport. SkipValidation bool + // SkipUniverseDomainValidation skips the verification that the universe + // domain configured for the client matches the universe domain configured + // for the credentials. It should only be used internally for clients that + // need more control over their transport. The default is false. + SkipUniverseDomainValidation bool } // AddAuthorizationMiddleware adds a middleware to the provided client's // transport that sets the Authorization header with the value produced by the // provided [cloud.google.com/go/auth.Credentials]. An error is returned only // if client or creds is nil. +// +// This function does not support setting a universe domain value on the client. func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error { if client == nil || creds == nil { return fmt.Errorf("httptransport: client and tp must not be nil") @@ -173,7 +194,6 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er client.Transport = &authTransport{ creds: creds, base: base, - // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls. } return nil } @@ -191,6 +211,7 @@ func NewClient(opts *Options) (*http.Client, error) { ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go deleted file mode 100644 index 467c477c..00000000 --- a/vendor/cloud.google.com/go/auth/httptransport/trace.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httptransport - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const ( - httpHeaderMaxSize = 200 - cloudTraceHeader = `X-Cloud-Trace-Context` -) - -// asserts the httpFormat fulfills this foreign interface -var _ propagation.HTTPFormat = (*httpFormat)(nil) - -// httpFormat implements propagation.httpFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Cloud Trace. -type httpFormat struct{} - -// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests. -func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(cloudTraceHeader) - // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. - // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. 
- if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 32) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Cloud Trace header. -func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(cloudTraceHeader, header) -} diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 274bb012..ee215b6d 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -26,7 +27,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" - "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) @@ -41,7 +42,7 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht - trans = addOCTransport(trans, opts) + trans = addOpenTelemetryTransport(trans, opts) switch { case opts.DisableAuthentication: // Do nothing. 
@@ -81,11 +82,16 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers.Set(quotaProjectHeaderKey, qp) } } + var skipUD bool + if iOpts := opts.InternalOptions; iOpts != nil { + skipUD = iOpts.SkipUniverseDomainValidation + } creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) trans = &authTransport{ - base: trans, - creds: creds, - clientUniverseDomain: opts.UniverseDomain, + base: trans, + creds: creds, + clientUniverseDomain: opts.UniverseDomain, + skipUniverseDomainValidation: skipUD, } } return trans, nil @@ -162,29 +168,37 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { return rt.RoundTrip(&newReq) } -func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { +func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { if opts.DisableTelemetry { return trans } - return &ochttp.Transport{ - Base: trans, - Propagation: &httpFormat{}, - } + return otelhttp.NewTransport(trans) } type authTransport struct { - creds *auth.Credentials - base http.RoundTripper - clientUniverseDomain string + creds *auth.Credentials + base http.RoundTripper + clientUniverseDomain string + skipUniverseDomainValidation bool } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an @@ -204,7 +218,7 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { if err != nil { return nil, err } - if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" { credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go index 3be6e5bb..60634730 100644 --- a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -127,6 +127,7 @@ type ExecutableConfig struct { type CertificateConfig struct { UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` CertificateConfigLocation string `json:"certificate_config_location"` + TrustChainPath string `json:"trust_chain_path"` } // ServiceAccountImpersonationInfo has impersonation configuration. 
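A recurring theme in the hunks above is the new *slog.Logger option threaded through the token providers and transports, with token requests and responses logged at debug level via internallog. A minimal sketch of how a caller could opt in, assuming only the Logger field and NewClient signature shown above; the handler choice and the otherwise-empty Options literal are illustrative, not the library's documented usage:

```go
// Sketch: opting into the slog-based debug logging added in this update.
package sketch

import (
	"log/slog"
	"net/http"
	"os"

	"cloud.google.com/go/auth/httptransport"
)

func newDebugAuthClient() (*http.Client, error) {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug, // the "token request"/"token response" records are emitted at DEBUG
	}))
	// Other Options fields (credentials, endpoint overrides) are omitted here;
	// whether the defaults suffice depends on the caller's environment.
	return httptransport.NewClient(&httptransport.Options{
		Logger: logger,
	})
}
```

Per the option comments in these hunks, leaving Logger nil keeps debug logging disabled unless GOOGLE_SDK_GO_LOGGING_LEVEL is set, in which case internallog.New falls back to a default logger.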
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 4308345e..6a8eab6e 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -16,7 +16,7 @@ package internal import ( "context" - "crypto/rsa" + "crypto" "crypto/x509" "encoding/json" "encoding/pem" @@ -38,8 +38,11 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. @@ -69,25 +72,27 @@ func DefaultClient() *http.Client { } // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an crypto.Signer. It detects whether the private key is in a // PEM container or not. If so, it extracts the the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { +func ParseKey(key []byte) (crypto.Signer, error) { block, _ := pem.Decode(key) if block != nil { key = block.Bytes } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) + var parsedKey crypto.PrivateKey + var err error + parsedKey, err = x509.ParsePKCS8PrivateKey(key) if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err) } } - parsed, ok := parsedKey.(*rsa.PrivateKey) + parsed, ok := parsedKey.(crypto.Signer) if !ok { - return nil, errors.New("private key is invalid") + return nil, errors.New("private key is not a signer") } return parsed, nil } @@ -176,6 +181,7 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) { // ComputeUniverseDomainProvider fetches the credentials universe domain from // the google cloud metadata service. type ComputeUniverseDomainProvider struct { + MetadataClient *metadata.Client universeDomainOnce sync.Once universeDomain string universeDomainErr error @@ -185,7 +191,7 @@ type ComputeUniverseDomainProvider struct { // metadata service. func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) { c.universeDomainOnce.Do(func() { - c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx) + c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient) }) if c.universeDomainErr != nil { return "", c.universeDomainErr @@ -194,14 +200,14 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string } // httpGetMetadataUniverseDomain is a package var for unit test substitution. 
-var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { +var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - return metadata.GetWithContext(ctx, "universe/universe_domain") + return client.GetWithContext(ctx, "universe/universe-domain") } -func getMetadataUniverseDomain(ctx context.Context) (string, error) { - universeDomain, err := httpGetMetadataUniverseDomain(ctx) +func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) { + universeDomain, err := httpGetMetadataUniverseDomain(ctx, client) if err == nil { return universeDomain, nil } @@ -211,3 +217,9 @@ func getMetadataUniverseDomain(ctx context.Context) (string, error) { } return "", err } + +// FormatIAMServiceAccountResource sets a service account name in an IAM resource +// name. +func FormatIAMServiceAccountResource(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go index dc28b3c3..9bd55f51 100644 --- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -111,7 +111,7 @@ func (c *Claims) encode() (string, error) { } // EncodeJWS encodes the data using the provided key as a JSON web signature. -func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { +func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) { head, err := header.encode() if err != nil { return "", err @@ -123,7 +123,7 @@ func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { ss := fmt.Sprintf("%s.%s", head, claims) h := sha256.New() h.Write([]byte(ss)) - sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index 26e037c1..b1f0fcf9 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -20,6 +20,7 @@ import ( "crypto/x509" "errors" "log" + "log/slog" "net" "net/http" "net/url" @@ -51,8 +52,14 @@ const ( mtlsMDSKey = "/run/google-mds-mtls/client.key" ) -var ( - errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") +// Type represents the type of transport used. +type Type int + +const ( + // TransportTypeUnknown represents an unknown transport type and is the default option. + TransportTypeUnknown Type = iota + // TransportTypeMTLSS2A represents the mTLS transport type using S2A. + TransportTypeMTLSS2A ) // Options is a struct that is duplicated information from the individual @@ -60,13 +67,14 @@ var ( // fields on httptransport.Options and grpctransport.Options. 
type Options struct { Endpoint string - DefaultMTLSEndpoint string DefaultEndpointTemplate string + DefaultMTLSEndpoint string ClientCertProvider cert.Provider Client *http.Client UniverseDomain string EnableDirectPath bool EnableDirectPathXds bool + Logger *slog.Logger } // getUniverseDomain returns the default service domain for a given Cloud @@ -94,6 +102,16 @@ func (o *Options) defaultEndpoint() string { return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1) } +// defaultMTLSEndpoint returns the DefaultMTLSEndpointTemplate merged with the +// universe domain if the DefaultMTLSEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultMTLSEndpoint() string { + if o.DefaultMTLSEndpoint == "" { + return "" + } + return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + // mergedEndpoint merges a user-provided Endpoint of format host[:port] with the // default endpoint. func (o *Options) mergedEndpoint() (string, error) { @@ -112,13 +130,20 @@ func fixScheme(baseURL string) string { return baseURL } +// GRPCTransportCredentials embeds interface TransportCredentials with additional data. +type GRPCTransportCredentials struct { + credentials.TransportCredentials + Endpoint string + TransportType Type +} + // GetGRPCTransportCredsAndEndpoint returns an instance of // [google.golang.org/grpc/credentials.TransportCredentials], and the -// corresponding endpoint to use for GRPC client. -func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) { +// corresponding endpoint and transport type to use for GRPC client. +func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, error) { config, err := getTransportConfig(opts) if err != nil { - return nil, "", err + return nil, err } defaultTransportCreds := credentials.NewTLS(&tls.Config{ @@ -133,12 +158,16 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) if err != nil { log.Printf("Loading MTLS MDS credentials failed: %v", err) - return defaultTransportCreds, config.endpoint, nil + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil + } } } else if config.s2aAddress != "" { s2aAddr = config.s2aAddress } else { - return defaultTransportCreds, config.endpoint, nil + return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil } var fallbackOpts *s2a.FallbackOptions @@ -156,9 +185,9 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede }) if err != nil { // Use default if we cannot initialize S2A client transport credentials. 
- return defaultTransportCreds, config.endpoint, nil + return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil } - return s2aTransportCreds, config.s2aMTLSEndpoint, nil + return &GRPCTransportCredentials{s2aTransportCreds, config.s2aMTLSEndpoint, TransportTypeMTLSS2A}, nil } // GetHTTPTransportConfig returns a client certificate source and a function for @@ -177,7 +206,11 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) if err != nil { log.Printf("Loading MTLS MDS credentials failed: %v", err) - return config.clientCertSource, nil, nil + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } } } else if config.s2aAddress != "" { s2aAddr = config.s2aAddress @@ -248,12 +281,9 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { if !shouldUseS2A(clientCertSource, opts) { return &defaultTransportConfig, nil } - if !opts.isUniverseDomainGDU() { - return nil, errUniverseNotSupportedMTLS - } - s2aAddress := GetS2AAddress() - mtlsS2AAddress := GetMTLSS2AAddress() + s2aAddress := GetS2AAddress(opts.Logger) + mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger) if s2aAddress == "" && mtlsS2AAddress == "" { return &defaultTransportConfig, nil } @@ -262,7 +292,7 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { endpoint: endpoint, s2aAddress: s2aAddress, mtlsS2AAddress: mtlsS2AAddress, - s2aMTLSEndpoint: opts.DefaultMTLSEndpoint, + s2aMTLSEndpoint: opts.defaultMTLSEndpoint(), }, nil } @@ -308,24 +338,23 @@ type transportConfig struct { // getEndpoint returns the endpoint for the service, taking into account the // user-provided endpoint override "settings.Endpoint". // -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. +// If no endpoint override is specified, we will either return the default +// endpoint or the default mTLS endpoint if a client certificate is available. // -// You can override the default endpoint choice (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// You can override the default endpoint choice (mTLS vs. regular) by setting +// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable. // // If the endpoint override is an address (host:port) rather than full base // URL (ex. https://...), then the user-provided address will be merged into // the default endpoint. For example, WithEndpoint("myhost:8000") and -// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz" +// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return +// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS +// endpoint. 
func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) { if opts.Endpoint == "" { mtlsMode := getMTLSMode() if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - if !opts.isUniverseDomainGDU() { - return "", errUniverseNotSupportedMTLS - } - return opts.DefaultMTLSEndpoint, nil + return opts.defaultMTLSEndpoint(), nil } return opts.defaultEndpoint(), nil } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go index 36651591..6c954ae1 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -16,7 +16,6 @@ package cert import ( "crypto/tls" - "errors" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -37,10 +36,9 @@ type ecpSource struct { func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, client.ErrCredUnavailable) { - return nil, errSourceUnavailable - } - return nil, err + // TODO(codyoss): once this is fixed upstream can handle this error a + // little better here. But be safe for now and assume unavailable. + return nil, errSourceUnavailable } return (&ecpSource{ diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index e8675bf8..b2a3be23 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -37,6 +37,36 @@ type certificateConfig struct { CertConfigs certConfigs `json:"cert_configs"` } +// getconfigFilePath determines the path to the certificate configuration file. +// It first checks for the presence of an environment variable that specifies +// the file path. If the environment variable is not set, it falls back to +// a default configuration file path. +func getconfigFilePath() string { + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + return envFilePath + } + return util.GetDefaultConfigFilePath() + +} + +// GetCertificatePath retrieves the certificate file path from the provided +// configuration file. If the configFilePath is empty, it attempts to load +// the configuration from a well-known gcloud location. +// This function is exposed to allow other packages, such as the +// externalaccount package, to retrieve the certificate path without needing +// to load the entire certificate configuration. +func GetCertificatePath(configFilePath string) (string, error) { + if configFilePath == "" { + configFilePath = getconfigFilePath() + } + certFile, _, err := getCertAndKeyFiles(configFilePath) + if err != nil { + return "", err + } + return certFile, nil +} + // NewWorkloadX509CertProvider creates a certificate source // that reads a certificate and private key file from the local file system. // This is intended to be used for workload identity federation. @@ -47,14 +77,8 @@ type certificateConfig struct { // a well-known gcloud location. 
func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) { if configFilePath == "" { - envFilePath := util.GetConfigFilePathFromEnv() - if envFilePath != "" { - configFilePath = envFilePath - } else { - configFilePath = util.GetDefaultConfigFilePath() - } + configFilePath = getconfigFilePath() } - certFile, keyFile, err := getCertAndKeyFiles(configFilePath) if err != nil { return nil, err @@ -82,10 +106,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) func getCertAndKeyFiles(configFilePath string) (string, string, error) { jsonFile, err := os.Open(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", "", errSourceUnavailable - } - return "", "", err + return "", "", errSourceUnavailable } byteValue, err := io.ReadAll(jsonFile) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 37894bfc..a6330995 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -19,6 +19,7 @@ import ( "encoding/json" "fmt" "log" + "log/slog" "os" "strconv" "sync" @@ -39,8 +40,8 @@ var ( // GetS2AAddress returns the S2A address to be reached via plaintext connection. // Returns empty string if not set or invalid. -func GetS2AAddress() string { - getMetadataMTLSAutoConfig() +func GetS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) if !mtlsConfiguration.valid() { return "" } @@ -49,8 +50,8 @@ func GetS2AAddress() string { // GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. // Returns empty string if not set or invalid. -func GetMTLSS2AAddress() string { - getMetadataMTLSAutoConfig() +func GetMTLSS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) if !mtlsConfiguration.valid() { return "" } @@ -74,22 +75,25 @@ type s2aAddresses struct { MTLSAddress string `json:"mtls_address"` } -func getMetadataMTLSAutoConfig() { +func getMetadataMTLSAutoConfig(logger *slog.Logger) { var err error mtlsOnce.Do(func() { - mtlsConfiguration, err = queryConfig() + mtlsConfiguration, err = queryConfig(logger) if err != nil { log.Printf("Getting MTLS config failed: %v", err) } }) } -var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.GetWithContext(context.Background(), configEndpointSuffix) +var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: logger, + }) + return metadataClient.GetWithContext(context.Background(), configEndpointSuffix) } -func queryConfig() (*mtlsConfig, error) { - resp, err := httpGetMetadataMTLSConfig() +func queryConfig(logger *slog.Logger) (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig(logger) if err != nil { return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index cc586ec5..5c8721ef 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -37,6 +37,7 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt } newDo := &credentials.DetectOptions{ // Simple types + TokenBindingType: oldDo.TokenBindingType, Audience: oldDo.Audience, Subject: oldDo.Subject, EarlyTokenRefresh: 
oldDo.EarlyTokenRefresh, @@ -46,9 +47,10 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt UseSelfSignedJWT: oldDo.UseSelfSignedJWT, UniverseDomain: oldDo.UniverseDomain, - // These fields are are pointer types that we just want to use exactly - // as the user set, copy the ref + // These fields are pointer types that we just want to use exactly as + // the user set, copy the ref Client: oldDo.Client, + Logger: oldDo.Logger, AuthHandlerOptions: oldDo.AuthHandlerOptions, } diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index 7faf6e0c..42716752 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,33 @@ # Changelog +## [0.2.8](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.7...auth/oauth2adapt/v0.2.8) (2025-03-17) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd)) + +## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + +## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21) + + +### Bug Fixes + +* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161) + +## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30) + + +### Bug Fixes + +* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52)) + ## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go index 9835ac57..9cc33e5e 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -26,6 +26,13 @@ import ( "golang.org/x/oauth2/google" ) +const ( + oauth2TokenSourceKey = "oauth2.google.tokenSource" + oauth2ServiceAccountKey = "oauth2.google.serviceAccount" + authTokenSourceKey = "auth.google.tokenSource" + authServiceAccountKey = "auth.google.serviceAccount" +) + // TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] // into a [cloud.google.com/go/auth.TokenProvider]. func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { @@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { } return nil, err } + // Preserve compute token metadata, for both types of tokens. 
+ metadata := map[string]interface{}{} + if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok { + metadata[authTokenSourceKey] = val + metadata[oauth2TokenSourceKey] = val + } + if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok { + metadata[authServiceAccountKey] = val + metadata[oauth2ServiceAccountKey] = val + } return &auth.Token{ - Value: tok.AccessToken, - Type: tok.Type(), - Expiry: tok.Expiry, + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + Metadata: metadata, }, nil } @@ -76,11 +94,29 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { } return nil, err } - return &oauth2.Token{ + tok2 := &oauth2.Token{ AccessToken: tok.Value, TokenType: tok.Type, Expiry: tok.Expiry, - }, nil + } + // Preserve token metadata. + m := tok.Metadata + if m != nil { + // Copy map to avoid concurrent map writes error (#11161). + metadata := make(map[string]interface{}, len(m)+2) + for k, v := range m { + metadata[k] = v + } + // Append compute token metadata in converted form. + if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" { + metadata[oauth2TokenSourceKey] = val + } + if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" { + metadata[oauth2ServiceAccountKey] = val + } + tok2 = tok2.WithExtra(metadata) + } + return tok2, nil } // AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index 97a57f46..07804dc1 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "mime" "net/http" "net/url" @@ -28,6 +29,7 @@ import ( "time" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) // AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for @@ -69,6 +71,11 @@ type Options3LO struct { // AuthHandlerOpts provides a set of options for doing a // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. AuthHandlerOpts *AuthorizationHandlerOptions + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *Options3LO) validate() error { @@ -96,6 +103,10 @@ func (o *Options3LO) validate() error { return nil } +func (o *Options3LO) logger() *slog.Logger { + return internallog.New(o.Logger) +} + // PKCEOptions holds parameters to support PKCE. type PKCEOptions struct { // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier. 
@@ -293,12 +304,15 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin if o.AuthStyle == StyleInHeader { req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) } + logger := o.logger() + logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) // Make request resp, body, err := internal.DoRequest(o.client(), req) if err != nil { return nil, refreshToken, err } + logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body)) failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 tokError := &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md b/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md index 999f8f78..a3d3790c 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md +++ b/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md @@ -1,5 +1,86 @@ # Changelog +## [1.16.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.16.0...v1.16.1) (2025-04-16) + + +### Bug Fixes + +* bump dependencies to latest ([#968](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/968)) ([bb3c9f4](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/bb3c9f4cb37fbb86a396fc716992c2ac82dcbcff)) + +## [1.16.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.15.0...v1.16.0) (2025-03-19) + + +### Features + +* add domain name validation ([#925](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/925)) ([986152f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/986152ff1b733ad70b43bad1d22a450376ce5021)) +* Use standard TLS hostname validation for instances with DNS names ([#954](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/954)) ([d733a16](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/d733a161c5020a1013151a11ec385eba5ea50ce5)) + +## [1.15.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.14.2...v1.15.0) (2025-02-12) + + +### Features + +* add support for Go 1.24 and drop Go 1.22 ([#942](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/942)) ([e10fbd2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/e10fbd2f790bc17fc7eb72ea785b11efddfb451f)) + + +### Bug Fixes + +* Refresh client cert when CA rotation ([#934](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/934)) ([c22e2d4](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/c22e2d4e861df7cf317af16c1015d40811dae3d8)), closes [#932](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/932) + +## [1.14.2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.14.1...v1.14.2) (2025-01-30) + + +### Bug Fixes + +* pass headers to transport ([#928](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/928)) ([08be3ab](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/08be3aba5c5d2a6a69c370e1a2772b2f73435a1f)) + +## [1.14.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.14.0...v1.14.1) (2025-01-23) + + +### Bug Fixes + +* correct default usage for `WithQuotaProject` and `WithUserAgent` ([#920](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/920)) ([8520c3d](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/8520c3d938e5011eb77ff5b5dc08c4e94e691a16)) + +## 
[1.14.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.13.2...v1.14.0) (2025-01-14) + + +### Features + +* Custom SAN Support ([#911](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/911)) ([772fae4](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/772fae4ffdf19c3f4fb8e024845c976008e1e30f)) +* drop support for Go 1.21 ([#906](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/906)) ([12c1618](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/12c1618f3c29aad0d32f16dcf445b57422380137)) +* Support server certificates from a private CA ([#899](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/pull/899)) ([a8ed925](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/a8ed9259825912f78b2c111d9c38f7b00b6eee94)) + + +### Bug Fixes + +* use auth DetectDefault over oauth2 FindDefaultCredentials ([#909](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/909)) ([52fef27](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/52fef27f82bb4c47c3ee96a06a2046da99d77225)) + +## [1.13.2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.13.1...v1.13.2) (2024-12-10) + + +### Bug Fixes + +* bump dependencies to latest ([#893](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/893)) ([4467ed4](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4467ed4e8456c453fd5b616d02eeaeed41d5d921)) + +## [1.13.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.13.0...v1.13.1) (2024-11-20) + + +### Bug Fixes + +* update dependencies to latest ([#884](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/884)) ([dc85de5](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/dc85de51bda5eb7d5c28b691dd847a4d1e5ef953)) + +## [1.13.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.12.1...v1.13.0) (2024-10-23) + + +### Features + +* Automatically reset connection when the DNS record changes. ([#868](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/868)) ([4d7abd8](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4d7abd877edf5fba3173b69e14181b6ddf911b24)) + + +### Bug Fixes + +* update bytes_sent and bytes_received to use Sum ([#874](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/874)) ([73b6f38](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/73b6f3860ef28dedd995a41b74b5f12168d3ff06)) + ## [1.12.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.12.0...v1.12.1) (2024-09-19) diff --git a/vendor/cloud.google.com/go/cloudsqlconn/README.md b/vendor/cloud.google.com/go/cloudsqlconn/README.md index ef77c41b..c18419fc 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/README.md +++ b/vendor/cloud.google.com/go/cloudsqlconn/README.md @@ -234,7 +234,8 @@ func connect() { // ... etc } ``` -### Using DNS to identify an instance + +### Using DNS domain names to identify instances The connector can be configured to use DNS to look up an instance. This would allow you to configure your application to connect to a database instance, and @@ -259,7 +260,7 @@ For example: suppose you wanted to use the domain name #### Configure the connector -Configure the connector as described above, replacing the conenctor ID with +Configure the connector as described above, replacing the connector ID with the DNS name. 
Adapting the MySQL + database/sql example above: @@ -276,8 +277,7 @@ import ( func connect() { cleanup, err := mysql.RegisterDriver("cloudsql-mysql", - cloudsqlconn.WithDNSResolver(), - cloudsqlconn.WithCredentialsFile("key.json")) + cloudsqlconn.WithDNSResolver()) if err != nil { // ... handle error } @@ -292,6 +292,40 @@ func connect() { } ``` +### Automatic fail-over using DNS domain names + +When the connector is configured using a domain name, the connector will +periodically check if the DNS record for an instance changes. When the connector +detects that the domain name refers to a different instance, the connector will +close all open connections to the old instance. Subsequent connection attempts +will be directed to the new instance. + +For example: suppose application is configured to connect using the +domain name `prod-db.mycompany.example.com`. Initially the corporate DNS +zone has a TXT record with the value `my-project:region:my-instance`. The +application establishes connections to the `my-project:region:my-instance` +Cloud SQL instance. + +Then, to reconfigure the application to use a different database +instance, change the value of the `prod-db.mycompany.example.com` DNS record +from `my-project:region:my-instance` to `my-project:other-region:my-instance-2` + +The connector inside the application detects the change to this +DNS record. Now, when the application connects to its database using the +domain name `prod-db.mycompany.example.com`, it will connect to the +`my-project:other-region:my-instance-2` Cloud SQL instance. + +The connector will automatically close all existing connections to +`my-project:region:my-instance`. This will force the connection pools to +establish new connections. Also, it may cause database queries in progress +to fail. + +The connector will poll for changes to the DNS name every 30 seconds by default. +You may configure the frequency of the connections using the option +`WithFailoverPeriod(d time.Duration)`. When this is set to 0, the connector will +disable polling and only check if the DNS record changed when it is +creating a new connection. + ### Using Options diff --git a/vendor/cloud.google.com/go/cloudsqlconn/dialer.go b/vendor/cloud.google.com/go/cloudsqlconn/dialer.go index 8a62a997..5c6fe7d9 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/dialer.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/dialer.go @@ -24,11 +24,16 @@ import ( "fmt" "io" "net" + "net/http" + "os" "strings" "sync" "sync/atomic" "time" + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/httptransport" "cloud.google.com/go/cloudsqlconn/debug" "cloud.google.com/go/cloudsqlconn/errtype" "cloud.google.com/go/cloudsqlconn/instance" @@ -36,8 +41,6 @@ import ( "cloud.google.com/go/cloudsqlconn/internal/trace" "github.com/google/uuid" "golang.org/x/net/proxy" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" "google.golang.org/api/option" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) @@ -50,6 +53,12 @@ const ( // iamLoginScope is the OAuth2 scope used for tokens embedded in the ephemeral // certificate. iamLoginScope = "https://www.googleapis.com/auth/sqlservice.login" + // universeDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + universeDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + // defaultUniverseDomain is the default value for universe domain. + // Universe domain is the default service domain for a given Cloud universe. 
+ defaultUniverseDomain = "googleapis.com" ) var ( @@ -110,12 +119,30 @@ type connectionInfoCache interface { io.Closer } -// monitoredCache is a wrapper around a connectionInfoCache that tracks the -// number of connections to the associated instance. -type monitoredCache struct { - openConns *uint64 +type cacheKey struct { + domainName string + project string + region string + name string +} - connectionInfoCache +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. +func (c *dialerConfig) getClientUniverseDomain() string { + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(universeDomainEnvVar); envUD != "" { + return envUD + } + return defaultUniverseDomain } // A Dialer is used to create connections to Cloud SQL instances. @@ -123,7 +150,7 @@ type monitoredCache struct { // Use NewDialer to initialize a Dialer. type Dialer struct { lock sync.RWMutex - cache map[instance.ConnName]monitoredCache + cache map[cacheKey]*monitoredCache keyGenerator *keyGenerator refreshTimeout time.Duration // closed reports if the dialer has been closed. @@ -151,11 +178,12 @@ type Dialer struct { // network. By default, it is golang.org/x/net/proxy#Dial. dialFunc func(cxt context.Context, network, addr string) (net.Conn, error) - // iamTokenSource supplies the OAuth2 token used for IAM DB Authn. - iamTokenSource oauth2.TokenSource + // iamTokenProvider supplies the OAuth2 token used for IAM DB Authn. + iamTokenProvider auth.TokenProvider // resolver converts instance names into DNS names. - resolver instance.ConnectionNameResolver + resolver instance.ConnectionNameResolver + failoverPeriod time.Duration } var ( @@ -174,11 +202,11 @@ func (nullLogger) Debugf(_ context.Context, _ string, _ ...interface{}) {} // RSA keypair is generated will be faster. func NewDialer(ctx context.Context, opts ...Option) (*Dialer, error) { cfg := &dialerConfig{ - refreshTimeout: cloudsql.RefreshTimeout, - dialFunc: proxy.Dial, - logger: nullLogger{}, - useragents: []string{userAgent}, - serviceUniverse: "googleapis.com", + refreshTimeout: cloudsql.RefreshTimeout, + dialFunc: proxy.Dial, + logger: nullLogger{}, + useragents: []string{userAgent}, + failoverPeriod: cloudsql.FailoverPeriod, } for _, opt := range opts { opt(cfg) @@ -192,43 +220,57 @@ func NewDialer(ctx context.Context, opts ...Option) (*Dialer, error) { if cfg.setIAMAuthNTokenSource && !cfg.useIAMAuthN { return nil, errUseTokenSource } - // Add this to the end to make sure it's not overridden - cfg.sqladminOpts = append(cfg.sqladminOpts, option.WithUserAgent(strings.Join(cfg.useragents, " "))) - // If callers have not provided a token source, either explicitly with - // WithTokenSource or implicitly with WithCredentialsJSON etc., then use the - // default token source. 
+ // If callers have not provided a credential source, either explicitly with + // WithTokenSource or implicitly with WithCredentialsJSON etc., then use + // default credentials if !cfg.setCredentials { - c, err := google.FindDefaultCredentials(ctx, sqladmin.SqlserviceAdminScope) + c, err := credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: []string{sqladmin.SqlserviceAdminScope}, + }) if err != nil { return nil, fmt.Errorf("failed to create default credentials: %v", err) } - ud, err := c.GetUniverseDomain() + cfg.authCredentials = c + // create second set of credentials, scoped for IAM AuthN login only + scoped, err := credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: []string{iamLoginScope}, + }) if err != nil { - return nil, fmt.Errorf("failed to get universe domain: %v", err) + return nil, fmt.Errorf("failed to create scoped credentials: %v", err) } - cfg.credentialsUniverse = ud - cfg.sqladminOpts = append(cfg.sqladminOpts, option.WithTokenSource(c.TokenSource)) - scoped, err := google.DefaultTokenSource(ctx, iamLoginScope) - if err != nil { - return nil, fmt.Errorf("failed to create scoped token source: %v", err) + cfg.iamLoginTokenProvider = scoped.TokenProvider + } + + // For all credential paths, use auth library's built-in + // httptransport.NewClient + if cfg.authCredentials != nil { + // Set headers for auth client as below WithHTTPClient will ignore + // WithQuotaProject and WithUserAgent Options + headers := http.Header{} + headers.Set("User-Agent", strings.Join(cfg.useragents, " ")) + if cfg.quotaProject != "" { + headers.Set("X-Goog-User-Project", cfg.quotaProject) } - cfg.iamLoginTokenSource = scoped - } - if cfg.setUniverseDomain && cfg.setAdminAPIEndpoint { - return nil, errors.New( - "can not use WithAdminAPIEndpoint and WithUniverseDomain Options together, " + - "use WithAdminAPIEndpoint (it already contains the universe domain)", - ) - } - - if cfg.credentialsUniverse != "" && cfg.serviceUniverse != "" { - if cfg.credentialsUniverse != cfg.serviceUniverse { - return nil, fmt.Errorf( - "the configured service universe domain (%s) does not match the credential universe domain (%s)", - cfg.serviceUniverse, cfg.credentialsUniverse, - ) + authClient, err := httptransport.NewClient(&httptransport.Options{ + Headers: headers, + Credentials: cfg.authCredentials, + UniverseDomain: cfg.getClientUniverseDomain(), + }) + if err != nil { + return nil, fmt.Errorf("failed to create auth client: %v", err) + } + // If callers have not provided an HTTPClient explicitly with + // WithHTTPClient, then use auth client + if !cfg.setHTTPClient { + cfg.sqladminOpts = append(cfg.sqladminOpts, option.WithHTTPClient(authClient)) + } + } else { + // Add this to the end to make sure it's not overridden + cfg.sqladminOpts = append(cfg.sqladminOpts, option.WithUserAgent(strings.Join(cfg.useragents, " "))) + if cfg.quotaProject != "" { + cfg.sqladminOpts = append(cfg.sqladminOpts, option.WithQuotaProject(cfg.quotaProject)) } } @@ -263,7 +305,7 @@ func NewDialer(ctx context.Context, opts ...Option) (*Dialer, error) { d := &Dialer{ closed: make(chan struct{}), - cache: make(map[instance.ConnName]monitoredCache), + cache: make(map[cacheKey]*monitoredCache), lazyRefresh: cfg.lazyRefresh, keyGenerator: g, refreshTimeout: cfg.refreshTimeout, @@ -271,10 +313,12 @@ func NewDialer(ctx context.Context, opts ...Option) (*Dialer, error) { logger: cfg.logger, defaultDialConfig: dc, dialerID: uuid.New().String(), - iamTokenSource: cfg.iamLoginTokenSource, + iamTokenProvider: 
cfg.iamLoginTokenProvider, dialFunc: cfg.dialFunc, resolver: r, + failoverPeriod: cfg.failoverPeriod, } + return d, nil } @@ -301,6 +345,10 @@ func (d *Dialer) Dial(ctx context.Context, icn string, opts ...DialOption) (conn if err != nil { return nil, err } + // Log if resolver changed the instance name input string. + if cn.String() != icn { + d.logger.Debugf(ctx, "resolved instance %s to %s", icn, cn) + } cfg := d.defaultDialConfig for _, opt := range opts { @@ -371,31 +419,65 @@ func (d *Dialer) Dial(ctx context.Context, icn string, opts ...DialOption) (conn tlsConn := tls.Client(conn, ci.TLSConfig()) err = tlsConn.HandshakeContext(ctx) if err != nil { + // TLS handshake errors are fatal and require a refresh. Remove the instance + // from the cache so that future calls to Dial() will block until the + // certificate is refreshed successfully. d.logger.Debugf(ctx, "[%v] TLS handshake failed: %v", cn.String(), err) - // refresh the instance info in case it caused the handshake failure - c.ForceRefresh() + d.removeCached(ctx, cn, c, err) _ = tlsConn.Close() // best effort close attempt return nil, errtype.NewDialError("handshake failed", cn.String(), err) } latency := time.Since(startTime).Milliseconds() go func() { - n := atomic.AddUint64(c.openConns, 1) + n := atomic.AddUint64(c.openConnsCount, 1) trace.RecordOpenConnections(ctx, int64(n), d.dialerID, cn.String()) trace.RecordDialLatency(ctx, icn, d.dialerID, latency) }() - return newInstrumentedConn(tlsConn, func() { - n := atomic.AddUint64(c.openConns, ^uint64(0)) + closeFunc := func() { + n := atomic.AddUint64(c.openConnsCount, ^uint64(0)) // c.openConnsCount = c.openConnsCount - 1 trace.RecordOpenConnections(context.Background(), int64(n), d.dialerID, cn.String()) - }, d.dialerID, cn.String()), nil + } + errFunc := func(err error) { + // io.EOF occurs when the server closes the connection. This is safe to + // ignore. + if err == io.EOF { + return + } + d.logger.Debugf(ctx, "[%v] IO Error on Read or Write: %v", cn.String(), err) + if d.isTLSError(err) { + // TLS handshake errors are fatal. Remove the instance from the cache + // so that future calls to Dial() will block until the certificate + // is refreshed successfully. + d.removeCached(ctx, cn, c, err) + _ = tlsConn.Close() // best effort close attempt + } + } + iConn := newInstrumentedConn(tlsConn, closeFunc, errFunc, d.dialerID, cn.String()) + + // If this connection was opened using a Domain Name, then store it for later + // in case it needs to be forcibly closed. + if cn.HasDomainName() { + c.mu.Lock() + c.openConns = append(c.openConns, iConn) + c.mu.Unlock() + } + return iConn, nil +} +func (d *Dialer) isTLSError(err error) bool { + if nErr, ok := err.(net.Error); ok { + return !nErr.Timeout() && // it's a permanent net error + strings.Contains(nErr.Error(), "tls") // it's a TLS-related error + } + return false } -// removeCached stops all background refreshes and deletes the connection -// info cache from the map of caches. +// removeCached stops all background refreshes, closes open sockets, and deletes +// the cache entry. func (d *Dialer) removeCached( ctx context.Context, - i instance.ConnName, c connectionInfoCache, err error, + i instance.ConnName, c *monitoredCache, err error, ) { d.logger.Debugf( ctx, @@ -403,10 +485,19 @@ func (d *Dialer) removeCached( i.String(), err, ) + + // If this instance of monitoredCache is still in the cache, remove it. 
+ // If this instance was already removed from the cache or + // if *a separate goroutine* replaced it with a new instance, do nothing. + key := createKey(i) d.lock.Lock() - defer d.lock.Unlock() + if cachedC, ok := d.cache[key]; ok && cachedC == c { + delete(d.cache, key) + } + d.lock.Unlock() + + // Close the monitoredCache, this call is idempotent. c.Close() - delete(d.cache, i) } // validClientCert checks that the ephemeral client certificate retrieved from @@ -479,10 +570,11 @@ func (d *Dialer) Warmup(ctx context.Context, icn string, opts ...DialOption) err // newInstrumentedConn initializes an instrumentedConn that on closing will // decrement the number of open connects and record the result. -func newInstrumentedConn(conn net.Conn, closeFunc func(), dialerID, connName string) *instrumentedConn { +func newInstrumentedConn(conn net.Conn, closeFunc func(), errFunc func(error), dialerID, connName string) *instrumentedConn { return &instrumentedConn{ Conn: conn, closeFunc: closeFunc, + errFunc: errFunc, dialerID: dialerID, connName: connName, } @@ -493,6 +585,9 @@ func newInstrumentedConn(conn net.Conn, closeFunc func(), dialerID, connName str type instrumentedConn struct { net.Conn closeFunc func() + errFunc func(error) + mu sync.RWMutex + closed bool dialerID string connName string } @@ -503,6 +598,8 @@ func (i *instrumentedConn) Read(b []byte) (int, error) { bytesRead, err := i.Conn.Read(b) if err == nil { go trace.RecordBytesReceived(context.Background(), int64(bytesRead), i.connName, i.dialerID) + } else { + i.errFunc(err) } return bytesRead, err } @@ -513,13 +610,25 @@ func (i *instrumentedConn) Write(b []byte) (int, error) { bytesWritten, err := i.Conn.Write(b) if err == nil { go trace.RecordBytesSent(context.Background(), int64(bytesWritten), i.connName, i.dialerID) + } else { + i.errFunc(err) } return bytesWritten, err } +// isClosed returns true if this connection is closing or is already closed. +func (i *instrumentedConn) isClosed() bool { + i.mu.RLock() + defer i.mu.RUnlock() + return i.closed +} + // Close delegates to the underlying net.Conn interface and reports the close // to the provided closeFunc only when Close returns no error. func (i *instrumentedConn) Close() error { + i.mu.Lock() + defer i.mu.Unlock() + i.closed = true err := i.Conn.Close() if err != nil { return err @@ -546,55 +655,81 @@ func (d *Dialer) Close() error { return nil } +// createKey creates a key for the cache from an instance.ConnName. +// An instance.ConnName uniquely identifies a connection using +// project:region:instance + domainName. However, in the dialer cache, +// we want to to identify entries either by project:region:instance, or +// by domainName, but not the combination of the two. +func createKey(cn instance.ConnName) cacheKey { + if cn.HasDomainName() { + return cacheKey{domainName: cn.DomainName()} + } + return cacheKey{ + name: cn.Name(), + project: cn.Project(), + region: cn.Region(), + } +} + // connectionInfoCache is a helper function for returning the appropriate // connection info Cache in a threadsafe way. It will create a new cache, // modify the existing one, or leave it unchanged as needed. 
func (d *Dialer) connectionInfoCache( ctx context.Context, cn instance.ConnName, useIAMAuthN *bool, -) (monitoredCache, error) { +) (*monitoredCache, error) { + k := createKey(cn) + d.lock.RLock() - c, ok := d.cache[cn] + c, ok := d.cache[k] d.lock.RUnlock() - if !ok { - d.lock.Lock() - defer d.lock.Unlock() - // Recheck to ensure instance wasn't created or changed between locks - c, ok = d.cache[cn] - if !ok { - var useIAMAuthNDial bool - if useIAMAuthN != nil { - useIAMAuthNDial = *useIAMAuthN - } - d.logger.Debugf(ctx, "[%v] Connection info added to cache", cn.String()) - k, err := d.keyGenerator.rsaKey() - if err != nil { - return monitoredCache{}, err - } - var cache connectionInfoCache - if d.lazyRefresh { - cache = cloudsql.NewLazyRefreshCache( - cn, - d.logger, - d.sqladmin, k, - d.refreshTimeout, d.iamTokenSource, - d.dialerID, useIAMAuthNDial, - ) - } else { - cache = cloudsql.NewRefreshAheadCache( - cn, - d.logger, - d.sqladmin, k, - d.refreshTimeout, d.iamTokenSource, - d.dialerID, useIAMAuthNDial, - ) - } - var count uint64 - c = monitoredCache{openConns: &count, connectionInfoCache: cache} - d.cache[cn] = c - } + + if ok && !c.isClosed() { + c.UpdateRefresh(useIAMAuthN) + return c, nil } - c.UpdateRefresh(useIAMAuthN) + d.lock.Lock() + defer d.lock.Unlock() + + // Recheck to ensure instance wasn't created or changed between locks + c, ok = d.cache[k] + + // c exists and is not closed + if ok && !c.isClosed() { + c.UpdateRefresh(useIAMAuthN) + return c, nil + } + + // Create a new instance of monitoredCache + var useIAMAuthNDial bool + if useIAMAuthN != nil { + useIAMAuthNDial = *useIAMAuthN + } + d.logger.Debugf(ctx, "[%v] Connection info added to cache", cn.String()) + rsaKey, err := d.keyGenerator.rsaKey() + if err != nil { + return nil, err + } + var cache connectionInfoCache + if d.lazyRefresh { + cache = cloudsql.NewLazyRefreshCache( + cn, + d.logger, + d.sqladmin, rsaKey, + d.refreshTimeout, d.iamTokenProvider, + d.dialerID, useIAMAuthNDial, + ) + } else { + cache = cloudsql.NewRefreshAheadCache( + cn, + d.logger, + d.sqladmin, rsaKey, + d.refreshTimeout, d.iamTokenProvider, + d.dialerID, useIAMAuthNDial, + ) + } + c = newMonitoredCache(ctx, cache, cn, d.failoverPeriod, d.resolver, d.logger) + d.cache[k] = c return c, nil } diff --git a/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go b/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go index 2dd3de73..8be77bb0 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go @@ -27,17 +27,23 @@ var ( // Additionally, we have to support legacy "domain-scoped" projects // (e.g. "google.com:PROJECT") connNameRegex = regexp.MustCompile("([^:]+(:[^:]+)?):([^:]+):([^:]+)") + // The domain name pattern in accordance with RFC 1035, RFC 1123 and RFC 2181. + domainNameRegex = regexp.MustCompile(`^(?:[_a-z0-9](?:[_a-z0-9-]{0,61}[a-z0-9])?\.)+(?:[a-z](?:[a-z0-9-]{0,61}[a-z0-9])?)?$`) ) // ConnName represents the "instance connection name", in the format // "project:region:name". 
type ConnName struct { - project string - region string - name string + project string + region string + name string + domainName string } func (c *ConnName) String() string { + if c.domainName != "" { + return fmt.Sprintf("%s -> %s:%s:%s", c.domainName, c.project, c.region, c.name) + } return fmt.Sprintf("%s:%s:%s", c.project, c.region, c.name) } @@ -56,8 +62,34 @@ func (c *ConnName) Name() string { return c.name } +// DomainName returns the domain name for this instance +func (c *ConnName) DomainName() string { + return c.domainName +} + +// HasDomainName returns whether the Cloud SQL instance has a domain name +func (c *ConnName) HasDomainName() bool { + return c.domainName != "" +} + +// IsValidDomain validates that a string is a well-formed domain name +func IsValidDomain(dn string) bool { + b := []byte(dn) + m := domainNameRegex.FindSubmatch(b) + if m == nil { + return false + } + return true +} + // ParseConnName initializes a new ConnName struct. func ParseConnName(cn string) (ConnName, error) { + return ParseConnNameWithDomainName(cn, "") +} + +// ParseConnNameWithDomainName initializes a new ConnName struct, +// also setting the domain name. +func ParseConnNameWithDomainName(cn string, dn string) (ConnName, error) { b := []byte(cn) m := connNameRegex.FindSubmatch(b) if m == nil { @@ -69,9 +101,10 @@ func ParseConnName(cn string) (ConnName, error) { } c := ConnName{ - project: string(m[1]), - region: string(m[3]), - name: string(m[4]), + project: string(m[1]), + region: string(m[3]), + name: string(m[4]), + domainName: dn, } return c, nil } diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go index f8e44b3b..10e32af6 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go @@ -23,10 +23,10 @@ import ( "sync" "time" + "cloud.google.com/go/auth" "cloud.google.com/go/cloudsqlconn/debug" "cloud.google.com/go/cloudsqlconn/errtype" "cloud.google.com/go/cloudsqlconn/instance" - "golang.org/x/oauth2" "golang.org/x/time/rate" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) @@ -45,6 +45,11 @@ const ( // refreshInterval. RefreshTimeout = 60 * time.Second + // FailoverPeriod is the frequency with which the dialer will check + // if the DNS record has changed for connections configured using + // a DNS name. + FailoverPeriod = 30 * time.Second + // refreshBurst is the initial burst allowed by the rate limiter. refreshBurst = 2 ) @@ -124,7 +129,7 @@ func NewRefreshAheadCache( client *sqladmin.Service, key *rsa.PrivateKey, refreshTimeout time.Duration, - ts oauth2.TokenSource, + tp auth.TokenProvider, dialerID string, useIAMAuthNDial bool, ) *RefreshAheadCache { @@ -137,7 +142,7 @@ func NewRefreshAheadCache( l, client, key, - ts, + tp, dialerID, ), refreshTimeout: refreshTimeout, @@ -236,31 +241,45 @@ func (c ConnectionInfo) TLSConfig() *tls.Config { for _, caCert := range c.ServerCACert { pool.AddCert(caCert) } - if c.ServerCAMode == "GOOGLE_MANAGED_CAS_CA" { - // For CAS instances, we can rely on the DNS name to verify the server identity. + + // If the instance metadata does not contain a domain name, use the legacy + // validation checking the CN field for the instance connection name. 
+ if c.DNSName == "" { return &tls.Config{ - ServerName: c.DNSName, + ServerName: c.ConnectionName.String(), Certificates: []tls.Certificate{c.ClientCertificate}, RootCAs: pool, - MinVersion: tls.VersionTLS13, + // We need to set InsecureSkipVerify to true due to + // https://github.com/GoogleCloudPlatform/cloudsql-proxy/issues/194 + // https://tip.golang.org/doc/go1.11#crypto/x509 + // + // Since we have a secure channel to the Cloud SQL API which we use to + // retrieve the certificates, we instead need to implement our own + // VerifyPeerCertificate function that will verify that the certificate + // is OK. + InsecureSkipVerify: true, + VerifyPeerCertificate: verifyPeerCertificateFunc(c.ConnectionName, pool), + MinVersion: tls.VersionTLS13, } } + + // If the connector was configured with a domain name, use that domain name + // to validate the certificate. Otherwise, use the DNS name from the + // instance metadata retrieved from the ConnectSettings API endpoint. + serverName := c.ConnectionName.DomainName() + if serverName == "" { + serverName = c.DNSName + } + + // By default, use Standard TLS hostname verification name to + // verify the server identity. return &tls.Config{ - ServerName: c.ConnectionName.String(), + ServerName: serverName, Certificates: []tls.Certificate{c.ClientCertificate}, RootCAs: pool, - // We need to set InsecureSkipVerify to true due to - // https://github.com/GoogleCloudPlatform/cloudsql-proxy/issues/194 - // https://tip.golang.org/doc/go1.11#crypto/x509 - // - // Since we have a secure channel to the Cloud SQL API which we use to - // retrieve the certificates, we instead need to implement our own - // VerifyPeerCertificate function that will verify that the certificate - // is OK. - InsecureSkipVerify: true, - VerifyPeerCertificate: verifyPeerCertificateFunc(c.ConnectionName, pool), - MinVersion: tls.VersionTLS13, + MinVersion: tls.VersionTLS13, } + } // verifyPeerCertificateFunc creates a VerifyPeerCertificate func that diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/lazy.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/lazy.go index 5b65b3b9..b99041a3 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/lazy.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/lazy.go @@ -20,13 +20,13 @@ import ( "sync" "time" + "cloud.google.com/go/auth" "cloud.google.com/go/cloudsqlconn/debug" "cloud.google.com/go/cloudsqlconn/instance" - "golang.org/x/oauth2" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) -// LazyRefreshCache is caches connection info and refreshes the cache only when +// LazyRefreshCache caches connection info and refreshes the cache only when // a caller requests connection info and the current certificate is expired. 
type LazyRefreshCache struct { connName instance.ConnName @@ -45,7 +45,7 @@ func NewLazyRefreshCache( client *sqladmin.Service, key *rsa.PrivateKey, _ time.Duration, - ts oauth2.TokenSource, + tp auth.TokenProvider, dialerID string, useIAMAuthNDial bool, ) *LazyRefreshCache { @@ -56,7 +56,7 @@ func NewLazyRefreshCache( l, client, key, - ts, + tp, dialerID, ), useIAMAuthNDial: useIAMAuthNDial, diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go index 152d99a4..23dd84f2 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go @@ -22,13 +22,12 @@ import ( "encoding/pem" "fmt" "strings" - "time" + "cloud.google.com/go/auth" "cloud.google.com/go/cloudsqlconn/debug" "cloud.google.com/go/cloudsqlconn/errtype" "cloud.google.com/go/cloudsqlconn/instance" "cloud.google.com/go/cloudsqlconn/internal/trace" - "golang.org/x/oauth2" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) @@ -101,8 +100,26 @@ func fetchMetadata( // resolve DnsName into IP address for PSC // Note that we have to check for PSC enablement first because CAS instances also set the DnsName. - if db.PscEnabled && db.DnsName != "" { - ipAddrs[PSC] = db.DnsName + if db.PscEnabled { + // Search the dns_names field for the PSC DNS Name. + pscDNSName := "" + for _, dnm := range db.DnsNames { + if dnm.Name != "" && + dnm.ConnectionType == "PRIVATE_SERVICE_CONNECT" && dnm.DnsScope == "INSTANCE" { + pscDNSName = dnm.Name + break + } + } + + // If the psc dns name was not found, use the legacy dns_name field + if pscDNSName == "" && db.DnsName != "" { + pscDNSName = db.DnsName + } + + // If the psc dns name was found, add it to the ipaddrs map. + if pscDNSName != "" { + ipAddrs[PSC] = pscDNSName + } } if len(ipAddrs) == 0 { @@ -129,38 +146,28 @@ func fetchMetadata( caCerts = append(caCerts, caCert) } + // Find a DNS name to use to validate the certificate from the dns_names field. Any + // name in the list may be used to validate the server TLS certificate. + // Fall back to legacy dns_name field if necessary. + var serverName string + if len(db.DnsNames) > 0 { + serverName = db.DnsNames[0].Name + } + if serverName == "" { + serverName = db.DnsName + } + m = metadata{ ipAddrs: ipAddrs, serverCACert: caCerts, version: db.DatabaseVersion, - dnsName: db.DnsName, + dnsName: serverName, serverCAMode: db.ServerCaMode, } return m, nil } -var expired = time.Time{}.Add(1) - -// canRefresh determines if the provided token was refreshed or if it still has -// the sentinel expiration, which means the token was provided without a -// refresh token (as with the Cloud SQL Proxy's --token flag) and therefore -// cannot be refreshed. -func canRefresh(t *oauth2.Token) bool { - return t.Expiry.Unix() != expired.Unix() -} - -// refreshToken will retrieve a new token, only if a refresh token is present. -func refreshToken(ts oauth2.TokenSource, tok *oauth2.Token) (*oauth2.Token, error) { - expiredToken := &oauth2.Token{ - AccessToken: tok.AccessToken, - TokenType: tok.TokenType, - RefreshToken: tok.RefreshToken, - Expiry: expired, - } - return oauth2.ReuseTokenSource(expiredToken, ts).Token() -} - // fetchEphemeralCert uses the Cloud SQL Admin API's createEphemeral method to // create a signed TLS certificate that authorized to connect via the Cloud SQL // instance's serverside proxy. The cert if valid for approximately one hour. 
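The refresh path above now takes an `auth.TokenProvider` and reads `tok.Value` rather than an `oauth2.Token`'s `AccessToken`. For callers that still hold an `oauth2.TokenSource`, a minimal bridge might look like the following, using the same `oauth2adapt` helper the options code later in this diff relies on; `tokenValue` is an illustrative name.

```go
import (
	"context"

	"cloud.google.com/go/auth/oauth2adapt"
	"golang.org/x/oauth2"
)

// tokenValue adapts an existing oauth2.TokenSource to the auth.TokenProvider
// interface and returns the raw access token string.
func tokenValue(ctx context.Context, ts oauth2.TokenSource) (string, error) {
	tp := oauth2adapt.TokenProviderFromTokenSource(ts)
	tok, err := tp.Token(ctx) // TokenProvider takes a context, unlike TokenSource
	if err != nil {
		return "", err
	}
	return tok.Value, nil // previously tok.AccessToken on *oauth2.Token
}
```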
@@ -169,7 +176,7 @@ func fetchEphemeralCert( client *sqladmin.Service, inst instance.ConnName, key *rsa.PrivateKey, - ts oauth2.TokenSource, + tp auth.TokenProvider, ) (c tls.Certificate, err error) { var end trace.EndSpanFunc ctx, end = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.FetchEphemeralCert") @@ -182,10 +189,10 @@ func fetchEphemeralCert( req := sqladmin.GenerateEphemeralCertRequest{ PublicKey: string(pem.EncodeToMemory(&pem.Block{Bytes: clientPubKey, Type: "RSA PUBLIC KEY"})), } - var tok *oauth2.Token - if ts != nil { + var tok *auth.Token + if tp != nil { var tokErr error - tok, tokErr = ts.Token() + tok, tokErr = tp.Token(ctx) if tokErr != nil { return tls.Certificate{}, errtype.NewRefreshError( "failed to retrieve Oauth2 token", @@ -193,17 +200,7 @@ func fetchEphemeralCert( tokErr, ) } - // Always refresh the token to ensure its expiration is far enough in - // the future. - tok, tokErr = refreshToken(ts, tok) - if tokErr != nil { - return tls.Certificate{}, errtype.NewRefreshError( - "failed to refresh Oauth2 token", - inst.String(), - tokErr, - ) - } - req.AccessToken = tok.AccessToken + req.AccessToken = tok.Value } resp, err := retry50x(ctx, func(ctx2 context.Context) (*sqladmin.GenerateEphemeralCertResponse, error) { return client.Connect.GenerateEphemeralCert( @@ -235,10 +232,10 @@ func fetchEphemeralCert( nil, ) } - if ts != nil { + if tp != nil { // Adjust the certificate's expiration to be the earliest of // the token's expiration or the certificate's expiration. - if canRefresh(tok) && tok.Expiry.Before(clientCert.NotAfter) { + if tok.Expiry.Before(clientCert.NotAfter) { clientCert.NotAfter = tok.Expiry } } @@ -256,7 +253,7 @@ func newAdminAPIClient( l debug.ContextLogger, svc *sqladmin.Service, key *rsa.PrivateKey, - ts oauth2.TokenSource, + tp auth.TokenProvider, dialerID string, ) adminAPIClient { return adminAPIClient{ @@ -264,7 +261,7 @@ func newAdminAPIClient( logger: l, key: key, client: svc, - ts: ts, + tp: tp, } } @@ -277,8 +274,8 @@ type adminAPIClient struct { // key is used to generate the client certificate key *rsa.PrivateKey client *sqladmin.Service - // ts is the TokenSource used for IAM DB AuthN. - ts oauth2.TokenSource + // tp is the TokenProvider used for IAM DB AuthN. 
+ tp auth.TokenProvider } // ConnectionInfo immediately performs a full refresh operation using the Cloud @@ -316,11 +313,11 @@ func (c adminAPIClient) ConnectionInfo( ecC := make(chan ecRes, 1) go func() { defer close(ecC) - var iamTS oauth2.TokenSource + var iamTP auth.TokenProvider if iamAuthNDial { - iamTS = c.ts + iamTP = c.tp } - ec, err := fetchEphemeralCert(ctx, c.client, cn, c.key, iamTS) + ec, err := fetchEphemeralCert(ctx, c.client, cn, c.key, iamTP) ecC <- ecRes{ec, err} }() diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/resolver.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/resolver.go index 676405e4..42bf0204 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/resolver.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/resolver.go @@ -20,6 +20,7 @@ import ( "net" "sort" + "cloud.google.com/go/cloudsqlconn/errtype" "cloud.google.com/go/cloudsqlconn/instance" ) @@ -63,10 +64,21 @@ type DNSInstanceConnectionNameResolver struct { func (r *DNSInstanceConnectionNameResolver) Resolve(ctx context.Context, icn string) (instanceName instance.ConnName, err error) { cn, err := instance.ParseConnName(icn) if err != nil { - // The connection name was not project:region:instance - // Attempt to query a TXT record and see if it works instead. - cn, err = r.queryDNS(ctx, icn) - if err != nil { + // The connection name was not in project:region:instance format. + // Check that connection name is a valid DNS domain name. + if instance.IsValidDomain(icn) { + // Attempt to query a TXT record and see if it works instead. + cn, err = r.queryDNS(ctx, icn) + if err != nil { + return instance.ConnName{}, err + } + } else { + // Connection name is not valid instance connection name or domain name + err := errtype.NewConfigError( + "invalid connection name, expected PROJECT:REGION:INSTANCE "+ + "format or valid DNS domain name", + icn, + ) return instance.ConnName{}, err } } @@ -105,7 +117,7 @@ func (r *DNSInstanceConnectionNameResolver) queryDNS(ctx context.Context, domain // Attempt to parse records, returning the first valid record. 
for _, record := range records { // Parse the target as a CN - cn, parseErr := instance.ParseConnName(record) + cn, parseErr := instance.ParseConnNameWithDomainName(record, domainName) if parseErr != nil { perr = fmt.Errorf("unable to parse TXT for %q -> %q : %v", domainName, record, parseErr) continue diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/metrics.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/metrics.go index a84753d1..052790d6 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/metrics.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/metrics.go @@ -108,14 +108,14 @@ var ( Name: "cloudsqlconn/bytes_sent", Measure: mBytesSent, Description: "The number of bytes sent to Cloud SQL", - Aggregation: view.LastValue(), + Aggregation: view.Sum(), TagKeys: []tag.Key{keyInstance, keyDialerID}, } bytesReceivedView = &view.View{ Name: "cloudsqlconn/bytes_received", Measure: mBytesReceived, Description: "The number of bytes received from Cloud SQL", - Aggregation: view.LastValue(), + Aggregation: view.Sum(), TagKeys: []tag.Key{keyInstance, keyDialerID}, } diff --git a/vendor/cloud.google.com/go/cloudsqlconn/monitored_cache.go b/vendor/cloud.google.com/go/cloudsqlconn/monitored_cache.go new file mode 100644 index 00000000..b3929b53 --- /dev/null +++ b/vendor/cloud.google.com/go/cloudsqlconn/monitored_cache.go @@ -0,0 +1,146 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudsqlconn + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "cloud.google.com/go/cloudsqlconn/debug" + "cloud.google.com/go/cloudsqlconn/instance" +) + +// monitoredCache is a wrapper around a connectionInfoCache that tracks the +// number of connections to the associated instance. +type monitoredCache struct { + openConnsCount *uint64 + cn instance.ConnName + resolver instance.ConnectionNameResolver + logger debug.ContextLogger + + // domainNameTicker periodically checks any domain names to see if they + // changed. 
+ domainNameTicker *time.Ticker + closedCh chan struct{} + + mu sync.Mutex + openConns []*instrumentedConn + closed bool + + connectionInfoCache +} + +func newMonitoredCache( + ctx context.Context, + cache connectionInfoCache, + cn instance.ConnName, + failoverPeriod time.Duration, + resolver instance.ConnectionNameResolver, + logger debug.ContextLogger) *monitoredCache { + + c := &monitoredCache{ + openConnsCount: new(uint64), + closedCh: make(chan struct{}), + cn: cn, + resolver: resolver, + logger: logger, + connectionInfoCache: cache, + } + if cn.HasDomainName() { + c.domainNameTicker = time.NewTicker(failoverPeriod) + go func() { + for { + select { + case <-c.domainNameTicker.C: + c.purgeClosedConns() + c.checkDomainName(ctx) + case <-c.closedCh: + return + } + } + }() + + } + + return c +} +func (c *monitoredCache) isClosed() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.closed +} + +func (c *monitoredCache) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.closed { + return nil + } + + c.closed = true + close(c.closedCh) + + if c.domainNameTicker != nil { + c.domainNameTicker.Stop() + } + + if atomic.LoadUint64(c.openConnsCount) > 0 { + for _, socket := range c.openConns { + if !socket.isClosed() { + _ = socket.Close() // force socket closed, ok to ignore error. + } + } + atomic.StoreUint64(c.openConnsCount, 0) + } + + return c.connectionInfoCache.Close() +} + +func (c *monitoredCache) purgeClosedConns() { + c.mu.Lock() + defer c.mu.Unlock() + + var open []*instrumentedConn + for _, s := range c.openConns { + if !s.isClosed() { + open = append(open, s) + } + } + c.openConns = open +} + +func (c *monitoredCache) checkDomainName(ctx context.Context) { + if !c.cn.HasDomainName() { + return + } + newCn, err := c.resolver.Resolve(ctx, c.cn.DomainName()) + if err != nil { + // The domain name could not be resolved. + c.logger.Debugf(ctx, "domain name %s for instance %s did not resolve, "+ + "closing all connections: %v", + c.cn.DomainName(), c.cn.Name(), err) + c.Close() + } + if newCn != c.cn { + // The instance changed. 
+ c.logger.Debugf(ctx, "domain name %s changed from %s to %s, "+ + "closing all connections.", + c.cn.DomainName(), c.cn.Name(), newCn.Name()) + c.Close() + } + +} diff --git a/vendor/cloud.google.com/go/cloudsqlconn/options.go b/vendor/cloud.google.com/go/cloudsqlconn/options.go index c21fcd2a..5965a938 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/options.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/options.go @@ -22,12 +22,14 @@ import ( "os" "time" + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/credentials" + "cloud.google.com/go/auth/oauth2adapt" "cloud.google.com/go/cloudsqlconn/debug" "cloud.google.com/go/cloudsqlconn/errtype" "cloud.google.com/go/cloudsqlconn/instance" "cloud.google.com/go/cloudsqlconn/internal/cloudsql" "golang.org/x/oauth2" - "golang.org/x/oauth2/google" apiopt "google.golang.org/api/option" sqladmin "google.golang.org/api/sqladmin/v1beta4" ) @@ -44,16 +46,18 @@ type dialerConfig struct { useIAMAuthN bool logger debug.ContextLogger lazyRefresh bool - iamLoginTokenSource oauth2.TokenSource + clientUniverseDomain string + quotaProject string + authCredentials *auth.Credentials + iamLoginTokenProvider auth.TokenProvider useragents []string - credentialsUniverse string - serviceUniverse string setAdminAPIEndpoint bool - setUniverseDomain bool setCredentials bool + setHTTPClient bool setTokenSource bool setIAMAuthNTokenSource bool resolver instance.ConnectionNameResolver + failoverPeriod time.Duration // err tracks any dialer options that may have failed. err error } @@ -86,26 +90,25 @@ func WithCredentialsFile(filename string) Option { // or refresh token JSON credentials to be used as the basis for authentication. func WithCredentialsJSON(b []byte) Option { return func(d *dialerConfig) { - c, err := google.CredentialsFromJSON(context.Background(), b, sqladmin.SqlserviceAdminScope) + c, err := credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: []string{sqladmin.SqlserviceAdminScope}, + CredentialsJSON: b, + }) if err != nil { d.err = errtype.NewConfigError(err.Error(), "n/a") return } - ud, err := c.GetUniverseDomain() - if err != nil { - d.err = errtype.NewConfigError(err.Error(), "n/a") - return - } - d.credentialsUniverse = ud - d.sqladminOpts = append(d.sqladminOpts, apiopt.WithCredentials(c)) - + d.authCredentials = c // Create another set of credentials scoped to login only - scoped, err := google.CredentialsFromJSON(context.Background(), b, iamLoginScope) + scoped, err := credentials.DetectDefault(&credentials.DetectOptions{ + Scopes: []string{iamLoginScope}, + CredentialsJSON: b, + }) if err != nil { d.err = errtype.NewConfigError(err.Error(), "n/a") return } - d.iamLoginTokenSource = scoped.TokenSource + d.iamLoginTokenProvider = scoped.TokenProvider d.setCredentials = true } } @@ -157,7 +160,7 @@ func WithIAMAuthNTokenSources(apiTS, iamLoginTS oauth2.TokenSource) Option { return func(d *dialerConfig) { d.setIAMAuthNTokenSource = true d.setCredentials = true - d.iamLoginTokenSource = iamLoginTS + d.iamLoginTokenProvider = oauth2adapt.TokenProviderFromTokenSource(iamLoginTS) d.sqladminOpts = append(d.sqladminOpts, apiopt.WithTokenSource(apiTS)) } } @@ -183,6 +186,7 @@ func WithRefreshTimeout(t time.Duration) Option { func WithHTTPClient(client *http.Client) Option { return func(d *dialerConfig) { d.sqladminOpts = append(d.sqladminOpts, apiopt.WithHTTPClient(client)) + d.setHTTPClient = true } } @@ -192,7 +196,6 @@ func WithAdminAPIEndpoint(url string) Option { return func(d *dialerConfig) { d.sqladminOpts = 
append(d.sqladminOpts, apiopt.WithEndpoint(url)) d.setAdminAPIEndpoint = true - d.serviceUniverse = "" } } @@ -201,15 +204,14 @@ func WithAdminAPIEndpoint(url string) Option { func WithUniverseDomain(ud string) Option { return func(d *dialerConfig) { d.sqladminOpts = append(d.sqladminOpts, apiopt.WithUniverseDomain(ud)) - d.serviceUniverse = ud - d.setUniverseDomain = true + d.clientUniverseDomain = ud } } // WithQuotaProject returns an Option that specifies the project used for quota and billing purposes. func WithQuotaProject(p string) Option { return func(cfg *dialerConfig) { - cfg.sqladminOpts = append(cfg.sqladminOpts, apiopt.WithQuotaProject(p)) + cfg.quotaProject = p } } @@ -271,6 +273,16 @@ func WithDNSResolver() Option { } } +// WithFailoverPeriod will cause the connector to periodically check the SRV DNS +// records of instance configured using DNS names. By default, this is 30 +// seconds. If this is set to 0, the connector will only check for domain name +// changes when establishing a new connection. +func WithFailoverPeriod(f time.Duration) Option { + return func(d *dialerConfig) { + d.failoverPeriod = f + } +} + type debugLoggerWithoutContext struct { logger debug.Logger } diff --git a/vendor/cloud.google.com/go/cloudsqlconn/version.txt b/vendor/cloud.google.com/go/cloudsqlconn/version.txt index f8f4f03b..41c11ffb 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/version.txt +++ b/vendor/cloud.google.com/go/cloudsqlconn/version.txt @@ -1 +1 @@ -1.12.1 +1.16.1 diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e2..bcfb5d81 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,26 @@ # Changes +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13) + + +### Features + +* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go new file mode 100644 index 00000000..8ec673b8 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/log.go @@ -0,0 +1,149 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" +) + +// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog +// to avoid the dependency. The compute/metadata module is used by too many +// non-client library modules that can't justify the dependency. + +// The handler returned if logging is not enabled. +type noOpHandler struct{} + +func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false +} + +func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error { + return nil +} + +func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return h +} + +func (h noOpHandler) WithGroup(_ string) slog.Handler { + return h +} + +// httpRequest returns a lazily evaluated [slog.LogValuer] for a +// [http.Request] and the associated body. +func httpRequest(req *http.Request, body []byte) slog.LogValuer { + return &request{ + req: req, + payload: body, + } +} + +type request struct { + req *http.Request + payload []byte +} + +func (r *request) LogValue() slog.Value { + if r == nil || r.req == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method)) + groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String())) + + var headerAttr []slog.Attr + for k, val := range r.req.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +// httpResponse returns a lazily evaluated [slog.LogValuer] for a +// [http.Response] and the associated body. +func httpResponse(resp *http.Response, body []byte) slog.LogValuer { + return &response{ + resp: resp, + payload: body, + } +} + +type response struct { + resp *http.Response + payload []byte +} + +func (r *response) LogValue() slog.Value { + if r == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode))) + + var headerAttr []slog.Attr + for k, val := range r.resp.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) 
+} + +func processPayload(payload []byte) (slog.Attr, bool) { + peekChar := payload[0] + if peekChar == '{' { + // JSON object + var m map[string]any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else if peekChar == '[' { + // JSON array + var m []any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else { + // Everything else + buf := &bytes.Buffer{} + if err := json.Compact(buf, payload); err != nil { + // Write raw payload incase of error + buf.Write(payload) + } + return slog.String("payload", buf.String()), true + } + return slog.Attr{}, false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 345080b7..4c18a383 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -24,6 +24,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -60,7 +61,10 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var defaultClient = &Client{hc: newDefaultHTTPClient()} +var defaultClient = &Client{ + hc: newDefaultHTTPClient(), + logger: slog.New(noOpHandler{}), +} func newDefaultHTTPClient() *http.Client { return &http.Client{ @@ -408,17 +412,42 @@ func strsContains(ss []string, s string) bool { // A Client provides metadata. type Client struct { - hc *http.Client + hc *http.Client + logger *slog.Logger +} + +// Options for configuring a [Client]. +type Options struct { + // Client is the HTTP client used to make requests. Optional. + Client *http.Client + // Logger is used to log information about HTTP request and responses. + // If not provided, nothing will be logged. Optional. + Logger *slog.Logger } // NewClient returns a Client that can be used to fetch metadata. // Returns the client that uses the specified http.Client for HTTP requests. // If nil is specified, returns the default client. func NewClient(c *http.Client) *Client { - if c == nil { + return NewWithOptions(&Options{ + Client: c, + }) +} + +// NewWithOptions returns a Client that is configured with the provided Options. +func NewWithOptions(opts *Options) *Client { + if opts == nil { return defaultClient } - return &Client{hc: c} + client := opts.Client + if client == nil { + client = newDefaultHTTPClient() + } + logger := opts.Logger + if logger == nil { + logger = slog.New(noOpHandler{}) + } + return &Client{hc: client, logger: logger} } // getETag returns a value from the metadata service as well as the associated ETag. 
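A small sketch of the new `Options`-based constructor added above, assuming the context-aware `GetWithContext` accessor available in recent releases of the package; the text handler and the `instance/id` path are only illustrative.

```go
import (
	"context"
	"log/slog"
	"os"

	"cloud.google.com/go/compute/metadata"
)

func instanceID(ctx context.Context) (string, error) {
	// Logger is optional; omitting it keeps the previous silent behavior.
	c := metadata.NewWithOptions(&metadata.Options{
		Logger: slog.New(slog.NewTextHandler(os.Stderr, nil)),
	})
	// Fetch a value relative to the computeMetadata/v1 root.
	return c.GetWithContext(ctx, "instance/id")
}
```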
@@ -448,14 +477,26 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string req.Header.Set("User-Agent", userAgent) var res *http.Response var reqErr error + var body []byte retryer := newRetryer() for { + c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil)) res, reqErr = c.hc.Do(req) var code int if res != nil { code = res.StatusCode + body, err = io.ReadAll(res.Body) + if err != nil { + res.Body.Close() + return "", "", err + } + c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body)) + res.Body.Close() } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } @@ -466,18 +507,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string if reqErr != nil { return "", "", reqErr } - defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := io.ReadAll(res.Body) - if err != nil { - return "", "", err - } if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} + return "", "", &Error{Code: res.StatusCode, Message: string(body)} } - return string(all), res.Header.Get("Etag"), nil + return string(body), res.Header.Get("Etag"), nil } // Get returns a value from the metadata service. diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f89..2e53f012 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index a6675492..926ed388 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,82 @@ # Release History +## 1.18.0 (2025-04-03) + +### Features Added + +* Added `AccessToken.RefreshOn` and updated `BearerTokenPolicy` to consider nonzero values of it when deciding whether to request a new token + + +## 1.17.1 (2025-03-20) + +### Other Changes + +* Upgraded to Go 1.23 +* Upgraded dependencies + +## 1.17.0 (2025-01-07) + +### Features Added + +* Added field `OperationLocationResultPath` to `runtime.NewPollerOptions[T]` for LROs that use the `Operation-Location` pattern. +* Support `encoding.TextMarshaler` and `encoding.TextUnmarshaler` interfaces in `arm.ResourceID`. + +## 1.16.0 (2024-10-17) + +### Features Added + +* Added field `Kind` to `runtime.StartSpanOptions` to allow a kind to be set when starting a span. 
+ +### Bugs Fixed + +* `BearerTokenPolicy` now rewinds request bodies before retrying + +## 1.15.0 (2024-10-14) + +### Features Added + +* `BearerTokenPolicy` handles CAE claims challenges + +### Bugs Fixed + +* Omit the `ResponseError.RawResponse` field from JSON marshaling so instances can be marshaled. +* Fixed an integer overflow in the retry policy. + +### Other Changes + +* Update dependencies. + +## 1.14.0 (2024-08-07) + +### Features Added + +* Added field `Attributes` to `runtime.StartSpanOptions` to simplify creating spans with attributes. + +### Other Changes + +* Include the HTTP verb and URL in `log.EventRetryPolicy` log entries so it's clear which operation is being retried. + +## 1.13.0 (2024-07-16) + +### Features Added + +- Added runtime.NewRequestFromRequest(), allowing for a policy.Request to be created from an existing *http.Request. + +## 1.12.0 (2024-06-06) + +### Features Added + +* Added field `StatusCodes` to `runtime.FetcherForNextLinkOptions` allowing for additional HTTP status codes indicating success. +* Added func `NewUUID` to the `runtime` package for generating UUIDs. + +### Bugs Fixed + +* Fixed an issue that prevented pollers using the `Operation-Location` strategy from unmarshaling the final result in some cases. + +### Other Changes + +* Updated dependencies. + ## 1.11.1 (2024-04-02) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go index 17bd50c6..03cb227d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -11,4 +11,7 @@ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" // ResponseError is returned when a request is made to a service and // the service returns a non-success HTTP status code. // Use errors.As() to access this type in the error chain. +// +// When marshaling instances, the RawResponse field will be omitted. +// However, the contents returned by Error() will be preserved. type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index f2b296b6..46017003 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -47,8 +47,13 @@ func HasStatusCode(resp *http.Response, statusCodes ...int) bool { // AccessToken represents an Azure service bearer access token with expiry information. // Exported as azcore.AccessToken. type AccessToken struct { - Token string + // Token is the access token + Token string + // ExpiresOn indicates when the token expires ExpiresOn time.Time + // RefreshOn is a suggested time to refresh the token. + // Clients should ignore this value when it's zero. + RefreshOn time.Time } // TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. 
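To show how the new `RefreshOn` field might be populated, here is a hedged sketch of a custom `azcore.TokenCredential`; `staticCredential`, the one-hour lifetime, and the 45-minute refresh hint are placeholders, not a recommended policy.

```go
import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// staticCredential is a placeholder credential returning a fixed token.
type staticCredential struct {
	token string
}

func (c staticCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
	now := time.Now()
	return azcore.AccessToken{
		Token:     c.token,
		ExpiresOn: now.Add(time.Hour),
		// A nonzero RefreshOn hints to BearerTokenPolicy that it may start
		// requesting a fresh token after this time, ahead of ExpiresOn.
		RefreshOn: now.Add(45 * time.Minute),
	}, nil
}
```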
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index 3041984d..e3e2d4e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -7,6 +7,7 @@ package exported import ( + "bytes" "context" "encoding/base64" "errors" @@ -67,6 +68,42 @@ func (ov opValues) get(value any) bool { return ok } +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +// Exported as runtime.NewRequestFromRequest(). +func NewRequestFromRequest(req *http.Request) (*Request, error) { + policyReq := &Request{req: req} + + if req.Body != nil { + // we can avoid a body copy here if the underlying stream is already a + // ReadSeekCloser. + readSeekCloser, isReadSeekCloser := req.Body.(io.ReadSeekCloser) + + if !isReadSeekCloser { + // since this is an already populated http.Request we want to copy + // over its body, if it has one. + bodyBytes, err := io.ReadAll(req.Body) + + if err != nil { + return nil, err + } + + if err := req.Body.Close(); err != nil { + return nil, err + } + + readSeekCloser = NopCloser(bytes.NewReader(bodyBytes)) + } + + // SetBody also takes care of updating the http.Request's body + // as well, so they should stay in-sync from this point. + if err := policyReq.SetBody(readSeekCloser, req.Header.Get("Content-Type")); err != nil { + return nil, err + } + } + + return policyReq, nil +} + // NewRequest creates a new Request with the specified input. // Exported as runtime.NewRequest(). func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index 08a95458..8aec256b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -117,12 +117,18 @@ type ResponseError struct { StatusCode int // RawResponse is the underlying HTTP response. - RawResponse *http.Response + RawResponse *http.Response `json:"-"` + + errMsg string } // Error implements the error interface for type ResponseError. // Note that the message contents are not contractual and can change over time. 
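The `NewRequestFromRequest` helper above is exported as `runtime.NewRequestFromRequest`. A minimal sketch of wrapping an existing `*http.Request` for an azcore pipeline follows; the `send` helper and its parameters are assumptions for illustration.

```go
import (
	"bytes"
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func send(ctx context.Context, pl runtime.Pipeline, url string, payload []byte) (*http.Response, error) {
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	// Wrap the existing request; its body is copied into a rewindable stream
	// so retry policies can replay it.
	req, err := runtime.NewRequestFromRequest(httpReq)
	if err != nil {
		return nil, err
	}
	return pl.Do(req)
}
```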
func (e *ResponseError) Error() string { + if e.errMsg != "" { + return e.errMsg + } + const separator = "--------------------------------------------------------------------------------" // write the request method and URL with response status code msg := &bytes.Buffer{} @@ -163,5 +169,33 @@ func (e *ResponseError) Error() string { } fmt.Fprintln(msg, separator) - return msg.String() + e.errMsg = msg.String() + return e.errMsg +} + +// internal type used for marshaling/unmarshaling +type responseError struct { + ErrorCode string `json:"errorCode"` + StatusCode int `json:"statusCode"` + ErrorMessage string `json:"errorMessage"` +} + +func (e ResponseError) MarshalJSON() ([]byte, error) { + return json.Marshal(responseError{ + ErrorCode: e.ErrorCode, + StatusCode: e.StatusCode, + ErrorMessage: e.Error(), + }) +} + +func (e *ResponseError) UnmarshalJSON(data []byte) error { + re := responseError{} + if err := json.Unmarshal(data, &re); err != nil { + return err + } + + e.ErrorCode = re.ErrorCode + e.StatusCode = re.StatusCode + e.errMsg = re.ErrorMessage + return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go index ccd4794e..a5346276 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -155,5 +155,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go index 0d781b31..8751b051 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -131,5 +131,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } func (p *Poller[T]) Result(ctx context.Context, out *T) error { - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go index 51aede8a..7f8d11b8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -124,7 +124,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { return exported.NewResponseError(p.resp) } - return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), "", out) } // SanitizePollerPath removes any fake-appended suffix from a URL's path. 
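Since `RawResponse` is now excluded from marshaling, a `ResponseError` pulled out of an error chain can be serialized directly. A brief sketch, with `logResponseError` as an illustrative helper:

```go
import (
	"encoding/json"
	"errors"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

func logResponseError(err error) {
	var respErr *azcore.ResponseError
	if !errors.As(err, &respErr) {
		return
	}
	// RawResponse is tagged `json:"-"`, so only errorCode, statusCode, and
	// errorMessage appear in the output.
	b, jsonErr := json.Marshal(respErr)
	if jsonErr != nil {
		log.Printf("marshal failed: %v", jsonErr)
		return
	}
	log.Println(string(b))
}
```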
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go index 7a56c521..04828527 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -119,5 +119,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { } func (p *Poller[T]) Result(ctx context.Context, out *T) error { - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index ac1c0efb..f4963318 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -40,12 +40,13 @@ type Poller[T any] struct { OrigURL string `json:"origURL"` Method string `json:"method"` FinalState pollers.FinalStateVia `json:"finalState"` + ResultPath string `json:"resultPath"` CurState string `json:"state"` } // New creates a new Poller from the provided initial response. // Pass nil for response to create an empty Poller for rehydration. -func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia, resultPath string) (*Poller[T], error) { if resp == nil { log.Write(log.EventLRO, "Resuming Operation-Location poller.") return &Poller[T]{pl: pl}, nil @@ -82,6 +83,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi OrigURL: resp.Request.URL.String(), Method: resp.Request.Method, FinalState: finalState, + ResultPath: resultPath, CurState: curState, }, nil } @@ -115,10 +117,9 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { func (p *Poller[T]) Result(ctx context.Context, out *T) error { var req *exported.Request var err error + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) - } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { - // no final GET required, terminal response should have it } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { return rlErr } else if rl != "" { @@ -134,6 +135,8 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { // if a final GET request has been created, execute it if req != nil { + // no JSON path when making a final GET request + p.ResultPath = "" resp, err := p.pl.Do(req) if err != nil { return err @@ -141,5 +144,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), p.ResultPath, out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go index eb3cf651..6a7a32e0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -159,7 +159,7 @@ func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, up // ResultHelper processes the response as success or failure. // In the success case, it unmarshals the payload into either a new instance of T or out. // In the failure case, it creates an *azcore.Response error from the response. -func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { +func ResultHelper[T any](resp *http.Response, failed bool, jsonPath string, out *T) error { // short-circuit the simple success case with no response body to unmarshal if resp.StatusCode == http.StatusNoContent { return nil @@ -176,6 +176,18 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { if err != nil { return err } + + if jsonPath != "" && len(payload) > 0 { + // extract the payload from the specified JSON path. + // do this before the zero-length check in case there + // is no payload. + jsonBody := map[string]json.RawMessage{} + if err = json.Unmarshal(payload, &jsonBody); err != nil { + return err + } + payload = jsonBody[jsonPath] + } + if len(payload) == 0 { return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 03691cbf..85514db3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.11.1" + Version = "v1.18.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index 8d984535..bb37a5ef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -161,19 +161,20 @@ type BearerTokenOptions struct { // AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request. type AuthorizationHandler struct { - // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token - // from the policy's given credential. Implementations that need to perform I/O should use the Request's context, - // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't - // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a - // token from its credential according to its configuration. + // OnRequest provides TokenRequestOptions the policy can use to acquire a token for a request. The policy calls OnRequest + // whenever it needs a token and may call it multiple times for the same request. Its func parameter authorizes the request + // with a token from the policy's credential. Implementations that need to perform I/O should use the Request's context, + // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't send + // the request. When OnRequest is nil, the policy follows its default behavior, which is to authorize the request with a token + // from its credential according to its configuration. 
OnRequest func(*Request, func(TokenRequestOptions) error) error - // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the - // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible - // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's - // given credential. Implementations that need to perform I/O should use the Request's context, available from - // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil, - // the policy will return any 401 response to the client. + // OnChallenge allows clients to implement custom HTTP authentication challenge handling. BearerTokenPolicy calls it upon + // receiving a 401 response containing multiple Bearer challenges or a challenge BearerTokenPolicy itself can't handle. + // OnChallenge is responsible for parsing challenge(s) (the Response's WWW-Authenticate header) and reauthorizing the + // Request accordingly. Its func argument authorizes the Request with a token from the policy's credential using the given + // TokenRequestOptions. OnChallenge should honor the Request's context, available from Request.Raw().Context(). When + // OnChallenge returns nil, the policy will send the Request again. OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index cffe692d..c66fc0a9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -32,6 +32,7 @@ type PagingHandler[T any] struct { } // Pager provides operations for iterating over paged responses. +// Methods on this type are not safe for concurrent use. type Pager[T any] struct { current *T handler PagingHandler[T] @@ -94,6 +95,10 @@ type FetcherForNextLinkOptions struct { // NextReq is the func to be called when requesting subsequent pages. // Used for paged operations that have a custom next link operation. NextReq func(context.Context, string) (*policy.Request, error) + + // StatusCodes contains additional HTTP status codes indicating success. + // The default value is http.StatusOK. + StatusCodes []int } // FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. @@ -105,10 +110,13 @@ type FetcherForNextLinkOptions struct { func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { var req *policy.Request var err error + if options == nil { + options = &FetcherForNextLinkOptions{} + } if nextLink == "" { req, err = firstReq(ctx) } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { - if options != nil && options.NextReq != nil { + if options.NextReq != nil { req, err = options.NextReq(ctx, nextLink) } else { req, err = NewRequest(ctx, http.MethodGet, nextLink) @@ -121,7 +129,9 @@ func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, first if err != nil { return nil, err } - if !HasStatusCode(resp, http.StatusOK) { + successCodes := []int{http.StatusOK} + successCodes = append(successCodes, options.StatusCodes...) 
+ if !HasStatusCode(resp, successCodes...) { return nil, NewResponseError(resp) } return resp, nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index cb2a6952..1950a2e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -4,9 +4,12 @@ package runtime import ( + "encoding/base64" "errors" "net/http" + "regexp" "strings" + "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -17,6 +20,11 @@ import ( ) // BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +// It handles [Continuous Access Evaluation] (CAE) challenges. Clients needing to handle +// additional authentication challenges, or needing more control over authorization, should +// provide a [policy.AuthorizationHandler] in [policy.BearerTokenOptions]. +// +// [Continuous Access Evaluation]: https://learn.microsoft.com/entra/identity/conditional-access/concept-continuous-access-evaluation type BearerTokenPolicy struct { // mainResource is the resource to be retreived using the tenant specified in the credential mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState] @@ -43,6 +51,15 @@ func acquire(state acquiringResourceState) (newResource exported.AccessToken, ne return tk, tk.ExpiresOn, nil } +// shouldRefresh determines whether the token should be refreshed. It's a variable so tests can replace it. +var shouldRefresh = func(tk exported.AccessToken, _ acquiringResourceState) bool { + if tk.RefreshOn.IsZero() { + return tk.ExpiresOn.Add(-5 * time.Minute).Before(time.Now()) + } + // no offset in this case because the authority suggested a refresh window--between RefreshOn and ExpiresOn + return tk.RefreshOn.Before(time.Now()) +} + // NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. // cred: an azcore.TokenCredential implementation such as a credential object from azidentity // scopes: the list of permission scopes required for the token. @@ -51,11 +68,24 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * if opts == nil { opts = &policy.BearerTokenOptions{} } + ah := opts.AuthorizationHandler + if ah.OnRequest == nil { + // Set a default OnRequest that simply requests a token with the given scopes. OnChallenge + // doesn't get a default so the policy can use a nil check to determine whether the caller + // provided an implementation. 
+ ah.OnRequest = func(_ *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + // authNZ sets EnableCAE: true in all cases, no need to duplicate that here + return authNZ(policy.TokenRequestOptions{Scopes: scopes}) + } + } + mr := temporal.NewResourceWithOptions(acquire, temporal.ResourceOptions[exported.AccessToken, acquiringResourceState]{ + ShouldRefresh: shouldRefresh, + }) return &BearerTokenPolicy{ - authzHandler: opts.AuthorizationHandler, + authzHandler: ah, cred: cred, scopes: scopes, - mainResource: temporal.NewResource(acquire), + mainResource: mr, allowHTTP: opts.InsecureAllowCredentialWithHTTP, } } @@ -63,6 +93,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts * // authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error { return func(tro policy.TokenRequestOptions) error { + tro.EnableCAE = true as := acquiringResourceState{p: b, req: req, tro: tro} tk, err := b.mainResource.Get(as) if err != nil { @@ -86,12 +117,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return nil, err } - var err error - if b.authzHandler.OnRequest != nil { - err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) - } else { - err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) - } + err := b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) if err != nil { return nil, errorinfo.NonRetriableError(err) } @@ -101,17 +127,54 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { return nil, err } + res, err = b.handleChallenge(req, res, false) + return res, err +} + +// handleChallenge handles authentication challenges either directly (for CAE challenges) or by calling +// the AuthorizationHandler. It's a no-op when the response doesn't include an authentication challenge. +// It will recurse at most once, to handle a CAE challenge following a non-CAE challenge handled by the +// AuthorizationHandler. +func (b *BearerTokenPolicy) handleChallenge(req *policy.Request, res *http.Response, recursed bool) (*http.Response, error) { + var err error if res.StatusCode == http.StatusUnauthorized { b.mainResource.Expire() - if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil { - if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { - res, err = req.Next() + if res.Header.Get(shared.HeaderWWWAuthenticate) != "" { + caeChallenge, parseErr := parseCAEChallenge(res) + if parseErr != nil { + return res, parseErr + } + switch { + case caeChallenge != nil: + authNZ := func(tro policy.TokenRequestOptions) error { + // Take the TokenRequestOptions provided by OnRequest and add the challenge claims. The value + // will be empty at time of writing because CAE is the only feature involving claims. If in + // the future some client needs to specify unrelated claims, this function may need to merge + // them with the challenge claims. 
+ tro.Claims = caeChallenge.params["claims"] + return b.authenticateAndAuthorize(req)(tro) + } + if err = b.authzHandler.OnRequest(req, authNZ); err == nil { + if err = req.RewindBody(); err == nil { + res, err = req.Next() + } + } + case b.authzHandler.OnChallenge != nil && !recursed: + if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil { + if err = req.RewindBody(); err == nil { + if res, err = req.Next(); err == nil { + res, err = b.handleChallenge(req, res, true) + } + } + } else { + // don't retry challenge handling errors + err = errorinfo.NonRetriableError(err) + } + default: + // return the response to the pipeline } } } - if err != nil { - err = errorinfo.NonRetriableError(err) - } return res, err } @@ -121,3 +184,65 @@ func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error { } return nil } + +// parseCAEChallenge returns a *authChallenge representing Response's CAE challenge (nil when Response has none). +// If Response includes a CAE challenge having invalid claims, it returns a NonRetriableError. +func parseCAEChallenge(res *http.Response) (*authChallenge, error) { + var ( + caeChallenge *authChallenge + err error + ) + for _, c := range parseChallenges(res) { + if c.scheme == "Bearer" { + if claims := c.params["claims"]; claims != "" && c.params["error"] == "insufficient_claims" { + if b, de := base64.StdEncoding.DecodeString(claims); de == nil { + c.params["claims"] = string(b) + caeChallenge = &c + } else { + // don't include the decoding error because it's something + // unhelpful like "illegal base64 data at input byte 42" + err = errorinfo.NonRetriableError(errors.New("authentication challenge contains invalid claims: " + claims)) + } + break + } + } + } + return caeChallenge, err +} + +var ( + challenge, challengeParams *regexp.Regexp + once = &sync.Once{} +) + +type authChallenge struct { + scheme string + params map[string]string +} + +// parseChallenges assumes authentication challenges have quoted parameter values +func parseChallenges(res *http.Response) []authChallenge { + once.Do(func() { + // matches challenges having quoted parameters, capturing scheme and parameters + challenge = regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`) + // captures parameter names and values in a match of the above expression + challengeParams = regexp.MustCompile(`(\w+)="([^"]*)"`) + }) + parsed := []authChallenge{} + // WWW-Authenticate can have multiple values, each containing multiple challenges + for _, h := range res.Header.Values(shared.HeaderWWWAuthenticate) { + for _, sm := range challenge.FindAllStringSubmatch(h, -1) { + // sm is [challenge, scheme, params] (see regexp documentation on submatches) + c := authChallenge{ + params: make(map[string]string), + scheme: sm[1], + } + for _, sm := range challengeParams.FindAllStringSubmatch(sm[2], -1) { + // sm is [key="value", key, value] (see regexp documentation on submatches) + c.params[sm[1]] = sm[2] + } + parsed = append(parsed, c) + } + } + return parsed +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go index 3df1c121..f375195c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go @@ -96,7 +96,10 @@ func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err erro // StartSpanOptions contains the 
optional values for StartSpan. type StartSpanOptions struct { - // for future expansion + // Kind indicates the kind of Span. + Kind tracing.SpanKind + // Attributes contains key-value pairs of attributes for the span. + Attributes []tracing.Attribute } // StartSpan starts a new tracing span. @@ -114,7 +117,6 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options // we MUST propagate the active tracer before returning so that the trace policy can access it ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer) - const newSpanKind = tracing.SpanKindInternal if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil { // per the design guidelines, if a SDK method Foo() calls SDK method Bar(), // then the span for Bar() must be suppressed. however, if Bar() makes a REST @@ -126,10 +128,19 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options return ctx, func(err error) {} } } + + if options == nil { + options = &StartSpanOptions{} + } + if options.Kind == 0 { + options.Kind = tracing.SpanKindInternal + } + ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{ - Kind: newSpanKind, + Kind: options.Kind, + Attributes: options.Attributes, }) - ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind) + ctx = context.WithValue(ctx, ctxActiveSpan{}, options.Kind) return ctx, func(err error) { if err != nil { errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go index 04d7bb4e..4c3a31fe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -59,13 +59,33 @@ func setDefaults(o *policy.RetryOptions) { } func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 - delay := time.Duration((1< o.MaxRetryDelay { + delayFloat := float64(delay) * jitterMultiplier + if delayFloat > float64(math.MaxInt64) { + // the jitter pushed us over MaxInt64, so just use MaxInt64 + delay = time.Duration(math.MaxInt64) + } else { + delay = time.Duration(delayFloat) + } + + if delay > o.MaxRetryDelay { // MaxRetryDelay is backfilled with non-negative value delay = o.MaxRetryDelay } + return delay } @@ -102,7 +122,8 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { try := int32(1) for { resp = nil // reset - log.Writef(log.EventRetryPolicy, "=====> Try=%d", try) + // unfortunately we don't have access to the custom allow-list of query params, so we'll redact everything but the default allowed QPs + log.Writef(log.EventRetryPolicy, "=====> Try=%d for %s %s", try, req.Raw().Method, getSanitizedURL(*req.Raw().URL, getAllowedQueryParams(nil))) // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because // the stream may not be at offset 0 when we first get it and we want the same behavior for the diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 03f76c9a..4f90e447 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -50,8 +50,14 @@ const ( // NewPollerOptions contains the optional parameters for NewPoller. 
type NewPollerOptions[T any] struct { // FinalStateVia contains the final-state-via value for the LRO. + // NOTE: used only for Azure-AsyncOperation and Operation-Location LROs. FinalStateVia FinalStateVia + // OperationLocationResultPath contains the JSON path to the result's + // payload when it's included with the terminal success response. + // NOTE: only used for Operation-Location LROs. + OperationLocationResultPath string + // Response contains a preconstructed response type. // The final payload will be unmarshaled into it and returned. Response *T @@ -98,7 +104,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { // op poller must be checked before loc as it can also have a location header - opr, err = op.New[T](pl, resp, options.FinalStateVia) + opr, err = op.New[T](pl, resp, options.FinalStateVia, options.OperationLocationResultPath) } else if loc.Applicable(resp) { opr, err = loc.New[T](pl, resp) } else if body.Applicable(resp) { @@ -172,7 +178,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options } else if loc.CanResume(asJSON) { opr, _ = loc.New[T](pl, nil) } else if op.CanResume(asJSON) { - opr, _ = op.New[T](pl, nil, "") + opr, _ = op.New[T](pl, nil, "", "") } else { return nil, fmt.Errorf("unhandled poller token %s", string(raw)) } @@ -200,6 +206,7 @@ type PollingHandler[T any] interface { } // Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +// Methods on this type are not safe for concurrent use. type Poller[T any] struct { op PollingHandler[T] resp *http.Response diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 06ac95b1..7d34b780 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -15,6 +15,7 @@ import ( "fmt" "io" "mime/multipart" + "net/http" "net/textproto" "net/url" "path" @@ -24,6 +25,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" ) // Base64Encoding is usesd to specify which base-64 encoder/decoder to use when @@ -44,6 +46,11 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic return exported.NewRequest(ctx, httpMethod, endpoint) } +// NewRequestFromRequest creates a new policy.Request with an existing *http.Request +func NewRequestFromRequest(req *http.Request) (*policy.Request, error) { + return exported.NewRequestFromRequest(req) +} + // EncodeQueryParams will parse and encode any query parameters in the specified URL. // Any semicolons will automatically be escaped. func EncodeQueryParams(u string) (string, error) { @@ -263,3 +270,12 @@ func SkipBodyDownload(req *policy.Request) { // CtxAPINameKey is used as a context key for adding/retrieving the API name. type CtxAPINameKey = shared.CtxAPINameKey + +// NewUUID returns a new UUID using the RFC4122 algorithm. 
+func NewUUID() (string, error) { + u, err := uuid.New() + if err != nil { + return "", err + } + return u.String(), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go index 4f1dcf1b..76dadf7d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -44,7 +44,7 @@ func Should(cls Event) bool { if log.lst == nil { return false } - if log.cls == nil || len(log.cls) == 0 { + if len(log.cls) == 0 { return true } for _, c := range log.cls { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go index 238ef42e..02aa1fb3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -11,9 +11,17 @@ import ( "time" ) +// backoff sets a minimum wait time between eager update attempts. It's a variable so tests can manipulate it. +var backoff = func(now, lastAttempt time.Time) bool { + return lastAttempt.Add(30 * time.Second).After(now) +} + // AcquireResource abstracts a method for refreshing a temporal resource. type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) +// ShouldRefresh abstracts a method for indicating whether a resource should be refreshed before expiration. +type ShouldRefresh[TResource, TState any] func(TResource, TState) bool + // Resource is a temporal resource (usually a credential) that requires periodic refreshing. type Resource[TResource, TState any] struct { // cond is used to synchronize access to the shared resource embodied by the remaining fields @@ -31,24 +39,43 @@ type Resource[TResource, TState any] struct { // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource lastAttempt time.Time + // shouldRefresh indicates whether the resource should be refreshed before expiration + shouldRefresh ShouldRefresh[TResource, TState] + // acquireResource is the callback function that actually acquires the resource acquireResource AcquireResource[TResource, TState] } // NewResource creates a new Resource that uses the specified AcquireResource for refreshing. func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { - return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} + r := &Resource[TResource, TState]{acquireResource: ar, cond: sync.NewCond(&sync.Mutex{})} + r.shouldRefresh = r.expiringSoon + return r +} + +// ResourceOptions contains optional configuration for Resource +type ResourceOptions[TResource, TState any] struct { + // ShouldRefresh indicates whether [Resource.Get] should acquire an updated resource despite + // the currently held resource not having expired. [Resource.Get] ignores all errors from + // refresh attempts triggered by ShouldRefresh returning true, and doesn't call ShouldRefresh + // when the resource has expired (it unconditionally updates expired resources). When + // ShouldRefresh is nil, [Resource.Get] refreshes the resource if it will expire within 5 + // minutes. + ShouldRefresh ShouldRefresh[TResource, TState] +} + +// NewResourceWithOptions creates a new Resource that uses the specified AcquireResource for refreshing. 
+func NewResourceWithOptions[TResource, TState any](ar AcquireResource[TResource, TState], opts ResourceOptions[TResource, TState]) *Resource[TResource, TState] { + r := NewResource(ar) + if opts.ShouldRefresh != nil { + r.shouldRefresh = opts.ShouldRefresh + } + return r } // Get returns the underlying resource. // If the resource is fresh, no refresh is performed. func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { - // If the resource is expiring within this time window, update it eagerly. - // This allows other threads/goroutines to keep running by using the not-yet-expired - // resource value while one thread/goroutine updates the resource. - const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration - const backoff = 30 * time.Second // Minimum wait time between eager update attempts - now, acquire, expired := time.Now(), false, false // acquire exclusive lock @@ -65,9 +92,8 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { break } // Getting here means that this thread/goroutine will wait for the updated resource - } else if er.expiration.Add(-window).Before(now) { - // The resource is valid but is expiring within the time window - if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + } else if er.shouldRefresh(resource, state) { + if !(er.acquiring || backoff(now, er.lastAttempt)) { // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted // to do so within the last 30 seconds, this thread/goroutine will do it er.acquiring, acquire = true, true @@ -121,3 +147,8 @@ func (er *Resource[TResource, TState]) Expire() { // Reset the expiration as if we never got this resource to begin with er.expiration = time.Time{} } + +func (er *Resource[TResource, TState]) expiringSoon(TResource, TState) bool { + // call time.Now() instead of using Get's value so ShouldRefresh doesn't need a time.Time parameter + return er.expiration.Add(-5 * time.Minute).Before(time.Now()) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index 284ea54e..0056d112 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,5 +1,110 @@ # Release History +## 1.6.1 (2025-04-16) + +### Bugs Fixed +* Fixed return value of DownloadBuffer when the HTTPRange count given is greater than the data length. Fixes [#23884](https://github.com/Azure/azure-sdk-for-go/issues/23884) + +### Other Changes +* Updated `azidentity` version to `1.9.0` +* Updated `azcore` version to `1.18.0` + +## 1.6.1-beta.1 (2025-02-12) + +### Features Added +* Upgraded service version to `2025-05-05`. + +## 1.6.0 (2025-01-23) + +### Features Added +* Upgraded service version to `2025-01-05`. + +## 1.6.0-beta.1 (2025-01-13) + +### Features Added +* Added permissions & resourcetype parameters in listblob response. +* Added BlobProperties field in BlobPrefix definition in listblob response. + +### Bugs Fixed +* Fix FilterBlob API if Query contains a space character. Fixes [#23546](https://github.com/Azure/azure-sdk-for-go/issues/23546) + +## 1.5.0 (2024-11-13) + +### Features Added +* Fix compareHeaders custom sorting algorithm for String To Sign. 
+ +## 1.5.0-beta.1 (2024-10-22) + +### Other Changes +* Updated `azcore` version to `1.16.0` +* Updated `azidentity` version to `1.8.0` + +## 1.4.1 (2024-09-18) + +### Features Added +* Added crc64 response header to Put Blob. +* Upgraded service version to `2024-08-04`. + +## 1.4.1-beta.1 (2024-08-27) + +### Features Added +* Upgraded service version to `2024-08-04`. + +### Other Changes +* Updated `azcore` version to `1.14.0` + +## 1.4.0 (2024-07-18) + +### Other Changes +* GetProperties() was called twice in DownloadFile method. Enhanced to call it only once, reducing latency. +* Updated `azcore` version to `1.13.0` + +## 1.4.0-beta.1 (2024-06-14) + +### Features Added +* Updated service version to `2024-05-04`. + +### Other Changes +* Updated `azidentity` version to `1.6.0` +* Updated `azcore` version to `1.12.0` + +## 1.3.2 (2024-04-09) + +### Bugs Fixed +* Fixed an issue where GetSASURL() was providing HTTPS SAS, instead of the default http+https SAS. Fixes [#22448](https://github.com/Azure/azure-sdk-for-go/issues/22448) + +### Other Changes +* Integrate `InsecureAllowCredentialWithHTTP` client options. +* Update dependencies. + +## 1.3.1 (2024-02-28) + +### Bugs Fixed + +* Re-enabled `SharedKeyCredential` authentication mode for non TLS protected endpoints. +* Use random write in `DownloadFile` method. Fixes [#22426](https://github.com/Azure/azure-sdk-for-go/issues/22426). + +## 1.3.0 (2024-02-12) + +### Bugs Fixed +* Fix concurrency issue while Downloading File. Fixes [#22156](https://github.com/Azure/azure-sdk-for-go/issues/22156). +* Fix panic when nil options bag is passed to NewGetPageRangesPager. Fixes [22356](https://github.com/Azure/azure-sdk-for-go/issues/22356). +* Fix file offset update after Download file. Fixes [#22297](https://github.com/Azure/azure-sdk-for-go/issues/22297). + +### Other Changes +* Updated the version of `azcore` to `1.9.2` + +## 1.3.0-beta.1 (2024-01-09) + +### Features Added + +* Updated service version to `2023-11-03`. +* Added support for Audience when OAuth is used. + +### Bugs Fixed + +* Block `SharedKeyCredential` authentication mode for non TLS protected endpoints. Fixes [#21841](https://github.com/Azure/azure-sdk-for-go/issues/21841). + ## 1.2.1 (2023-12-13) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 905fb267..9fbc90d6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,6 +1,9 @@ # Azure Blob Storage module for Go +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azblob)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azdatalake%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=2854&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/2854/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/2854/main) -> Service Version: 2023-08-03 +> Service Version: 2023-11-03 Azure Blob Storage is Microsoft's object storage solution for the cloud. 
Blob Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or @@ -19,7 +22,7 @@ Key links: ### Prerequisites -- Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install) +- [Supported](https://aka.ms/azsdk/go/supported-versions) version of Go - [Install Go](https://go.dev/doc/install) - Azure subscription - [Create a free account](https://azure.microsoft.com/free/) - Azure storage account - To create a storage account, use tools including the [Azure portal][storage_account_create_portal], [Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. @@ -246,7 +249,7 @@ For more information see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments. -![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fstorage%2Fazblob%2FREADME.png) + [source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index 2229b7d8..3bf05897 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -9,19 +9,19 @@ package appendblob import ( "context" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "os" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. 
@@ -35,11 +35,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -54,7 +55,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -71,7 +72,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -291,8 +292,8 @@ func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (ab *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return ab.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. 
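Note on the appendblob constructor hunks above: from the caller's side the signatures are unchanged; the difference is that the token audience (via `base.GetAudience`) and the `InsecureAllowCredentialWithHTTP` setting are now threaded into `shared.NewStorageChallengePolicy`, and the pipeline is created with `exported.ModuleName` instead of a per-client name. A minimal usage sketch under the assumption that `azidentity` (not part of this diff) is available; the URL is a placeholder, and with nil options the challenge policy is expected to fall back to the default storage audience:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder URL; substitute a real account, container and blob name.
	const blobURL = "https://<account>.blob.core.windows.net/<container>/<blob>"

	// nil options: audience and HTTP policy settings take their defaults;
	// a non-nil ClientOptions can override them per the hunk above.
	client, err := appendblob.NewClient(blobURL, cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```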
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json index 80d6183c..11b07dbb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azblob", - "Tag": "go/storage/azblob_0040e8284c" + "Tag": "go/storage/azblob_db9a368fe4" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index d2421ddd..98a624f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -11,6 +11,7 @@ import ( "io" "os" "sync" + "sync/atomic" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" @@ -36,15 +37,16 @@ type Client base.Client[generated.BlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewBlobClient(blobURL, azClient, &cred)), nil + return (*Client)(base.NewBlobClient(blobURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -54,11 +56,11 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewBlobClient(blobURL, azClient, nil)), nil + return (*Client)(base.NewBlobClient(blobURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
@@ -70,11 +72,11 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewBlobClient(blobURL, azClient, cred)), nil + return (*Client)(base.NewBlobClient(blobURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -112,6 +114,10 @@ func (b *Client) credential() any { return base.Credential((*base.Client[generated.BlobClient])(b)) } +func (b *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (b *Client) URL() string { return b.generated().Endpoint() @@ -126,7 +132,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -138,7 +144,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil } // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. @@ -180,9 +186,9 @@ func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOption // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { +func (b *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { opts, leaseAccessConditions, modifiedAccessConditions := o.format() - resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + resp, err := b.generated().SetHTTPHeaders(ctx, opts, &httpHeaders, leaseAccessConditions, modifiedAccessConditions) return resp, err } @@ -329,7 +335,8 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl if o.BlockSize == 0 { o.BlockSize = DefaultDownloadBlockSize } - + dataDownloaded := int64(0) + computeReadLength := true count := o.Range.Count if count == CountToEnd { // If size not specified, calculate it // If we don't have the length at all, get it @@ -338,6 +345,8 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl return 0, err } count = *gr.ContentLength - o.Range.Offset + dataDownloaded = count + computeReadLength = false } if count <= 0 { @@ -353,7 +362,7 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl OperationName: "downloadBlobToWriterAt", TransferSize: count, ChunkSize: o.BlockSize, - NumChunks: uint16(((count - 1) / o.BlockSize) + 1), + NumChunks: uint64(((count - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, chunkStart int64, count int64) error { downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ @@ -382,6 +391,9 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl if err != nil { return err } + if computeReadLength { + atomic.AddInt64(&dataDownloaded, *dr.ContentLength) + } err = body.Close() return err }, @@ -389,171 +401,7 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl if err != nil { return 0, err } - return count, nil -} - -// downloadFile downloads an Azure blob to a Writer. The blocks are downloaded parallely, -// but written to file serially -func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadOptions) (int64, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if o.BlockSize == 0 { - o.BlockSize = DefaultDownloadBlockSize - } - - if o.Concurrency == 0 { - o.Concurrency = DefaultConcurrency - } - - count := o.Range.Count - if count == CountToEnd { //Calculate size if not specified - gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) - if err != nil { - return 0, err - } - count = *gr.ContentLength - o.Range.Offset - } - - if count <= 0 { - // The file is empty, there is nothing to download. 
- return 0, nil - } - - progress := int64(0) - progressLock := &sync.Mutex{} - - // helper routine to get body - getBodyForRange := func(ctx context.Context, chunkStart, size int64) (io.ReadCloser, error) { - downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ - Offset: chunkStart + o.Range.Offset, - Count: size, - }, nil) - dr, err := b.DownloadStream(ctx, downloadBlobOptions) - if err != nil { - return nil, err - } - - var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) - if o.Progress != nil { - rangeProgress := int64(0) - body = streaming.NewResponseProgress( - body, - func(bytesTransferred int64) { - diff := bytesTransferred - rangeProgress - rangeProgress = bytesTransferred - progressLock.Lock() - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - - return body, nil - } - - // if file fits in a single buffer, we'll download here. - if count <= o.BlockSize { - body, err := getBodyForRange(ctx, int64(0), count) - if err != nil { - return 0, err - } - defer body.Close() - - return io.Copy(writer, body) - } - - buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize) - defer buffers.Free() - acquireBuffer := func() ([]byte, error) { - select { - case b := <-buffers.Acquire(): - // got a buffer - return b, nil - default: - // no buffer available; allocate a new buffer if possible - if _, err := buffers.Grow(); err != nil { - return nil, err - } - - // either grab the newly allocated buffer or wait for one to become available - return <-buffers.Acquire(), nil - } - } - - numChunks := uint16((count-1)/o.BlockSize) + 1 - blocks := make([]chan []byte, numChunks) - for b := range blocks { - blocks[b] = make(chan []byte) - } - - /* - * We have created as many channels as the number of chunks we have. - * Each downloaded block will be sent to the channel matching its - * sequence number, i.e. 0th block is sent to 0th channel, 1st block - * to 1st channel and likewise. The blocks are then read and written - * to the file serially by below goroutine. Do note that the blocks - * are still downloaded parallelly from n/w, only serialized - * and written to file here. - */ - writerError := make(chan error) - writeSize := int64(0) - go func(ch chan error) { - for _, block := range blocks { - select { - case <-ctx.Done(): - return - case block := <-block: - n, err := writer.Write(block) - writeSize += int64(n) - buffers.Release(block[:cap(block)]) - if err != nil { - ch <- err - return - } - } - } - ch <- nil - }(writerError) - - // Prepare and do parallel download. - err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ - OperationName: "downloadBlobToWriterAt", - TransferSize: count, - ChunkSize: o.BlockSize, - NumChunks: numChunks, - Concurrency: o.Concurrency, - Operation: func(ctx context.Context, chunkStart int64, count int64) error { - buff, err := acquireBuffer() - if err != nil { - return err - } - - body, err := getBodyForRange(ctx, chunkStart, count) - if err != nil { - buffers.Release(buff) - return nil - } - - _, err = io.ReadFull(body, buff[:count]) - body.Close() - if err != nil { - return err - } - - blockIndex := chunkStart / o.BlockSize - blocks[blockIndex] <- buff[:count] - return nil - }, - }) - - if err != nil { - return 0, err - } - // error from writer thread. - if err = <-writerError; err != nil { - return 0, err - } - return writeSize, nil + return dataDownloaded, nil } // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. 
@@ -607,6 +455,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil return 0, err } size = *props.ContentLength - do.Range.Offset + do.Range.Count = size } else { size = count } @@ -623,7 +472,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil } if size > 0 { - return b.downloadFile(ctx, file, *do) + return b.downloadBuffer(ctx, file, *do) } else { // if the blob's size is 0, there is no need in downloading it return 0, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go index 1deedb59..a625c995 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -101,7 +101,6 @@ func (s *RetryReader) setResponse(r io.ReadCloser) { // Read from retry reader func (s *RetryReader) Read(p []byte) (n int, err error) { for try := int32(0); ; try++ { - //fmt.Println(try) // Comment out for debugging. if s.countWasBounded && s.info.Range.Count == CountToEnd { // User specified an original count and the remaining bytes are 0, return 0, EOF return 0, io.EOF diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index e3167b77..7a3ab3fe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -11,9 +11,6 @@ import ( "context" "encoding/base64" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "math" "os" @@ -22,16 +19,19 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. 
@@ -45,11 +45,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -63,7 +64,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -80,7 +81,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -363,8 +364,8 @@ func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (bb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return bb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. @@ -473,7 +474,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu OperationName: "uploadFromReader", TransferSize: actualSize, ChunkSize: o.BlockSize, - NumChunks: uint16(((actualSize - 1) / o.BlockSize) + 1), + NumChunks: uint64(((actualSize - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, offset int64, chunkSize int64) error { // This function is called once per block. 
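Note on the blob/blockblob download changes above: the separate `downloadFile` path is removed, `DownloadFile` now goes through `downloadBuffer`, and the returned count is the `dataDownloaded` total rather than the requested range count (the azblob 1.6.1 fix for issue #23884). A minimal sketch, assuming a blob reachable via a SAS URL; the URL is a placeholder and the buffer size is arbitrary:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	// Placeholder SAS URL; substitute a real, readable blob URL.
	const blobURL = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"

	client, err := blob.NewClientWithNoCredential(blobURL, nil)
	if err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 8*1024*1024)
	// Request more bytes than the blob may hold; with the change above the
	// returned count reflects the bytes actually written into buf rather
	// than the oversized requested count.
	n, err := client.DownloadBuffer(context.Background(), buf, &blob.DownloadBufferOptions{
		Range: blob.HTTPRange{Offset: 0, Count: int64(len(buf))},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}
```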
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml index 03035033..2259336b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -21,8 +21,8 @@ pr: - sdk/storage/azblob -stages: - - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: ServiceDirectory: 'storage/azblob' RunLiveTests: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go index 5c4b719c..f36a1624 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go @@ -31,11 +31,7 @@ type Client struct { // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - var clientOptions *service.ClientOptions - if options != nil { - clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} - } - svcClient, err := service.NewClient(serviceURL, cred, clientOptions) + svcClient, err := service.NewClient(serviceURL, cred, (*service.ClientOptions)(options)) if err != nil { return nil, err } @@ -50,11 +46,7 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp // - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { - var clientOptions *service.ClientOptions - if options != nil { - clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} - } - svcClient, err := service.NewClientWithNoCredential(serviceURL, clientOptions) + svcClient, err := service.NewClientWithNoCredential(serviceURL, (*service.ClientOptions)(options)) if err != nil { return nil, err } @@ -83,15 +75,12 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti // - connectionString - a connection string for the desired storage account // - options - client options; pass nil to accept the default values func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { - if options == nil { - options = &ClientOptions{} - } - containerClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) + svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) if err != nil { return nil, err } return &Client{ - svc: containerClient, + svc: svcClient, }, nil } @@ -106,7 +95,7 @@ func (c *Client) ServiceClient() *service.Client { } // CreateContainer is a lifecycle method to creates a new container under the specified account. -// If the container with the same name already exists, a ResourceExistsError will be raised. +// If the container with the same name already exists, a ContainerAlreadyExists Error will be raised. // This method returns a client with which to interact with the newly created container. 
func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) { return c.svc.CreateContainer(ctx, containerName, o) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go index 3058b5d4..0e43ed01 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -11,8 +11,6 @@ import ( "context" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "net/http" "net/url" "time" @@ -20,8 +18,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" @@ -42,15 +42,16 @@ type Client base.Client[generated.ContainerClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewContainerClient(containerURL, azClient, &cred)), nil + return (*Client)(base.NewContainerClient(containerURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -60,11 +61,11 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewContainerClient(containerURL, azClient, nil)), nil + return (*Client)(base.NewContainerClient(containerURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
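As in the blob and blockblob constructors earlier in the diff, the container constructors now resolve an OAuth scope from an optional Audience client option and pass it, together with the InsecureAllowCredentialWithHTTP flag, into the storage challenge policy. The resolution itself is base.GetAudience, added later in this diff; the sketch below mirrors that logic, with the default scope inlined only so the snippet is self-contained (it is assumed to match the SDK's shared TokenScope constant, which does not appear in this diff):

```go
// Sketch of the audience-to-scope resolution used by the new constructors.
// defaultTokenScope is assumed to equal the SDK's shared.TokenScope value.
package main

import (
	"fmt"
	"strings"
)

const defaultTokenScope = "https://storage.azure.com/.default"

func resolveScope(audience string) string {
	if strings.TrimSpace(audience) == "" {
		return defaultTokenScope // no audience configured: fall back to the default storage scope
	}
	return strings.TrimRight(audience, "/") + "/.default" // normalise trailing slash, append /.default
}

func main() {
	fmt.Println(resolveScope(""))                           // https://storage.azure.com/.default
	fmt.Println(resolveScope("https://storage.azure.com/")) // https://storage.azure.com/.default
}
```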
@@ -76,11 +77,11 @@ func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCreden conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewContainerClient(containerURL, azClient, cred)), nil + return (*Client)(base.NewContainerClient(containerURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -122,6 +123,10 @@ func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { return base.InnerClient((*base.Client[generated.BlobClient])(b)) } +func (c *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ContainerClient])(c)) +} + // URL returns the URL endpoint used by the Client object. func (c *Client) URL() string { return c.generated().Endpoint() @@ -133,7 +138,7 @@ func (c *Client) URL() string { func (c *Client) NewBlobClient(blobName string) *blob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlobClient), c.credential())) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.credential(), c.getClientOptions())) } // NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of @@ -142,7 +147,7 @@ func (c *Client) NewBlobClient(blobName string) *blob.Client { func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.AppendBlobClient), c.sharedKey())) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of @@ -151,7 +156,7 @@ func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlockBlobClient), c.sharedKey())) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of @@ -160,7 +165,7 @@ func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.PageBlobClient), c.sharedKey())) + return (*pageblob.Client)(base.NewPageBlobClient(blobURL, 
c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. @@ -343,7 +348,6 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Tim // Containers do not have snapshots, nor versions. qps, err := sas.BlobSignatureValues{ Version: sas.Version, - Protocol: sas.ProtocolHTTPS, ContainerName: urlParts.ContainerName, Permissions: permissions.String(), StartTime: st, @@ -366,7 +370,8 @@ func (c *Client) NewBatchBuilder() (*BatchBuilder, error) { switch cred := c.credential().(type) { case *azcore.TokenCredential: - authPolicy = shared.NewStorageChallengePolicy(*cred) + conOptions := c.getClientOptions() + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP) case *SharedKeyCredential: authPolicy = exported.NewSharedKeyCredPolicy(cred) case nil: diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go index 61d936ab..ccee90db 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -7,10 +7,11 @@ package container import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "reflect" "time" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) @@ -126,7 +127,7 @@ func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetProperties // ListBlobsInclude indicates what additional information the service should return with each blob. type ListBlobsInclude struct { - Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool + Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions, Permissions bool } func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem { @@ -166,7 +167,9 @@ func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem { if l.Versions { include = append(include, generated.ListBlobsIncludeItemVersions) } - + if l.Permissions { + include = append(include, generated.ListBlobsIncludeItemPermissions) + } return include } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go index c95f1925..073de855 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -10,16 +10,24 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "strings" ) // ClientOptions contains the optional parameters when creating a Client. type ClientOptions struct { azcore.ClientOptions + + // Audience to use when requesting tokens for Azure Active Directory authentication. 
+ // Only has an effect when credential is of type TokenCredential. The value could be + // https://storage.azure.com/ (default) or https://.blob.core.windows.net. + Audience string } type Client[T any] struct { inner *T credential any + options *ClientOptions } func InnerClient[T any](client *Client[T]) *T { @@ -39,28 +47,43 @@ func Credential[T any](client *Client[T]) any { return client.credential } +func GetClientOptions[T any](client *Client[T]) *ClientOptions { + return client.options +} + +func GetAudience(clOpts *ClientOptions) string { + if clOpts == nil || len(strings.TrimSpace(clOpts.Audience)) == 0 { + return shared.TokenScope + } else { + return strings.TrimRight(clOpts.Audience, "/") + "/.default" + } +} + func NewClient[T any](inner *T) *Client[T] { return &Client[T]{inner: inner} } -func NewServiceClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ServiceClient] { +func NewServiceClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ServiceClient] { return &Client[generated.ServiceClient]{ inner: generated.NewServiceClient(containerURL, azClient), credential: credential, + options: options, } } -func NewContainerClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ContainerClient] { +func NewContainerClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ContainerClient] { return &Client[generated.ContainerClient]{ inner: generated.NewContainerClient(containerURL, azClient), credential: credential, + options: options, } } -func NewBlobClient(blobURL string, azClient *azcore.Client, credential any) *Client[generated.BlobClient] { +func NewBlobClient(blobURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.BlobClient] { return &Client[generated.BlobClient]{ inner: generated.NewBlobClient(blobURL, azClient), credential: credential, + options: options, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go index 02966ee3..c26c62aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -11,12 +11,6 @@ import ( "bytes" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" "io" "mime" "mime/multipart" @@ -24,6 +18,13 @@ import ( "net/textproto" "strconv" "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( @@ -45,7 +46,7 @@ func createBatchID() (string, error) { // buildSubRequest is used for building the sub-request. 
Example: // DELETE /container0/blob0 HTTP/1.1 // x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT -// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Authorization: SharedKey account: // Content-Length: 0 func buildSubRequest(req *policy.Request) []byte { var batchSubRequest strings.Builder @@ -80,7 +81,7 @@ func buildSubRequest(req *policy.Request) []byte { // // DELETE /container0/blob0 HTTP/1.1 // x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT -// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Authorization: SharedKey account: // Content-Length: 0 func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { batchID, err := createBatchID() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go index bd0bd5e2..b0be323b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -109,6 +109,91 @@ func getHeader(key string, headers map[string][]string) string { return "" } +func getWeightTables() [][]int { + tableLv0 := [...]int{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71c, 0x0, 0x71f, 0x721, 0x723, 0x725, + 0x0, 0x0, 0x0, 0x72d, 0x803, 0x0, 0x0, 0x733, 0x0, 0xd03, 0xd1a, 0xd1c, 0xd1e, + 0xd20, 0xd22, 0xd24, 0xd26, 0xd28, 0xd2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, + 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, + 0x0, 0x0, 0x0, 0x743, 0x744, 0x748, 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, + 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, + 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, 0x0, 0x74c, 0x0, 0x750, 0x0, + } + tableLv2 := [...]int{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8012, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8212, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } + tables := [][]int{tableLv0[:], tableLv2[:]} + return tables +} + +// NewHeaderStringComparer performs a multi-level, weight-based comparison of two strings +func compareHeaders(lhs, rhs string, tables [][]int) int { + currLevel, i, j := 0, 0, 0 + n := len(tables) + lhsLen := len(lhs) + rhsLen := len(rhs) + + for currLevel < n { + if currLevel == (n-1) && i != j { + if i > j { + return -1 + } + if i < j { + return 1 + } + return 0 + } + + var w1, w2 int + + // Check bounds before accessing lhs[i] + if i < lhsLen { + w1 = tables[currLevel][lhs[i]] + } else { + w1 = 0x1 + } + + // Check bounds before accessing rhs[j] + if j < rhsLen { + w2 = tables[currLevel][rhs[j]] + } else { + w2 = 0x1 + } + + if 
w1 == 0x1 && w2 == 0x1 { + i = 0 + j = 0 + currLevel++ + } else if w1 == w2 { + i++ + j++ + } else if w1 == 0 { + i++ + } else if w2 == 0 { + j++ + } else { + if w1 < w2 { + return -1 + } + if w1 > w2 { + return 1 + } + return 0 + } + } + return 0 +} + func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { cm := map[string][]string{} for k, v := range headers { @@ -125,7 +210,11 @@ func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) stri for key := range cm { keys = append(keys, key) } - sort.Strings(keys) + tables := getWeightTables() + // Sort the keys using the custom comparator + sort.Slice(keys, func(i, j int) bool { + return compareHeaders(keys[i], keys[j], tables) < 0 + }) ch := bytes.NewBufferString("") for i, key := range keys { if i > 0 { @@ -195,6 +284,13 @@ func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { } func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no SharedKeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if s.cred == nil { + return req.Next() + } + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index c8be74c2..a18d0c67 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -7,6 +7,6 @@ package exported const ( - ModuleName = "azblob" - ModuleVersion = "v1.2.1" + ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + ModuleVersion = "v1.6.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md index 25deeec3..96d47c4a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -7,7 +7,7 @@ go: true clear-output-folder: false version: "^3.0.0" license-header: MICROSOFT_MIT_NO_VERSION -input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json" +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/ae95eb6a4701d844bada7d1c4f5ecf4a7444e5b8/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json" credential-scope: "https://storage.azure.com/.default" output-folder: ../generated file-prefix: "zz_" @@ -19,10 +19,55 @@ modelerfour: seal-single-value-enum-by-default: true lenient-model-deduplication: true export-clients: true -use: "@autorest/go@4.0.0-preview.61" +use: "@autorest/go@4.0.0-preview.65" ``` -### Updating service version to 2023-08-03 +### Add a Properties field to the BlobPrefix definition +```yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobPrefix.properties["Properties"] = { + "type": "object", + "$ref": 
"#/definitions/BlobPropertiesInternal" + }; +``` + +### Add Owner,Group,Permissions,Acl,ResourceType in ListBlob Response +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobPropertiesInternal.properties["Owner"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Group"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Permissions"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Acl"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["ResourceType"] = { + "type" : "string", + }; + +``` + +### Add permissions in ListBlobsInclude +``` yaml +directive: +- from: swagger-document + where: $.parameters.ListBlobsInclude + transform: > + $.items.enum.push("permissions"); +``` + +### Updating service version to 2025-05-05 ```yaml directive: - from: @@ -35,8 +80,21 @@ directive: where: $ transform: >- return $. - replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`). - replaceAll(`2021-12-02`, `2023-08-03`); + replaceAll(`[]string{"2025-01-05"}`, `[]string{ServiceVersion}`); +``` + +### Fix CRC Response Header in PutBlob response +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}?BlockBlob"].put.responses["201"].headers + transform: > + $["x-ms-content-crc64"] = { + "x-ms-client-name": "ContentCRC64", + "type": "string", + "format": "byte", + "description": "Returned for a block blob so that the client can check the integrity of message content." + }; ``` ### Undo breaking change with BlobName @@ -293,7 +351,7 @@ directive: replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); -- from: zz_response_types.go +- from: zz_responses.go where: $ transform: >- return $. @@ -364,11 +422,13 @@ directive: ``` yaml directive: - - from: zz_service_client.go - where: $ - transform: >- - return $. - replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`) +- from: + - zz_service_client.go + - zz_container_client.go + where: $ + transform: >- + return $. + replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/g, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`); ``` ### Change `where` parameter in blob filtering to be required diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go index 8c13c441..553cd227 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go @@ -6,4 +6,4 @@ package generated -const ServiceVersion = "2023-08-03" +const ServiceVersion = "2025-05-05" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index dbfe069e..9f9e145b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. 
// Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -32,7 +29,7 @@ type AppendBlobClient struct { // AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - body - Initial data // - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. @@ -72,54 +69,60 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { 
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.StructuredBodyType != nil { + req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if options != nil && options.StructuredContentLength != nil { + req.Raw().Header["x-ms-structured-content-length"] = []string{strconv.FormatInt(*options.StructuredContentLength, 10)} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -190,6 +193,9 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-structured-body"); val != "" { + result.StructuredBodyType = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } @@ -201,7 +207,7 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - sourceURL - Specify a URL to the copy source. // - contentLength - The length of the request. 
// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL @@ -244,76 +250,76 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - if options != nil && options.SourceRange != nil { - req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} - } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} - } + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = 
[]string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - 
req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -387,7 +393,7 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp // Create - The Create Append Blob operation creates a new append blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. @@ -424,10 +430,25 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -438,21 +459,15 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != 
nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -460,44 +475,35 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { 
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -560,7 +566,7 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen // or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -596,29 +602,29 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if appendPositionAccessConditions != nil && 
appendPositionAccessConditions.AppendPosition != nil { req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index caaa3dfe..54b29998 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -32,7 +29,7 @@ type BlobClient struct { // blob with zero length and full metadata. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. // - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -67,15 +64,15 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } req.Raw().Header["x-ms-copy-action"] = []string{"abort"} if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -104,7 +101,7 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. 
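The bulk of the remaining hunks in the generated clients, here and in the files that follow, reorder how request headers are assigned without changing which headers are sent. Because http.Header is a map and net/http sorts header keys when serialising the request, assignment order has no effect on the wire format, as this small sketch illustrates:

```go
// Two header sets built in different orders compare equal: http.Header is just
// a map[string][]string, and net/http writes keys in sorted order anyway.
package main

import (
	"fmt"
	"net/http"
	"reflect"
)

func main() {
	a := http.Header{}
	a.Set("x-ms-version", "2025-01-05")
	a.Set("Accept", "application/xml")

	b := http.Header{}
	b.Set("Accept", "application/xml")
	b.Set("x-ms-version", "2025-01-05")

	fmt.Println(reflect.DeepEqual(a, b)) // true
}
```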
@@ -140,31 +137,31 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} - if options != nil && options.ProposedLeaseID != nil { - req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -206,7 +203,7 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { @@ -239,30 +236,30 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"break"} - if options != nil && options.BreakPeriod != nil { - req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -309,7 +306,7 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. 
See Guid Constructor (String) for a list of valid GUID @@ -346,29 +343,29 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"change"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -411,7 +408,7 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // until the copy is complete. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. 
@@ -450,77 +447,77 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-requires-sync"] = []string{"true"} - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = 
[]string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + req.Raw().Header["x-ms-requires-sync"] = []string{"true"} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } - if options != nil && options.CopySourceTags != nil { - req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = 
[]string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -585,7 +582,7 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. @@ -621,12 +618,24 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -634,35 +643,23 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = 
[]string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -724,7 +721,7 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo // return an HTTP status code of 404 (ResourceNotFound). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -753,45 +750,45 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob return nil, err } reqQP := req.Raw().URL.Query() + if options != nil && options.DeleteType != nil { + reqQP.Set("deletetype", string(*options.DeleteType)) + } if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if options != nil && options.DeleteType != nil { - reqQP.Set("deletetype", string(*options.DeleteType)) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.DeleteSnapshots != nil { - req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -820,7 +817,7 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy // method. func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { @@ -849,15 +846,21 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -887,7 +890,7 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp // can also call Download to read a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. 
@@ -920,25 +923,32 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.RangeGetContentMD5 != nil { - req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if options != nil && options.RangeGetContentCRC64 != nil { - req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -946,29 +956,25 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = 
[]string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RangeGetContentCRC64 != nil { + req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if options != nil && options.StructuredBodyType != nil { + req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1167,15 +1173,25 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } for hh := range resp.Header { if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} + if result.ObjectReplicationRules == nil { + result.ObjectReplicationRules = map[string]*string{} } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-structured-body"); val != "" { + result.StructuredBodyType = &val + } + if val := resp.Header.Get("x-ms-structured-content-length"); val != "" { + structuredContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.StructuredContentLength = &structuredContentLength + } if val := resp.Header.Get("x-ms-tag-count"); val != "" { tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -1195,7 +1211,7 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. 
func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { var err error @@ -1222,11 +1238,17 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1246,6 +1268,13 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } @@ -1262,7 +1291,7 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo // for the blob. It does not return the content of the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. 
@@ -1295,45 +1324,45 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + 
req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1549,10 +1578,10 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } for hh := range resp.Header { if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} + if result.ObjectReplicationRules == nil { + result.ObjectReplicationRules = map[string]*string{} } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) } } if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { @@ -1580,7 +1609,7 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob // GetTags - The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -1610,17 +1639,17 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tags") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } if options != nil && options.VersionID != nil { reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1630,7 +1659,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1662,7 +1691,7 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. 
@@ -1701,38 +1730,38 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.QueryRequest != nil { if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { return nil, err @@ -1896,7 +1925,7 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the 
operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -1930,28 +1959,28 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"release"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1990,7 +2019,7 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -2024,28 +2053,28 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"renew"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2087,7 +2116,7 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli // SetExpiry - Sets the time a blob will expire and be deleted. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - expiryOptions - Required. Indicates mode of the expiry time // - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. 
func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { @@ -2120,7 +2149,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2128,7 +2157,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti if options != nil && options.ExpiresOn != nil { req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2167,7 +2196,7 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -2202,14 +2231,24 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { 
req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -2217,32 +2256,22 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -2288,7 +2317,7 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -2318,24 +2347,30 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2374,7 +2409,7 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - legalHold - Specified if a legal hold should be set on the blob. // - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { @@ -2403,16 +2438,22 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "legalhold") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2449,7 +2490,7 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC // pairs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. @@ -2485,15 +2526,24 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ 
-2501,32 +2551,23 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2581,7 +2622,7 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - tags - Blob tags // - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -2619,23 +2660,23 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, tags); err != nil { return nil, err } @@ -2670,7 +2711,7 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - tier - Indicates the tier to be set on the blob. // - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
@@ -2704,28 +2745,28 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} - if options != nil && options.RehydratePriority != nil { - req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2747,7 +2788,7 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. 
@@ -2785,6 +2826,41 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { if v != nil { @@ -2792,66 +2868,31 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop } } } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } if options != nil && options.RehydratePriority != nil { req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SealBlob != nil { + req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = 
[]string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if options != nil && options.BlobTagsString != nil { req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - if options != nil && options.SealBlob != nil { - req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold != nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2899,7 +2940,7 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { var err error @@ -2931,11 +2972,11 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index bfd7f5ea..324db765 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -36,7 +33,7 @@ type BlockBlobClient struct { // belong to. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - blocks - Blob Blocks. // - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList // method. 
@@ -75,11 +72,30 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -90,24 +106,17 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if options != nil && options.TransactionalContentMD5 != nil { - req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if options != nil && options.TransactionalContentCRC64 != nil { req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && 
cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -115,47 +124,35 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, blocks); err != nil { return nil, err } @@ -227,7 +224,7 
@@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response // GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. // - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -257,26 +254,26 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li return nil, err } reqQP := req.Raw().URL.Query() + reqQP.Set("blocklisttype", string(listType)) reqQP.Set("comp", "blocklist") if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - reqQP.Set("blocklisttype", string(listType)) if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -332,7 +329,7 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // Block from URL API in conjunction with Put Block List. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. 
The value should be URL-encoded as it would appear in a request @@ -375,13 +372,31 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -392,21 +407,25 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options 
!= nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if options != nil && options.CopySourceBlobProperties != nil { + req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)} + } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -414,66 +433,44 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = 
[]string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if options != nil && options.BlobTagsString != nil { req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if options != nil && options.CopySourceBlobProperties != nil { - req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} - } - if options != nil && options.CopySourceTags != nil { - req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -535,7 +532,7 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) // StageBlock - The Stage Block operation creates a new block to be committed as part of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal // to 64 bytes in size. For a given blob, the length of the value specified for the blockid // parameter must be the same size for each block. 
@@ -570,21 +567,25 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "block") reqQP.Set("blockid", blockID) + reqQP.Set("comp", "block") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if options != nil && options.TransactionalContentCRC64 != nil { req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -592,17 +593,19 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.StructuredBodyType != nil { + req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType} + } + if options != nil && options.StructuredContentLength != nil { + req.Raw().Header["x-ms-structured-content-length"] = []string{strconv.FormatInt(*options.StructuredContentLength, 10)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -652,6 +655,9 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-structured-body"); val != "" { + result.StructuredBodyType = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } @@ -662,7 +668,7 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // are read from a URL. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal // to 64 bytes in size. For a given blob, the length of the value specified for the blockid // parameter must be the same size for each block. @@ -700,22 +706,23 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("comp", "block") reqQP.Set("blockid", blockID) + reqQP.Set("comp", "block") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - if options != nil && options.SourceRange != nil { - req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -723,35 +730,34 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentMD5 != nil { + 
req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -810,7 +816,7 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // the content of a block blob, use the Put Block List operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - body - Initial data // - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. 
@@ -848,13 +854,31 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -865,21 +889,18 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = 
[]string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -887,50 +908,41 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + if options != nil && 
options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.StructuredBodyType != nil { + req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType} + } + if options != nil && options.StructuredContentLength != nil { + req.Raw().Header["x-ms-structured-content-length"] = []string{strconv.FormatInt(*options.StructuredContentLength, 10)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -943,6 +955,13 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { @@ -983,6 +1002,9 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-structured-body"); val != "" { + result.StructuredBodyType = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index 95af9e15..48724a4c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
@@ -346,6 +343,7 @@ const (
 	ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy"
 	ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold"
 	ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata"
+	ListBlobsIncludeItemPermissions ListBlobsIncludeItem = "permissions"
 	ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots"
 	ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags"
 	ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs"
@@ -361,6 +359,7 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem {
 	ListBlobsIncludeItemImmutabilitypolicy,
 	ListBlobsIncludeItemLegalhold,
 	ListBlobsIncludeItemMetadata,
+	ListBlobsIncludeItemPermissions,
 	ListBlobsIncludeItemSnapshots,
 	ListBlobsIncludeItemTags,
 	ListBlobsIncludeItemUncommittedblobs,
@@ -523,6 +522,7 @@ const (
 	StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch"
 	StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch"
 	StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch"
+	StorageErrorCodeBlobAccessTierNotSupportedForAccountType StorageErrorCode = "BlobAccessTierNotSupportedForAccountType"
 	StorageErrorCodeBlobAlreadyExists StorageErrorCode = "BlobAlreadyExists"
 	StorageErrorCodeBlobArchived StorageErrorCode = "BlobArchived"
 	StorageErrorCodeBlobBeingRehydrated StorageErrorCode = "BlobBeingRehydrated"
@@ -641,6 +641,7 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode {
 	StorageErrorCodeAuthorizationResourceTypeMismatch,
 	StorageErrorCodeAuthorizationServiceMismatch,
 	StorageErrorCodeAuthorizationSourceIPMismatch,
+	StorageErrorCodeBlobAccessTierNotSupportedForAccountType,
 	StorageErrorCodeBlobAlreadyExists,
 	StorageErrorCodeBlobArchived,
 	StorageErrorCodeBlobBeingRehydrated,
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
index ce1ff6fd..61ddc669 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
 // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -34,7 +31,7 @@ type ContainerClient struct {
 // to 60 seconds, or can be infinite
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-08-03
+// Generated from API version 2025-01-05
 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
 // lease can be between 15 and 60 seconds. A lease duration cannot be changed using
 // renew or change.
@@ -70,23 +67,23 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} - if options != nil && options.ProposedLeaseID != nil { - req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} - } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -129,7 +126,7 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { @@ -162,22 +159,22 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"break"} - if options != nil && options.BreakPeriod != nil { - req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} - } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -225,7 +222,7 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. 
See Guid Constructor (String) for a list of valid GUID @@ -262,21 +259,21 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"change"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -319,7 +316,7 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C // fails // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. // - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. 
func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { @@ -351,18 +348,11 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.Access != nil { req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -372,7 +362,14 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil { req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -412,7 +409,7 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -445,21 +442,21 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -489,7 +486,7 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai // Filter blobs searches within the given container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - where - Filters the results to return only to return only blobs whose tags match the specified expression. // - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. 
func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) { @@ -517,27 +514,27 @@ func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, whe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "blobs") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } - reqQP.Set("where", where) if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + reqQP.Set("where", where) + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -570,7 +567,7 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
@@ -599,20 +596,20 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "acl") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -657,7 +654,7 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo // method. func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { @@ -685,11 +682,17 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -709,6 +712,13 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } @@ -725,7 +735,7 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. 
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { @@ -757,15 +767,15 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -854,7 +864,7 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) // NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager // method. // @@ -865,10 +875,9 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) @@ -876,18 +885,19 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -921,7 +931,7 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp // NewListBlobHierarchySegmentPager - 
[Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that // acts as a placeholder for all blobs whose names begin with the same substring up to the // appearance of the delimiter character. The delimiter may be a single character or a string. @@ -955,30 +965,30 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) - } reqQP.Set("delimiter", delimiter) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1014,7 +1024,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - leaseID - Specifies the current lease ID on the resource. // - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -1048,20 +1058,20 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"release"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1100,7 +1110,7 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - sourceContainerName - Required. Specifies the name of the container to rename. // - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { @@ -1128,13 +1138,13 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "rename") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1142,7 +1152,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo if options != nil && options.SourceLeaseID != nil { req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1172,7 +1182,7 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - leaseID - Specifies the current lease ID on the resource. 
// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -1206,20 +1216,20 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"renew"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1261,7 +1271,7 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { var err error @@ -1288,13 +1298,13 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "undelete") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1304,7 +1314,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options if options != nil && options.DeletedContainerVersion != nil { req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1334,7 +1344,7 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta // may be accessed publicly. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - containerACL - the acls for the container // - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy // method. @@ -1365,29 +1375,29 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "acl") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.Access != nil { - req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} - } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} type wrapper struct { XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` @@ -1433,7 +1443,7 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -1462,12 +1472,19 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "metadata") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } @@ -1478,14 +1495,7 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt } } } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1524,7 +1534,7 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. 
Example header // value: multipart/mixed; boundary=batch_ @@ -1555,20 +1565,20 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "batch") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, multipartContentType); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index 7251de83..803b2858 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -89,6 +86,9 @@ type BlobName struct { type BlobPrefix struct { // REQUIRED Name *string `xml:"Name"` + + // Properties of a blob + Properties *BlobProperties `xml:"Properties"` } // BlobProperties - Properties of a blob @@ -98,6 +98,7 @@ type BlobProperties struct { // REQUIRED LastModified *time.Time `xml:"Last-Modified"` + ACL *string `xml:"Acl"` AccessTier *AccessTier `xml:"AccessTier"` AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` AccessTierInferred *bool `xml:"AccessTierInferred"` @@ -127,6 +128,7 @@ type BlobProperties struct { // The name of the encryption scope under which the blob is encrypted. EncryptionScope *string `xml:"EncryptionScope"` ExpiresOn *time.Time `xml:"Expiry-Time"` + Group *string `xml:"Group"` ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` IncrementalCopy *bool `xml:"IncrementalCopy"` @@ -136,11 +138,14 @@ type BlobProperties struct { LeaseState *LeaseStateType `xml:"LeaseState"` LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` LegalHold *bool `xml:"LegalHold"` + Owner *string `xml:"Owner"` + Permissions *string `xml:"Permissions"` // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High // and Standard. 
RehydratePriority *RehydratePriority `xml:"RehydratePriority"` RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + ResourceType *string `xml:"ResourceType"` ServerEncrypted *bool `xml:"ServerEncrypted"` TagCount *int32 `xml:"TagCount"` } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go index 7e094db8..e2e64d6f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -46,8 +43,12 @@ func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) er if err := dec.DecodeElement(aux, &start); err != nil { return err } - a.Expiry = (*time.Time)(aux.Expiry) - a.Start = (*time.Time)(aux.Start) + if aux.Expiry != nil && !(*time.Time)(aux.Expiry).IsZero() { + a.Expiry = (*time.Time)(aux.Expiry) + } + if aux.Start != nil && !(*time.Time)(aux.Start).IsZero() { + a.Start = (*time.Time)(aux.Start) + } return nil } @@ -152,19 +153,35 @@ func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) if err := dec.DecodeElement(aux, &start); err != nil { return err } - b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + if aux.AccessTierChangeTime != nil && !(*time.Time)(aux.AccessTierChangeTime).IsZero() { + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + } if aux.ContentMD5 != nil { if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { return err } } - b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) - b.CreationTime = (*time.Time)(aux.CreationTime) - b.DeletedTime = (*time.Time)(aux.DeletedTime) - b.ExpiresOn = (*time.Time)(aux.ExpiresOn) - b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) - b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) - b.LastModified = (*time.Time)(aux.LastModified) + if aux.CopyCompletionTime != nil && !(*time.Time)(aux.CopyCompletionTime).IsZero() { + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + } + if aux.CreationTime != nil && !(*time.Time)(aux.CreationTime).IsZero() { + b.CreationTime = (*time.Time)(aux.CreationTime) + } + if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() { + b.DeletedTime = (*time.Time)(aux.DeletedTime) + } + if aux.ExpiresOn != nil && !(*time.Time)(aux.ExpiresOn).IsZero() { + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + } + if aux.ImmutabilityPolicyExpiresOn != nil && !(*time.Time)(aux.ImmutabilityPolicyExpiresOn).IsZero() { + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + } + if aux.LastAccessedOn != nil && !(*time.Time)(aux.LastAccessedOn).IsZero() { + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + } + if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() { + b.LastModified = (*time.Time)(aux.LastModified) + } return nil } @@ -271,8 +288,12 @@ func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElem if err := dec.DecodeElement(aux, &start); err != nil { return err } - 
c.DeletedTime = (*time.Time)(aux.DeletedTime) - c.LastModified = (*time.Time)(aux.LastModified) + if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() { + c.DeletedTime = (*time.Time)(aux.DeletedTime) + } + if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() { + c.LastModified = (*time.Time)(aux.LastModified) + } return nil } @@ -316,7 +337,9 @@ func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) if err := dec.DecodeElement(aux, &start); err != nil { return err } - g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + if aux.LastSyncTime != nil && !(*time.Time)(aux.LastSyncTime).IsZero() { + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + } return nil } @@ -436,8 +459,12 @@ func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElemen if err := dec.DecodeElement(aux, &start); err != nil { return err } - u.SignedExpiry = (*time.Time)(aux.SignedExpiry) - u.SignedStart = (*time.Time)(aux.SignedStart) + if aux.SignedExpiry != nil && !(*time.Time)(aux.SignedExpiry).IsZero() { + u.SignedExpiry = (*time.Time)(aux.SignedExpiry) + } + if aux.SignedStart != nil && !(*time.Time)(aux.SignedStart).IsZero() { + u.SignedStart = (*time.Time)(aux.SignedStart) + } return nil } @@ -451,18 +478,8 @@ func populate(m map[string]any, k string, v any) { } } -func populateAny(m map[string]any, k string, v any) { - if v == nil { - return - } else if azcore.IsNullValue(v) { - m[k] = nil - } else { - m[k] = v - } -} - func unpopulate(data json.RawMessage, fn string, v any) error { - if data == nil { + if data == nil || string(data) == "null" { return nil } if err := json.Unmarshal(data, v); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go index 216f8b73..01d1422d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -46,6 +43,13 @@ type AppendBlobClientAppendBlockOptions struct { // analytics logging is enabled. RequestID *string + // Required if the request body is a structured message. Specifies the message schema version and properties. + StructuredBodyType *string + + // Required if the request body is a structured message. Specifies the length of the blob/file content inside the message + // body. Will always be smaller than Content-Length. + StructuredContentLength *int64 + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 @@ -239,9 +243,18 @@ type BlobClientDeleteImmutabilityPolicyOptions struct { // analytics logging is enabled. RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string } // BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. @@ -294,6 +307,10 @@ type BlobClientDownloadOptions struct { // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] Snapshot *string + // Specifies the response content should be returned as a structured message and specifies the message schema version and + // properties. + StructuredBodyType *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 @@ -305,7 +322,13 @@ type BlobClientDownloadOptions struct { // BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. type BlobClientGetAccountInfoOptions struct { - // placeholder for future optional parameters + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } // BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. @@ -426,9 +449,18 @@ type BlobClientSetImmutabilityPolicyOptions struct { // analytics logging is enabled. RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string } // BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. @@ -437,9 +469,18 @@ type BlobClientSetLegalHoldOptions struct { // analytics logging is enabled. RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string } // BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. @@ -705,6 +746,13 @@ type BlockBlobClientStageBlockOptions struct { // analytics logging is enabled. RequestID *string + // Required if the request body is a structured message. Specifies the message schema version and properties. + StructuredBodyType *string + + // Required if the request body is a structured message. Specifies the length of the blob/file content inside the message + // body. Will always be smaller than Content-Length. + StructuredContentLength *int64 + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 @@ -742,6 +790,13 @@ type BlockBlobClientUploadOptions struct { // analytics logging is enabled. RequestID *string + // Required if the request body is a structured message. Specifies the message schema version and properties. + StructuredBodyType *string + + // Required if the request body is a structured message. Specifies the length of the blob/file content inside the message + // body. Will always be smaller than Content-Length. + StructuredContentLength *int64 + // Optional. Indicates the tier to be set on the blob. Tier *AccessTier @@ -876,7 +931,13 @@ type ContainerClientGetAccessPolicyOptions struct { // ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. type ContainerClientGetAccountInfoOptions struct { - // placeholder for future optional parameters + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } // ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. @@ -1307,6 +1368,13 @@ type PageBlobClientUploadPagesOptions struct { // analytics logging is enabled. RequestID *string + // Required if the request body is a structured message. Specifies the message schema version and properties. + StructuredBodyType *string + + // Required if the request body is a structured message. Specifies the length of the blob/file content inside the message + // body. Will always be smaller than Content-Length. + StructuredContentLength *int64 + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 @@ -1360,7 +1428,13 @@ type ServiceClientFilterBlobsOptions struct { // ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. type ServiceClientGetAccountInfoOptions struct { - // placeholder for future optional parameters + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 } // ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go index bfa9883f..8d8c9534 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -30,7 +27,7 @@ type PageBlobClient struct { // ClearPages - The Clear Pages operation clears a set of pages from a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
@@ -69,13 +66,25 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"clear"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -83,41 +92,29 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince 
!= nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-page-write"] = []string{"clear"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -181,7 +178,7 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // 2016-05-31. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. 
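Most of the churn in zz_pageblob_client.go above is the regenerated builder emitting the same header assignments in a different (alphabetical) order: Accept and Content-Length now come first, x-ms-version last. Because the request headers live in a standard http.Header map underneath, the reordering is purely cosmetic. A minimal standard-library sketch, with header values copied from the ClearPages builder and a placeholder URL:

package main

import (
	"fmt"
	"net/http"
	"reflect"
)

func main() {
	// Set the same headers in two different orders, mirroring the reordered
	// assignments in clearPagesCreateRequest.
	a, _ := http.NewRequest(http.MethodPut, "https://account.blob.core.windows.net/c/b?comp=page", nil)
	a.Header["x-ms-page-write"] = []string{"clear"}
	a.Header["Accept"] = []string{"application/xml"}
	a.Header["x-ms-version"] = []string{"2025-01-05"}

	b, _ := http.NewRequest(http.MethodPut, "https://account.blob.core.windows.net/c/b?comp=page", nil)
	b.Header["Accept"] = []string{"application/xml"}
	b.Header["x-ms-version"] = []string{"2025-01-05"}
	b.Header["x-ms-page-write"] = []string{"clear"}

	// http.Header is a map[string][]string, so assignment order does not
	// affect the request that is eventually written to the wire.
	fmt.Println(reflect.DeepEqual(a.Header, b.Header)) // true
}

Reading these hunks as a reordering rather than a behavioral change makes the rest of the regenerated client much quicker to review.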
@@ -218,27 +215,27 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -283,7 +280,7 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) // Create - The Create operation creates a new page blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. 
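The precondition headers in these builders (If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since) keep the same idiom after regeneration: write the header only when the caller supplied a value, and render times as RFC1123 in GMT via (*t).In(gmt).Format(time.RFC1123). The sketch below reproduces that idiom with the standard library only; the package-level gmt variable stands in for the generated code's unexported location and is an assumption here:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// gmt stands in for the unexported GMT location the generated builders
// format conditional headers in (assumed here to be a fixed zero offset).
var gmt = time.FixedZone("GMT", 0)

// setPreconditions writes optional precondition headers the way the
// regenerated *CreateRequest helpers do: only when a value was supplied.
func setPreconditions(h http.Header, ifModifiedSince *time.Time, ifMatch *string) {
	if ifModifiedSince != nil {
		h["If-Modified-Since"] = []string{(*ifModifiedSince).In(gmt).Format(time.RFC1123)}
	}
	if ifMatch != nil {
		h["If-Match"] = []string{*ifMatch}
	}
}

func main() {
	h := http.Header{}
	ts := time.Date(2025, time.January, 5, 12, 0, 0, 0, time.UTC)
	etag := `"0x8DD2C1A9E7B"`
	setPreconditions(h, &ts, &etag)
	fmt.Println(h.Get("If-Modified-Since")) // Sun, 05 Jan 2025 12:00:00 GMT
	fmt.Println(h.Get("If-Match"))          // "0x8DD2C1A9E7B"
	fmt.Println(h.Get("If-None-Match"))     // empty: never supplied, never set
}

Leaving absent conditions out entirely, rather than sending empty headers, is what lets one builder serve both conditional and unconditional calls.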
@@ -322,13 +319,28 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if options != nil && options.Tier != nil { req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -336,24 +348,22 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + if options != nil && options.RequestID != nil { + 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -361,48 +371,35 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - if options != nil && options.BlobSequenceNumber != nil { - req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options 
!= nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -464,7 +461,7 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo // NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot // of a page blob // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -498,45 +495,45 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "pagelist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = 
[]string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -585,7 +582,7 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( // NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that // were changed between target blob and previous snapshot. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -619,51 +616,51 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "pagelist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - if options != nil && options.Prevsnapshot != nil { - reqQP.Set("prevsnapshot", *options.Prevsnapshot) - } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.PrevSnapshotURL != nil { - req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + if options != nil && options.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *options.Prevsnapshot) } - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] 
= []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.PrevSnapshotURL != nil { + req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -712,7 +709,7 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // Resize - Resize the Blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. // - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. 
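The pager builders (GetPageRangesCreateRequest, GetPageRangesDiffCreateRequest) now add their query parameters in alphabetical order: marker, maxresults, prevsnapshot, snapshot, timeout. This too is cosmetic: url.Values.Encode sorts keys before emitting them, so the query string is identical either way. A short illustration with the same parameter names (values are made up):

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	timeout := int32(30)

	qp := url.Values{}
	// Deliberately set out of alphabetical order.
	qp.Set("timeout", strconv.FormatInt(int64(timeout), 10))
	qp.Set("comp", "pagelist")
	qp.Set("marker", "page-2")

	// Encode always emits keys sorted, so reordering the Set calls in the
	// generated code cannot change the request URL.
	fmt.Println(qp.Encode()) // comp=pagelist&marker=page-2&timeout=30
}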
@@ -750,8 +747,25 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -759,33 +773,16 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if 
leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -831,7 +828,7 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo // UpdateSequenceNumber - Update the sequence number of the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to // page blobs only. This property indicates how the service should modify the blob's sequence number // - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber @@ -868,33 +865,33 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = 
[]string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -940,7 +937,7 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp // UploadPages - The Upload Pages operation writes a range of pages to a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - body - Initial data // - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. @@ -980,19 +977,31 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if options != nil && options.TransactionalContentCRC64 != nil { req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -1000,41 +1009,35 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = 
[]string{*cpkScopeInfo.EncryptionScope} } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + req.Raw().Header["x-ms-page-write"] = []string{"update"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.StructuredBodyType != nil { + req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if options != nil && options.StructuredContentLength != nil { + req.Raw().Header["x-ms-structured-content-length"] = []string{strconv.FormatInt(*options.StructuredContentLength, 10)} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -1101,6 +1104,9 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-structured-body"); val != "" { + result.StructuredBodyType = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } @@ -1111,7 
+1117,7 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // a URL // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - sourceURL - Specify a URL to the copy source. // - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header // and x-ms-range/Range destination range header. @@ -1158,31 +1164,41 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - req.Raw().Header["x-ms-source-range"] = []string{sourceRange} - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["x-ms-range"] = []string{rangeParam} if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if 
sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} @@ -1190,44 +1206,34 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + 
req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go index 738d23c8..5ed22156 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -91,6 +88,9 @@ type AppendBlobClientAppendBlockResponse struct { // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string + // StructuredBodyType contains the information returned from the x-ms-structured-body header response. + StructuredBodyType *string + // Version contains the information returned from the x-ms-version header response. Version *string } @@ -472,6 +472,12 @@ type BlobClientDownloadResponse struct { // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string + // StructuredBodyType contains the information returned from the x-ms-structured-body header response. + StructuredBodyType *string + + // StructuredContentLength contains the information returned from the x-ms-structured-content-length header response. + StructuredContentLength *int64 + // TagCount contains the information returned from the x-ms-tag-count header response. TagCount *int64 @@ -493,6 +499,9 @@ type BlobClientGetAccountInfoResponse struct { // Date contains the information returned from the Date header response. Date *time.Time + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + // RequestID contains the information returned from the x-ms-request-id header response. 
RequestID *string @@ -1170,6 +1179,9 @@ type BlockBlobClientStageBlockResponse struct { // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string + // StructuredBodyType contains the information returned from the x-ms-structured-body header response. + StructuredBodyType *string + // Version contains the information returned from the x-ms-version header response. Version *string } @@ -1179,6 +1191,9 @@ type BlockBlobClientUploadResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -1203,6 +1218,9 @@ type BlockBlobClientUploadResponse struct { // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string + // StructuredBodyType contains the information returned from the x-ms-structured-body header response. + StructuredBodyType *string + // Version contains the information returned from the x-ms-version header response. Version *string @@ -1374,6 +1392,9 @@ type ContainerClientGetAccountInfoResponse struct { // Date contains the information returned from the Date header response. Date *time.Time + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -1876,6 +1897,9 @@ type PageBlobClientUploadPagesResponse struct { // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string + // StructuredBodyType contains the information returned from the x-ms-structured-body header response. + StructuredBodyType *string + // Version contains the information returned from the x-ms-version header response. Version *string } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index 9a73b730..8764591b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -33,7 +30,7 @@ type ServiceClient struct { // be scoped within the expression to a single container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - where - Filters the results to return only to return only blobs whose tags match the specified expression. // - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. 
func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { @@ -62,25 +59,25 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "blobs") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } - reqQP.Set("where", where) if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + reqQP.Set("where", where) req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -112,7 +109,7 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { var err error @@ -139,11 +136,17 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -186,7 +189,7 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. 
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { var err error @@ -213,17 +216,17 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "properties") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -249,7 +252,7 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { var err error @@ -276,17 +279,17 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "stats") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -319,7 +322,7 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - keyInfo - Key information // - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey // method. 
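One service-client change that is behavioral rather than cosmetic: getAccountInfoCreateRequest, getPropertiesCreateRequest, getStatisticsCreateRequest and friends now run the encoded query through strings.Replace(reqQP.Encode(), "+", "%20", -1), as filterBlobsCreateRequest already did. url.Values.Encode escapes spaces as '+' (form encoding), and the replacement normalizes them to %20, presumably to match the service's URL canonicalization; literal plus signs in values are unaffected because Encode has already escaped them to %2B. A minimal sketch (the where expression is an arbitrary example):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	qp := url.Values{}
	qp.Set("comp", "blobs")
	qp.Set("where", `"project" = 'sql exporter'`)

	encoded := qp.Encode()
	fmt.Println(encoded)
	// comp=blobs&where=%22project%22+%3D+%27sql+exporter%27

	// Spaces become %20 instead of '+'; pre-escaped %2B sequences (literal
	// plus signs) are untouched because they no longer contain a raw '+'.
	fmt.Println(strings.Replace(encoded, "+", "%20", -1))
	// comp=blobs&where=%22project%22%20%3D%20%27sql%20exporter%27
}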
@@ -348,17 +351,17 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "userdelegationkey") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, keyInfo); err != nil { return nil, err } @@ -393,7 +396,7 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo // NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified // account // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager // method. // @@ -405,8 +408,8 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) @@ -414,18 +417,18 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -451,7 +454,7 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - storageServiceProperties - The StorageService properties. // - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. 
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { @@ -479,17 +482,17 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "properties") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { return nil, err } @@ -514,7 +517,7 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2025-01-05 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ @@ -549,15 +552,15 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, multipartContentType); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 58665032..ee3732eb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. 
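Editor note: the `include` query parameter in the FilterBlobs and ListContainersSegment builders above is produced by `strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")`. That one-liner converts a slice of enum values into a comma-separated list by round-tripping through `fmt.Sprint`'s `[a b c]` rendering of a slice. A standalone sketch with a stand-in string slice (the element values are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Stand-in for a slice of include-enum values such as
	// []ListContainersIncludeType; the element values are illustrative.
	include := []string{"metadata", "deleted", "system"}

	// fmt.Sprint renders a slice as "[metadata deleted system]".
	rendered := fmt.Sprint(include)

	// Trim the brackets, split on whitespace, and re-join with commas,
	// which is the same transformation the generated clients apply.
	joined := strings.Join(strings.Fields(strings.Trim(rendered, "[]")), ",")

	fmt.Println(joined) // metadata,deleted,system
}
```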
@@ -36,7 +33,14 @@ func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { } func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } p, err := time.Parse(time.RFC1123, string(data)) *t = dateTimeRFC1123(p) return err } + +func (t dateTimeRFC1123) String() string { + return time.Time(t).Format(time.RFC1123) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index 82b37013..e9eac9bc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. @@ -15,12 +12,16 @@ import ( ) // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) type dateTimeRFC3339 time.Time @@ -36,17 +37,36 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) { } func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { + if len(data) == 0 { + return nil + } + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, string(data)) } @@ -56,3 +76,7 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error { *t = dateTimeRFC3339(p) return err } + +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 1bd0e4de..355d0176 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -1,6 +1,3 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go index c1b3a3d2..5c44af34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -19,7 +19,7 @@ const ( type BatchTransferOptions struct { TransferSize int64 ChunkSize int64 - NumChunks uint16 + NumChunks uint64 Concurrency uint16 Operation func(ctx context.Context, offset int64, chunkSize int64) error OperationName string @@ -44,7 +44,6 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { // Create the goroutines that process each operation (in parallel). for g := uint16(0); g < o.Concurrency; g++ { - //grIndex := g go func() { for f := range operationChannel { err := f() @@ -54,7 +53,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { + for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { curChunkSize := o.ChunkSize if chunkNum == o.NumChunks-1 { // Last chunk @@ -69,7 +68,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { // Wait for the operations to complete. 
var firstErr error = nil - for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { + for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { responseError := <-operationResponseChannel // record the first error (the original error which should cause the other chunks to fail with canceled context) if responseError != nil && firstErr == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go index e7c8e921..fff61016 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go @@ -8,11 +8,12 @@ package shared import ( "errors" + "net/http" + "strings" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "net/http" - "strings" ) type storageAuthorizer struct { @@ -20,13 +21,14 @@ type storageAuthorizer struct { tenantID string } -func NewStorageChallengePolicy(cred azcore.TokenCredential) policy.Policy { - s := storageAuthorizer{scopes: []string{TokenScope}} - return runtime.NewBearerTokenPolicy(cred, []string{TokenScope}, &policy.BearerTokenOptions{ +func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string, allowHTTP bool) policy.Policy { + s := storageAuthorizer{scopes: []string{audience}} + return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{ AuthorizationHandler: policy.AuthorizationHandler{ OnRequest: s.onRequest, OnChallenge: s.onChallenge, }, + InsecureAllowCredentialWithHTTP: allowHTTP, }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go index cdcadf31..072fd27b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go @@ -1,6 +1,6 @@ -//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix) +//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix || zos) // +build go1.18 -// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix +// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix zos // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. 
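Editor note: the `batch_transfer.go` change above widens `NumChunks` and the chunk loop counters from `uint16` to `uint64`. A `uint16` counter limits a transfer to 65,535 chunks, so with an 8 MiB chunk size a single upload or download tops out at roughly 512 GiB; `uint64` removes that ceiling. A minimal sketch of the chunk-count arithmetic, assuming the usual ceiling-division formula rather than the SDK's exact code:

```go
package main

import "fmt"

// numChunks returns how many fixed-size chunks are needed to cover
// transferSize bytes (ceiling division). This is a simplified sketch of
// the kind of calculation a DoBatchTransfer caller performs, not the
// SDK's exact implementation.
func numChunks(transferSize, chunkSize int64) uint64 {
	if transferSize <= 0 || chunkSize <= 0 {
		return 0
	}
	return uint64((transferSize + chunkSize - 1) / chunkSize)
}

func main() {
	const chunkSize = 8 * 1024 * 1024   // 8 MiB
	const transferSize = int64(1) << 40 // 1 TiB

	n := numChunks(transferSize, chunkSize)
	fmt.Println(n)         // 131072 chunks
	fmt.Println(n > 65535) // true: would overflow a uint16 counter
}
```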
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go index c131facf..c7922076 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -44,15 +44,6 @@ const ( const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 -const ( - AppendBlobClient = "azblob/appendblob.Client" - BlobClient = "azblob/blob.Client" - BlockBlobClient = "azblob/blockblob.Client" - ContainerClient = "azblob/container.Client" - PageBlobClient = "azblob/pageblob.Client" - ServiceClient = "azblob/service.Client" -) - var CRC64Table = crc64.MakeTable(crc64Polynomial) // CopyOptions returns a zero-value T if opts is nil. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index 14e90a1f..63ceac97 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -8,7 +8,6 @@ package pageblob import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "net/http" "net/url" @@ -23,6 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. @@ -36,11 +36,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -54,7 +55,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -70,7 +71,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.PageBlobClient, 
exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -379,8 +380,8 @@ func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (pb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return pb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go index e6148f17..39aef20f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -198,7 +198,7 @@ type GetPageRangesOptions struct { func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { - return nil, nil, nil + return &generated.PageBlobClientGetPageRangesOptions{}, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go index 4c23208e..20f9875a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -8,6 +8,7 @@ package sas import ( "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "net" "net/url" "strings" @@ -23,7 +24,7 @@ const ( var ( // Version is the default version encoded in the SAS token. - Version = "2021-12-02" + Version = generated.ServiceVersion ) // TimeFormats ISO 8601 format. 
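Editor note: the constructor changes above thread the audience and the `InsecureAllowCredentialWithHTTP` setting into the storage challenge policy and register the client under `exported.ModuleName`. From the caller's side, the public signature `pageblob.NewClient(blobURL, cred, options)` is unchanged. A hedged usage sketch (the account, container, and blob names are placeholders, and default options are used; the credential comes from the azidentity module, as suggested by the `NewClient` doc comment):

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

func main() {
	// Azure AD credential, typically obtained via the azidentity module.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder URL; replace with a real account/container/page blob.
	blobURL := "https://myaccount.blob.core.windows.net/mycontainer/mypageblob"

	// nil options accept the defaults, including the default audience
	// used by the storage challenge policy.
	client, err := pageblob.NewClient(blobURL, cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```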
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index 45f73084..813fa77a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -255,7 +255,7 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us signature: signature, } - //User delegation SAS specific parameters + // User delegation SAS specific parameters p.signedOID = *udk.SignedOID p.signedTID = *udk.SignedTID p.signedStart = *udk.SignedStart @@ -272,7 +272,7 @@ func getCanonicalName(account string, containerName string, blobName string, dir // Blob: "/blob/account/containername/blobname" elements := []string{"/blob/", account, "/", containerName} if blobName != "" { - elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + elements = append(elements, "/", strings.ReplaceAll(blobName, "\\", "/")) } else if directoryName != "" { elements = append(elements, "/", directoryName) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go index 57fe053f..758739cb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go @@ -117,7 +117,7 @@ func (up URLParts) String() string { rawQuery := up.UnparsedParams - //If no snapshot is initially provided, fill it in from the SAS query properties to help the user + // If no snapshot is initially provided, fill it in from the SAS query properties to help the user if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go index 46177534..cf39c3d5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -11,9 +11,6 @@ import ( "context" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "net/http" "strings" "time" @@ -21,8 +18,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" @@ -40,15 +40,16 @@ type Client base.Client[generated.ServiceClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred 
azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred)), nil + return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -58,11 +59,11 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewServiceClient(serviceURL, azClient, nil)), nil + return (*Client)(base.NewServiceClient(serviceURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -74,12 +75,12 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewServiceClient(serviceURL, azClient, cred)), nil + return (*Client)(base.NewServiceClient(serviceURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -136,6 +137,10 @@ func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { return base.InnerClient((*base.Client[generated.BlobClient])(b)) } +func (s *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ServiceClient])(s)) +} + // URL returns the URL endpoint used by the Client object. func (s *Client) URL() string { return s.generated().Endpoint() @@ -145,7 +150,7 @@ func (s *Client) URL() string { // this Client's URL. The new container.Client uses the same request policy pipeline as the Client. 
func (s *Client) NewContainerClient(containerName string) *container.Client { containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName) - return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(shared.ContainerClient), s.credential())) + return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(exported.ModuleName), s.credential(), s.getClientOptions())) } // CreateContainer is a lifecycle method to creates a new container under the specified account. @@ -275,7 +280,6 @@ func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.A st := o.format() qps, err := sas.AccountSignatureValues{ Version: sas.Version, - Protocol: sas.ProtocolHTTPS, Permissions: permissions.String(), ResourceTypes: resources.String(), StartTime: st, @@ -315,7 +319,8 @@ func (s *Client) NewBatchBuilder() (*BatchBuilder, error) { switch cred := s.credential().(type) { case *azcore.TokenCredential: - authPolicy = shared.NewStorageChallengePolicy(*cred) + conOptions := s.getClientOptions() + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP) case *SharedKeyCredential: authPolicy = exported.NewSharedKeyCredPolicy(cred) case nil: diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 00000000..fe79e3ad --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +/toml.test +/toml-test diff --git a/vendor/github.com/joeshaw/multierror/LICENSE b/vendor/github.com/BurntSushi/toml/COPYING similarity index 97% rename from vendor/github.com/joeshaw/multierror/LICENSE rename to vendor/github.com/BurntSushi/toml/COPYING index 5869b24e..01b57432 100644 --- a/vendor/github.com/joeshaw/multierror/LICENSE +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Joe Shaw +Copyright (c) 2013 TOML authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 00000000..235496ee --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,120 @@ +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` packages. + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://pkg.go.dev/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). 
+ +This library requires Go 1.18 or newer; add it to your go.mod with: + + % go get github.com/BurntSushi/toml@latest + +It also comes with a TOML validator CLI tool: + + % go install github.com/BurntSushi/toml/cmd/tomlv@latest + % tomlv some-toml-file.toml + +### Examples +For the simplest example, consider some TOML file as just a list of keys and +values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which can be decoded with: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time +} + +var conf Config +_, err := toml.Decode(tomlData, &conf) +``` + +You can also use struct tags if your struct field name doesn't map to a TOML key +value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +Beware that like other decoders **only exported fields** are considered when +encoding and decoding; private fields are silently ignored. + +### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces +Here's an example that automatically parses values in a `mail.Address`: + +```toml +contacts = [ + "Donald Duck ", + "Scrooge McDuck ", +] +``` + +Can be decoded with: + +```go +// Create address type which satisfies the encoding.TextUnmarshaler interface. +type address struct { + *mail.Address +} + +func (a *address) UnmarshalText(text []byte) error { + var err error + a.Address, err = mail.ParseAddress(string(text)) + return err +} + +// Decode it. +func decode() { + blob := ` + contacts = [ + "Donald Duck ", + "Scrooge McDuck ", + ] + ` + + var contacts struct { + Contacts []address + } + + _, err := toml.Decode(blob, &contacts) + if err != nil { + log.Fatal(err) + } + + for _, c := range contacts.Contacts { + fmt.Printf("%#v\n", c.Address) + } + + // Output: + // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} + // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} +} +``` + +To target TOML specifically you can implement `UnmarshalTOML` TOML interface in +a similar way. + +### More complex usage +See the [`_example/`](/_example) directory for a more complex example. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 00000000..3fa516ca --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,638 @@ +package toml + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "io/fs" + "math" + "os" + "reflect" + "strconv" + "strings" + "time" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(any) error +} + +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v any) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) + return err +} + +// Decode the TOML data in to the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v any) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. 
+func DecodeFile(path string, v any) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use [PrimitiveDecode] to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded any + context Key +} + +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 +) + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. +// +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// email addresses. +// +// # Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. 
+func (dec *Decoder) Decode(v any) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, any, or something that + // implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := io.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data any, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == primitiveType { + // Save the undecoded data and the key context into the primitive + // value. 
+ context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + err := v.UnmarshalTOML(data) + if err != nil { + return md.parseErr(err) + } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. + if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } + return nil + } + if v, ok := rvi.(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). + + k := rv.Kind() + + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return md.e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error { + tmap, ok := mapping.(map[string]any) + if !ok { + if mapping == nil { + return nil + } + return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping)) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) + } + + tmap, ok := mapping.(map[string]any) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + + err := 
md.unify(v, indirect(rvval)) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey := indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data any, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data any, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data any, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error { + rvk := rv.Kind() + + if num, ok := data.(float64); ok { + switch rvk { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data any, rv reflect.Value) error { + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. 
+ if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) + } + rv.SetInt(int64(dur)) + return nil + } + } + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil +} + +func (md *MetaData) unifyBool(data any, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data any, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return md.parseErr(err) + } + return nil +} + +func (md *MetaData) badtype(dst string, data any) error { + return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + d := string(md.data) + return ParseError{ + Message: err.Error(), + err: err, + LastKey: k, + Position: md.keyInfo[k].pos.withCol(d), + Line: md.keyInfo[k].pos.Line, + input: d, + } +} + +func (md *MetaData) e(format string, args ...any) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v any) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). 
+func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { + return true + } + return false +} + +// fmt %T with "interface {}" replaced with "any", which is far more readable. +func fmtType(t any) string { + return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any") +} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 00000000..155709a8 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,29 @@ +package toml + +import ( + "encoding" + "io" +) + +// TextMarshaler is an alias for encoding.TextMarshaler. +// +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) } + +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v any) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 00000000..82c90a90 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,8 @@ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. 
+package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 00000000..ac196e7d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,776 @@ +package toml + +import ( + "bufio" + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml/internal" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayNilElement = errors.New("toml: cannot encode array with nil element") + errNonString = errors.New("toml: cannot encode a map with non-string key type") + errNoKey = errors.New("toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var dblQuotedReplacer = strings.NewReplacer( + "\"", "\\\"", + "\\", "\\\\", + "\x00", `\u0000`, + "\x01", `\u0001`, + "\x02", `\u0002`, + "\x03", `\u0003`, + "\x04", `\u0004`, + "\x05", `\u0005`, + "\x06", `\u0006`, + "\x07", `\u0007`, + "\b", `\b`, + "\t", `\t`, + "\n", `\n`, + "\x0b", `\u000b`, + "\f", `\f`, + "\r", `\r`, + "\x0e", `\u000e`, + "\x0f", `\u000f`, + "\x10", `\u0010`, + "\x11", `\u0011`, + "\x12", `\u0012`, + "\x13", `\u0013`, + "\x14", `\u0014`, + "\x15", `\u0015`, + "\x16", `\u0016`, + "\x17", `\u0017`, + "\x18", `\u0018`, + "\x19", `\u0019`, + "\x1a", `\u001a`, + "\x1b", `\u001b`, + "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, +) + +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid TOML. +type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Marshal returns a TOML representation of the Go value. +// +// See [Encoder] for a description of the encoding process. +func Marshal(v any) ([]byte, error) { + buff := new(bytes.Buffer) + if err := NewEncoder(buff).Encode(v); err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// Encoder encodes a Go to a TOML document. +// +// The mapping between Go values and TOML values should be precisely the same as +// for [Decode]. +// +// time.Time is encoded as a RFC 3339 string, and time.Duration as its string +// representation. +// +// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to +// encoding the value as custom TOML. +// +// If you want to write arbitrary binary data then you will need to use +// something like base64 since TOML does not have any binary types. +// +// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes +// are encoded first. +// +// Go maps will be sorted alphabetically by key for deterministic output. +// +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and string with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// +// Encoding Go values without a corresponding TOML representation will return an +// error. Examples of this includes maps with non-string keys, slices with nil +// elements, embedded non-struct types, and nested slices containing maps or +// structs. (e.g. 
[][]map[string]string is not allowed but []map[string]string +// is okay, as is []map[string][]string). +// +// NOTE: only exported keys are encoded due to the use of reflection. Unexported +// keys are silently discarded. +type Encoder struct { + Indent string // string for a single indentation level; default is two spaces. + hasWritten bool // written any output to w yet? + w *bufio.Writer +} + +// NewEncoder create a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: bufio.NewWriter(w), Indent: " "} +} + +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. +// +// An error is returned if the value given cannot be encoded to a valid TOML +// document. +func (enc *Encoder) Encode(v any) error { + rv := eindirect(reflect.ValueOf(v)) + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): + enc.writeKeyValue(key, rv, false) + return + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. + enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. 
+ format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. + enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) + } + + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("nan") + } else if math.IsInf(f, 0) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("nan") + } else if math.IsInf(f, 0) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface()))) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. 
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := eindirect(rv.Index(i)) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := eindirect(rv.Index(i)) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []reflect.Value + for _, mapKey := range rv.MapKeys() { + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { + mapKeysSub = append(mapKeysSub, mapKey) + } else { + mapKeysDirect = append(mapKeysDirect, mapKey) + } + } + + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) + for i, mapKey := range mapKeys { + val := eindirect(rv.MapIndex(mapKey)) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey.String()}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey.String()), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. 
+ var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { + continue + } + + frv := eindirect(rv.Field(i)) + + // Need to make a copy because ... ehm, I don't know why... I guess + // allocating a new array can cause it to fail(?) + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int, totalFields int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. + continue + } + + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != totalFields-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. 
+func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if isTableArray(rv) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + default: + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") + } +} + +func isMarshaler(rv reflect.Value) bool { + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) +} + +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false + } + + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. + if tt == nil { + encPanic(errArrayNilElement) + } + + if ret && !typeEqual(tomlHash, tt) { + ret = false + } + } + return ret +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Struct: + if rv.Type().Comparable() { + return reflect.Zero(rv.Type()).Interface() == rv.Interface() + } + // Need to also check if all the fields are empty, otherwise something + // like this with uncomparable types will always return true: + // + // type a struct{ field b } + // type b struct{ s []string } + // s := a{field: b{s: []string{"AAA"}}} + for i := 0; i < rv.NumField(); i++ { + if !isEmpty(rv.Field(i)) { + return false + } + } + return true + case reflect.Bool: + return !rv.Bool() + case reflect.Ptr: + return rv.IsNil() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +// Write a key/value pair: +// +// key = +// +// This is also used for "k = v" in inline tables; so something like this will +// be written in three calls: +// +// ┌───────────────────┐ +// │ ┌───┐ ┌────┐│ +// v v v v vv +// key = {k = 1, k2 = 2} +func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + /// Marshaler used on top-level document; 
call eElement() to just call + /// Marshal{TOML,Text}. + if len(key) == 0 { + enc.eElement(val) + return + } + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + if !inline { + enc.newline() + } +} + +func (enc *Encoder) wf(format string, v ...any) { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +// Resolve any level of pointers to the actual value (e.g. **string → string). +func eindirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. + if pv := v.Addr(); isMarshaler(pv) { + return pv + } + } + return v + } + + if v.IsNil() { + return v + } + + return eindirect(v.Elem()) +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 00000000..b7077d3a --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,347 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax such as +// invalid syntax, duplicate keys, etc. +// +// In addition to the error message itself, you can also print detailed location +// information with context by using [ErrorWithPosition]: +// +// toml: error: Key 'fruit' was already created and cannot be used as an array. +// +// At line 4, column 2-7: +// +// 2 | fruit = [] +// 3 | +// 4 | [[fruit]] # Not allowed +// ^^^^^ +// +// [ErrorWithUsage] can be used to print the above with some more detailed usage +// guidance: +// +// toml: error: newlines not allowed within inline tables +// +// At line 1, column 18: +// +// 1 | x = [{ key = 42 # +// ^ +// +// Error help: +// +// Inline tables must always be on a single line: +// +// table = {key = 42, second = 43} +// +// It is invalid to split them over multiple lines like so: +// +// # INVALID +// table = { +// key = 42, +// second = 43 +// } +// +// Use regular for this: +// +// [table] +// key = 42 +// second = 43 +type ParseError struct { + Message string // Short technical message. + Usage string // Longer message with usage guidance; may be blank. + Position Position // Position of the error + LastKey string // Last parsed key, may be blank. + + // Line the error occurred. + // + // Deprecated: use [Position]. + Line int + + err error + input string +} + +// Position of an error. +type Position struct { + Line int // Line number, starting at 1. + Col int // Error column, starting at 1. + Start int // Start of error, as byte offset starting at 0. + Len int // Length of the error in bytes. +} + +func (p Position) withCol(tomlFile string) Position { + var ( + pos int + lines = strings.Split(tomlFile, "\n") + ) + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= p.Start { + p.Col = p.Start - pos + 1 + if p.Col < 1 { // Should never happen, but just in case. 
+ p.Col = 1 + } + break + } + pos += ll + } + return p +} + +func (pe ParseError) Error() string { + if pe.LastKey == "" { + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) + } + return fmt.Sprintf("toml: line %d (last key %q): %s", + pe.Position.Line, pe.LastKey, pe.Message) +} + +// ErrorWithPosition returns the error with detailed location context. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithPosition() string { + if pe.input == "" { // Should never happen, but just in case. + return pe.Error() + } + + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + + var ( + lines = strings.Split(pe.input, "\n") + b = new(strings.Builder) + ) + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + pe.Message, pe.Position.Line, pe.Position.Col) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2])) + } + + /// Expand tabs, so that the ^^^s are at the correct position, but leave + /// "column 10-13" intact. Adjusting this to the visual column would be + /// better, but we don't know the tabsize of the user in their editor, which + /// can be 8, 4, 2, or something else. We can't know. So leaving it as the + /// character index is probably the "most correct". + expanded := expandTab(lines[pe.Position.Line-1]) + diff := len(expanded) - len(lines[pe.Position.Line-1]) + + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage returns the error with detailed location context and usage +// guidance. +// +// See the documentation on [ParseError]. +func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" + } + return m +} + +func expandTab(s string) string { + var ( + b strings.Builder + l int + fill = func(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = ' ' + } + return string(b) + } + ) + b.Grow(len(s)) + for _, r := range s { + switch r { + case '\t': + tw := 8 - l%8 + b.WriteString(fill(tw)) + l += tw + default: + b.WriteRune(r) + l += 1 + } + } + return b.String() +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errParseDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} + errParseRange struct { + i any // int or float + size string // "int64", "uint16", etc. 
+ } + errUnsafeFloat struct { + i interface{} // float32 or float64 + size string // "float32" or "float64" + } + errParseDuration struct{ d string } +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) } +func (e errParseDate) Usage() string { return usageDate } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errUnsafeFloat) Error() string { + return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size) +} +func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. + +The following escape sequences are supported: +\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX + +To prevent a '\' from being recognized as an escape character, use either: + +- a ' or '''-delimited string; escape characters aren't processed in them; or +- write two backslashes to get a single backslash: '\\'. + +If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/' +instead of '\' will usually also work: "C:/Users/martin". +` + +const usageInlineNewline = ` +Inline tables must always be on a single line: + + table = {key = 42, second = 43} + +It is invalid to split them over multiple lines like so: + + # INVALID + table = { + key = 42, + second = 43 + } + +Use regular for this: + + [table] + key = 42 + second = 43 +` + +const usageStringNewline = ` +Strings must always be on a single line, and cannot span more than one line: + + # INVALID + string = "Hello, + world!" + +Instead use """ or ''' to split strings over multiple lines: + + string = """Hello, + world!""" +` + +const usageIntOverflow = ` +This number is too large; this may be an error in the TOML, but it can also be a +bug in the program that uses too small of an integer. + +The maximum and minimum values are: + + size │ lowest │ highest + ───────┼────────────────┼────────────── + int8 │ -128 │ 127 + int16 │ -32,768 │ 32,767 + int32 │ -2,147,483,648 │ 2,147,483,647 + int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷ + uint8 │ 0 │ 255 + uint16 │ 0 │ 65,535 + uint32 │ 0 │ 4,294,967,295 + uint64 │ 0 │ 1.8 × 10¹⁸ + +int refers to int32 on 32-bit systems and int64 on 64-bit systems. 
+` + +const usageUnsafeFloat = ` +This number is outside of the "safe" range for floating point numbers; whole +(non-fractional) numbers outside the below range can not always be represented +accurately in a float, leading to some loss of accuracy. + +Explicitly mark a number as a fractional unit by adding ".0", which will incur +some loss of accuracy; for example: + + f = 2_000_000_000.0 + +Accuracy ranges: + + float32 = 16,777,215 + float64 = 9,007,199,254,740,991 +` + +const usageDuration = ` +A duration must be as "number", without any spaces. Valid units are: + + ns nanoseconds (billionth of a second) + us, µs microseconds (millionth of a second) + ms milliseconds (thousands of a second) + s seconds + m minutes + h hours + +You can combine multiple units; for example "5m10s" for 5 minutes and 10 +seconds. +` + +const usageDate = ` +A TOML datetime must be in one of the following formats: + + 2006-01-02T15:04:05Z07:00 Date and time, with timezone. + 2006-01-02T15:04:05 Date and time, but without timezone. + 2006-01-02 Date without a time or timezone. + 15:04:05 Just a time, without any timezone. + +Seconds may optionally have a fraction, up to nanosecond precision: + + 15:04:05.123 + 15:04:05.856018510 +` + +// TOML 1.1: +// The seconds part in times is optional, and may be omitted: +// 2006-01-02T15:04Z07:00 +// 2006-01-02T15:04 +// 15:04 diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go new file mode 100644 index 00000000..022f15bc --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/internal/tz.go @@ -0,0 +1,36 @@ +package internal + +import "time" + +// Timezones used for local datetime, date, and time TOML types. +// +// The exact way times and dates without a timezone should be interpreted is not +// well-defined in the TOML specification and left to the implementation. These +// defaults to current local timezone offset of the computer, but this can be +// changed by changing these variables before decoding. +// +// TODO: +// Ideally we'd like to offer people the ability to configure the used timezone +// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit +// tricky: the reason we use three different variables for this is to support +// round-tripping – without these specific TZ names we wouldn't know which +// format to use. +// +// There isn't a good way to encode this right now though, and passing this sort +// of information also ties in to various related issues such as string format +// encoding, encoding of comments, etc. +// +// So, for the time being, just put this in internal until we can write a good +// comprehensive API for doing all of this. +// +// The reason they're exported is because they're referred from in e.g. +// internal/tag. +// +// Note that this behaviour is valid according to the TOML spec as the exact +// behaviour is left up to implementations. 
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 00000000..1c3b4770 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1272 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemStringEsc + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool + esc bool + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string, tomlNext bool) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. 
+ if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError && w == 1 { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// error stops all lexing by emitting an error and returning `nil`. +// +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) error(err error) stateFn { + if lx.atEOF { + return lx.errorPrevLine(err) + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: err} + return nil +} + +// errorfPrevline is like error(), but sets the position to the last column of +// the previous line. +// +// This is so that unexpected EOF or NL errors don't show on a new blank line. +func (lx *lexer) errorPrevLine(err error) stateFn { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorPos is like error(), but allows explicitly setting the position. +func (lx *lexer) errorPos(start, length int, err error) stateFn { + pos := lx.getPos() + pos.Start = start + pos.Len = length + lx.items <- item{typ: itemError, pos: pos, err: err} + return nil +} + +// errorf is like error, and creates a new error. 
+func (lx *lexer) errorf(format string, values ...any) stateFn { + if lx.atEOF { + pos := lx.getPos() + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} + return nil +} + +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case '#': + lx.push(lexTop) + return lexCommentStart + case '[': + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '#': + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == '[' { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == '.': + lx.ignore() + return lexTableNameStart + case r == ']': + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table. 
+// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only 'a' inside 'a.b'. +func lexBareName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r, lx.tomlNext) { + return lexBareName + } + lx.backup() + lx.emit(itemText) + return lx.pop() +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only '"a"' inside '"a".b'. +func lexQuotedName(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case r == '"': + lx.ignore() // ignore the '"' + return lexString + case r == '\'': + lx.ignore() // ignore the "'" + return lexRawString + case r == eof: + return lx.errorf("unexpected EOF; expected value") + default: + return lx.errorf("expected value but found %q instead", r) + } +} + +// lexKeyStart consumes all key parts until a '='. +func lexKeyStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '=': key name appears blank") + case r == '.': + return lx.errorf("unexpected '.': keys cannot start with a '.'") + case r == '"' || r == '\'': + lx.ignore() + fallthrough + default: // Bare key + lx.emit(itemKeyStart) + return lexKeyNameStart + } +} + +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName + default: + lx.push(lexKeyEnd) + return lexBareName + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r)) + } + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. 
+ r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r)) + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + if lx.esc { + lx.esc = false + lx.emit(itemStringEsc) + } else { + lx.emit(itemString) + } + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. 
+ /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.esc = false + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + if isNL(lx.next()) { /// \ escaping newline. + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + lx.esc = true + r := lx.next() + switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. 
+ fallthrough + case '\\': + return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHex(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. 
+func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { + r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + case 'b': + r = lx.peek() + if !isBinary(r) { + lx.errorf("not a binary number: '%s%c'", lx.current(), r) + } + return lexBinaryInteger + case 'o': + r = lx.peek() + if !isOctal(r) { + lx.errorf("not an octal number: '%s%c'", lx.current(), r) + } + return lexOctalInteger + case 'x': + r = lx.peek() + if !isHex(r) { + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) + } + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. 
+func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + switch r := lx.next(); { + case isNL(r) || r == eof: + lx.backup() + lx.emit(itemText) + return lx.pop() + default: + return lexComment + } +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ, item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) || r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } +func isBareKeyChar(r rune, tomlNext bool) bool { + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' +} diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 00000000..0d337026 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,145 @@ +package toml + +import ( + "strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + keyInfo map[string]keyInfo + mapping map[string]any + keys []Key + decoded map[string]struct{} + data []byte // Input file; for errors. +} + +// IsDefined reports if the key exists in the TOML data. +// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). 
Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]any + ok bool + hashOrVal any = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]any); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a [Primitive] value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. +type Key []string + +func (k Key) String() string { + // This is called quite often, so it's a bit funky to make it faster. + var b strings.Builder + b.Grow(len(k) * 25) +outer: + for i, kk := range k { + if i > 0 { + b.WriteByte('.') + } + if kk == "" { + b.WriteString(`""`) + } else { + for _, r := range kk { + // "Inline" isBareKeyChar + if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') { + b.WriteByte('"') + b.WriteString(dblQuotedReplacer.Replace(kk)) + b.WriteByte('"') + continue outer + } + } + b.WriteString(kk) + } + } + return b.String() +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, r := range k[i] { + if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' { + continue + } + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + return k[i] +} + +// Like append(), but only increase the cap by 1. +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece. +func (k Key) last() string { return k[len(k)-1] } // last piece of this key. 
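The MetaData type added above is obtained from the package's top-level Decode call, which returns it alongside the decoded value. A minimal usage sketch of IsDefined, Type and Undecoded follows; the config struct, its field names and the TOML snippet are illustrative only and not part of this change:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Name string `toml:"name"`
	Port int    `toml:"port"`
}

func main() {
	const doc = `
name = "exporter"
port = 9237

[extra]
debug = true
`
	var cfg config
	// Decode returns MetaData next to the decoded value.
	md, err := toml.Decode(doc, &cfg)
	if err != nil {
		panic(err)
	}

	fmt.Println(md.IsDefined("extra", "debug")) // true: the key exists in the input
	fmt.Println(md.Type("port"))                // "Integer"
	for _, k := range md.Undecoded() {
		// Keys present in the TOML but not mapped onto cfg, e.g. extra.debug.
		fmt.Println("undecoded:", k.String())
	}
}

Undecoded is handy for flagging unknown configuration keys: anything not mapped onto the target struct (or still wrapped in a Primitive) stays in the list, in document order.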
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 00000000..e3ea8a9a --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,845 @@ +package toml + +import ( + "fmt" + "math" + "os" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + tomlNext bool + + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]any // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). +} + +type keyInfo struct { + pos Position + tomlType tomlType +} + +func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 + data = data[2:] + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. 
+ ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + keyInfo: make(map[string]keyInfo), + mapping: make(map[string]any), + lx: lex(data, tomlNext), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + tomlNext: tomlNext, + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + Message: err.Error(), + err: err, + Position: it.pos.withCol(p.lx.input), + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...any) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos.withCol(p.lx.input), + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...any) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos.withCol(p.lx.input), + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Message: it.err.Error(), + err: it.err, + Position: it.pos.withCol(p.lx.input), + Line: it.pos.Line, + LastKey: p.current(), + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...any) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash, item.pos) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key.last() + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key.parent() + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set value. 
+ vItem := p.next() + val, typ := p.value(vItem, false) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, vItem.pos) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemStringEsc, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (any, tomlType) { + switch it.typ { + case itemString: + return it.val, p.typeOfPrimitive(it) + case itemStringEsc: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (any, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (any, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' 
or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + signbit := false + if val == "+nan" || val == "-nan" { + signbit = val == "-nan" + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + if signbit { + num = math.Copysign(num, -1) + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location + next bool +}{ + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, +} + +func (p *parser) valueDatetime(it item) (any, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + if missingLeadingZero(it.val, dt.fmt) { + p.panicErr(it, errParseDate{it.val}) + } + ok = true + break + } + } + if !ok { + p.panicErr(it, errParseDate{it.val}) + } + return t, p.typeOfPrimitive(it) +} + +// Go's time.Parse() will accept numbers without a leading zero; there isn't any +// way to require it. https://github.com/golang/go/issues/29911 +// +// Depend on the fact that the separators (- and :) should always be at the same +// location. +func missingLeadingZero(d, l string) bool { + for i, c := range []byte(l) { + if c == '.' || c == 'Z' { + return false + } + if (c < '0' || c > '9') && d[i] != c { + return true + } + } + return false +} + +func (p *parser) valueArray(it item) (any, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) + + var ( + // Initialize to a non-nil slice to make it consistent with how S = [] + // decodes into a non-nil slice inside something like struct { S + // []string }. See #338 + array = make([]any, 0, 2) + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + + // XXX: type isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + _ = typ + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) { + var ( + topHash = make(map[string]any) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. 
+ k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key.last() + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key.parent() + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set the value. + val, typ := p.value(p.next(), false) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, it.pos) + + hash := topHash + for _, c := range context { + h, ok := hash[c] + if !ok { + h = make(map[string]any) + hash[c] = h + } + hash, ok = h.(map[string]any) + if !ok { + p.panicf("%q is not a table", p.context) + } + } + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return topHash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + switch s { + case "nan", "+nan", "-nan", "inf", "-inf", "+inf": + return true + } + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + } + + // isHex is a superset of all the permissible characters surrounding an + // underscore. + accept = isHex(r) + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// Set the current context of the parser, where the context is either a hash or +// an array of hashes, depending on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) addContext(key Key, array bool) { + /// Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0, len(key)-1) + + /// We only need implicit hashes for the parents. + for _, k := range key.parent() { + _, ok := hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]any) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]any: + hashContext = t[len(t)-1] + case map[string]any: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. 
+ k := key.last() + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]any, 0, 4) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]any); ok { + hashContext[k] = append(hash, make(map[string]any)) + } else { + p.panicf("Key '%s' was already created and cannot be used as an array.", key) + } + } else { + p.setValue(key.last(), make(map[string]any)) + } + p.context = append(p.context, key.last()) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value any) { + var ( + tmpHash any + ok bool + hash = p.mapping + keyContext = make(Key, 0, len(p.context)+1) + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]any: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]any: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + // Otherwise, we have a concrete key trying to override a previous key, + // which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType, pos Position) { + keyContext := make(Key, 0, len(p.context)+1) + keyContext = append(keyContext, p.context...) + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? + if len(keyContext) == 0 { + keyContext = Key{""} + } + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). 
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. +func (p *parser) stripEscapedNewlines(s string) string { + var ( + b strings.Builder + i int + ) + b.Grow(len(s)) + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() + } + i += ix + + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue + } + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } + } + if j == i+1 { + // Not a whitespace escape. + i++ + continue + } + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. (It's a bad escape sequence, + // but we can let replaceEscapes catch it.) + i++ + continue + } + b.WriteString(s[:i]) + s = s[j:] + i = 0 + } +} + +func (p *parser) replaceEscapes(it item, str string) string { + var ( + b strings.Builder + skip = 0 + ) + b.Grow(len(str)) + for i, c := range str { + if skip > 0 { + skip-- + continue + } + if c != '\\' { + b.WriteRune(c) + continue + } + + if i >= len(str) { + p.bug("Escape sequence at end of string.") + return "" + } + switch str[i+1] { + default: + p.bug("Expected valid escape code after \\, but got %q.", str[i+1]) + case ' ', '\t': + p.panicItemf(it, "invalid escape: '\\%c'", str[i+1]) + case 'b': + b.WriteByte(0x08) + skip = 1 + case 't': + b.WriteByte(0x09) + skip = 1 + case 'n': + b.WriteByte(0x0a) + skip = 1 + case 'f': + b.WriteByte(0x0c) + skip = 1 + case 'r': + b.WriteByte(0x0d) + skip = 1 + case 'e': + if p.tomlNext { + b.WriteByte(0x1b) + skip = 1 + } + case '"': + b.WriteByte(0x22) + skip = 1 + case '\\': + b.WriteByte(0x5c) + skip = 1 + // The lexer guarantees the correct number of characters are present; + // don't need to check here. 
+ case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 + } + case 'u': + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) + b.WriteRune(escaped) + skip = 5 + case 'U': + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10]) + b.WriteRune(escaped) + skip = 9 + } + } + return b.String() +} + +func (p *parser) asciiEscapeToUnicode(it item, s string) rune { + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 00000000..10c51f7e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,238 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
+func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go new file mode 100644 index 00000000..1c090d33 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -0,0 +1,65 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsTable(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { return string(btype) } +func (btype tomlBaseType) String() string { return btype.typeString() } + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString, itemStringEsc: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/ClickHouse/ch-go/compress/compress.go b/vendor/github.com/ClickHouse/ch-go/compress/compress.go index a89c6401..bf2cbf9c 100644 --- a/vendor/github.com/ClickHouse/ch-go/compress/compress.go +++ b/vendor/github.com/ClickHouse/ch-go/compress/compress.go @@ -7,18 +7,39 @@ import ( "github.com/go-faster/city" ) -//go:generate go run github.com/dmarkham/enumer -transform snake_upper -type Method -output method_enum.go +//go:generate go run github.com/dmarkham/enumer -transform upper -type Method -output method_enum.go // Method is compression codec. type Method byte // Possible compression methods. 
const ( - None Method = 0x02 - LZ4 Method = 0x82 - ZSTD Method = 0x90 + None Method = iota + LZ4 + LZ4HC + ZSTD + NumMethods int = iota ) +type methodEncoding byte + +const ( + encodedNone methodEncoding = 0x02 + encodedLZ4 methodEncoding = 0x82 + encodedLZ4HC methodEncoding = encodedLZ4 + encodedZSTD methodEncoding = 0x90 +) + +var methodTable = map[Method]methodEncoding{ + None: encodedNone, + LZ4: encodedLZ4, + LZ4HC: encodedLZ4HC, + ZSTD: encodedZSTD, +} + +// Level for supporting compression codecs. +type Level uint32 + // Constants for compression encoding. // // See https://go-faster.org/docs/clickhouse/compression for reference. diff --git a/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go b/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go index 8d443520..373eb288 100644 --- a/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go +++ b/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go @@ -1,4 +1,4 @@ -// Code generated by "enumer -transform snake_upper -type Method -output method_enum.go"; DO NOT EDIT. +// Code generated by "enumer -transform upper -type Method -output method_enum.go"; DO NOT EDIT. package compress @@ -7,58 +7,47 @@ import ( "strings" ) -const ( - _MethodName_0 = "NONE" - _MethodLowerName_0 = "none" - _MethodName_1 = "LZ4" - _MethodLowerName_1 = "lz4" - _MethodName_2 = "ZSTD" - _MethodLowerName_2 = "zstd" -) +const _MethodName = "NONELZ4LZ4HCZSTD" -var ( - _MethodIndex_0 = [...]uint8{0, 4} - _MethodIndex_1 = [...]uint8{0, 3} - _MethodIndex_2 = [...]uint8{0, 4} -) +var _MethodIndex = [...]uint8{0, 4, 7, 12, 16} + +const _MethodLowerName = "nonelz4lz4hczstd" func (i Method) String() string { - switch { - case i == 2: - return _MethodName_0 - case i == 130: - return _MethodName_1 - case i == 144: - return _MethodName_2 - default: + if i >= Method(len(_MethodIndex)-1) { return fmt.Sprintf("Method(%d)", i) } + return _MethodName[_MethodIndex[i]:_MethodIndex[i+1]] } // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. func _MethodNoOp() { var x [1]struct{} - _ = x[None-(2)] - _ = x[LZ4-(130)] - _ = x[ZSTD-(144)] + _ = x[None-(0)] + _ = x[LZ4-(1)] + _ = x[LZ4HC-(2)] + _ = x[ZSTD-(3)] } -var _MethodValues = []Method{None, LZ4, ZSTD} +var _MethodValues = []Method{None, LZ4, LZ4HC, ZSTD} var _MethodNameToValueMap = map[string]Method{ - _MethodName_0[0:4]: None, - _MethodLowerName_0[0:4]: None, - _MethodName_1[0:3]: LZ4, - _MethodLowerName_1[0:3]: LZ4, - _MethodName_2[0:4]: ZSTD, - _MethodLowerName_2[0:4]: ZSTD, + _MethodName[0:4]: None, + _MethodLowerName[0:4]: None, + _MethodName[4:7]: LZ4, + _MethodLowerName[4:7]: LZ4, + _MethodName[7:12]: LZ4HC, + _MethodLowerName[7:12]: LZ4HC, + _MethodName[12:16]: ZSTD, + _MethodLowerName[12:16]: ZSTD, } var _MethodNames = []string{ - _MethodName_0[0:4], - _MethodName_1[0:3], - _MethodName_2[0:4], + _MethodName[0:4], + _MethodName[4:7], + _MethodName[7:12], + _MethodName[12:16], } // MethodString retrieves an enum value from the enum constants string name. 
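The regenerated enum above reflects that Method is now a small contiguous set (NONE, LZ4, LZ4HC, ZSTD) whose ClickHouse wire bytes live in the separate, unexported methodEncoding table, and the writer.go hunk further below moves codec selection into NewWriter(Level, Method) so Compress no longer takes a method per call. A minimal sketch of that updated surface, based only on the exported names visible in this diff (the payload is illustrative):

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/compress"
)

func main() {
	// Logical methods are now iota values 0..3, not the raw wire bytes.
	for _, m := range []compress.Method{compress.None, compress.LZ4, compress.LZ4HC, compress.ZSTD} {
		fmt.Printf("%d => %s\n", byte(m), m) // e.g. "1 => LZ4"
	}

	// The codec is chosen once at construction; per this diff the Level only affects LZ4HC.
	w := compress.NewWriter(compress.LevelZero, compress.LZ4)
	if err := w.Compress([]byte("example payload")); err != nil {
		panic(err)
	}
	// w.Data now holds the framed block: header fields plus the compressed payload.
	fmt.Println("block size:", len(w.Data))
}

Keeping the protocol byte in methodTable also lets the reader treat encodedLZ4 and encodedLZ4HC as one case (decompression is the same for both) without the public enum having to mirror wire values.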
diff --git a/vendor/github.com/ClickHouse/ch-go/compress/reader.go b/vendor/github.com/ClickHouse/ch-go/compress/reader.go index 6a26f9df..fc4a0e6b 100644 --- a/vendor/github.com/ClickHouse/ch-go/compress/reader.go +++ b/vendor/github.com/ClickHouse/ch-go/compress/reader.go @@ -70,8 +70,8 @@ func (r *Reader) readBlock() error { DataSize: dataSize, }, "mismatch") } - switch m := Method(r.header[hMethod]); m { - case LZ4: + switch m := methodEncoding(r.header[hMethod]); m { + case encodedLZ4: // == encodedLZ4HC, as decompression is similar for both n, err := lz4.UncompressBlock(r.raw[headerSize:], r.data) if err != nil { return errors.Wrap(err, "uncompress") @@ -81,7 +81,7 @@ func (r *Reader) readBlock() error { n, dataSize, ) } - case ZSTD: + case encodedZSTD: if r.zstd == nil { // Lazily initializing to prevent spawning goroutines in NewReader. // See https://github.com/golang/go/issues/47056#issuecomment-997436820 @@ -104,7 +104,7 @@ func (r *Reader) readBlock() error { ) } r.data = data - case None: + case encodedNone: copy(r.data, r.raw[headerSize:]) default: return errors.Errorf("compression 0x%02x not implemented", m) diff --git a/vendor/github.com/ClickHouse/ch-go/compress/writer.go b/vendor/github.com/ClickHouse/ch-go/compress/writer.go index 6094b055..a8d26806 100644 --- a/vendor/github.com/ClickHouse/ch-go/compress/writer.go +++ b/vendor/github.com/ClickHouse/ch-go/compress/writer.go @@ -2,6 +2,7 @@ package compress import ( "encoding/binary" + "math" "github.com/go-faster/city" "github.com/go-faster/errors" @@ -9,30 +10,45 @@ import ( "github.com/pierrec/lz4/v4" ) +const ( + LevelZero Level = 0 + LevelLZ4HCDefault Level = 9 + LevelLZ4HCMax Level = 12 +) + // Writer encodes compressed blocks. type Writer struct { Data []byte - lz4 *lz4.Compressor - zstd *zstd.Encoder + method Method + + lz4 *lz4.Compressor + lz4hc *lz4.CompressorHC + zstd *zstd.Encoder } // Compress buf into Data. -func (w *Writer) Compress(m Method, buf []byte) error { +func (w *Writer) Compress(buf []byte) error { maxSize := lz4.CompressBlockBound(len(buf)) w.Data = append(w.Data[:0], make([]byte, maxSize+headerSize)...) 
_ = w.Data[:headerSize] - w.Data[hMethod] = byte(m) + w.Data[hMethod] = byte(methodTable[w.method]) var n int - switch m { + switch w.method { case LZ4: compressedSize, err := w.lz4.CompressBlock(buf, w.Data[headerSize:]) if err != nil { return errors.Wrap(err, "block") } n = compressedSize + case LZ4HC: + compressedSize, err := w.lz4hc.CompressBlock(buf, w.Data[headerSize:]) + if err != nil { + return errors.Wrap(err, "block") + } + n = compressedSize case ZSTD: w.Data = w.zstd.EncodeAll(buf, w.Data[:headerSize]) n = len(w.Data) - headerSize @@ -40,8 +56,12 @@ func (w *Writer) Compress(m Method, buf []byte) error { n = copy(w.Data[headerSize:], buf) } - w.Data = w.Data[:n+headerSize] + // security: https://github.com/ClickHouse/ch-go/pull/1041 + if uint64(n)+uint64(compressHeaderSize) > math.MaxUint32 { + return errors.New("compressed size overflows uint32") + } + w.Data = w.Data[:n+headerSize] binary.LittleEndian.PutUint32(w.Data[hRawSize:], uint32(n+compressHeaderSize)) binary.LittleEndian.PutUint32(w.Data[hDataSize:], uint32(len(buf))) h := city.CH128(w.Data[hMethod:]) @@ -51,17 +71,40 @@ func (w *Writer) Compress(m Method, buf []byte) error { return nil } -func NewWriter() *Writer { - w, err := zstd.NewWriter(nil, - zstd.WithEncoderLevel(zstd.SpeedDefault), - zstd.WithEncoderConcurrency(1), - zstd.WithLowerEncoderMem(true), - ) - if err != nil { - panic(err) +// NewWriter creates a new Writer with the specified compression level that supports the specified method. +func NewWriter(l Level, m Method) *Writer { + var err error + var zstdWriter *zstd.Encoder + var lz4Writer *lz4.Compressor + var lz4hcWriter *lz4.CompressorHC + + switch m { + case LZ4: + lz4Writer = &lz4.Compressor{} + case LZ4HC: + levelLZ4HC := l + if levelLZ4HC == 0 { + levelLZ4HC = LevelLZ4HCDefault + } else { + levelLZ4HC = Level(math.Min(float64(levelLZ4HC), float64(LevelLZ4HCMax))) + } + lz4hcWriter = &lz4.CompressorHC{Level: lz4.CompressionLevel(1 << (8 + levelLZ4HC))} + case ZSTD: + zstdWriter, err = zstd.NewWriter(nil, + zstd.WithEncoderLevel(zstd.SpeedDefault), + zstd.WithEncoderConcurrency(1), + zstd.WithLowerEncoderMem(true), + ) + if err != nil { + panic(err) + } + default: } + return &Writer{ - lz4: &lz4.Compressor{}, - zstd: w, + method: m, + lz4: lz4Writer, + lz4hc: lz4hcWriter, + zstd: zstdWriter, } } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/block.go b/vendor/github.com/ClickHouse/ch-go/proto/block.go index 34548067..2e8fb883 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/block.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/block.go @@ -183,6 +183,38 @@ func (b Block) EncodeRawBlock(buf *Buffer, version int, input []InputColumn) err return nil } +func (b Block) WriteBlock(w *Writer, version int, input []InputColumn) error { + w.ChainBuffer(func(buf *Buffer) { + if FeatureBlockInfo.In(version) { + b.Info.Encode(buf) + } + buf.PutInt(b.Columns) + buf.PutInt(b.Rows) + }) + + for _, col := range input { + if r := col.Data.Rows(); r != b.Rows { + return errors.Errorf("%q has %d rows, expected %d", col.Name, r, b.Rows) + } + w.ChainBuffer(func(buf *Buffer) { + col.EncodeStart(buf, version) + }) + if v, ok := col.Data.(Preparable); ok { + if err := v.Prepare(); err != nil { + return errors.Wrapf(err, "prepare %q", col.Name) + } + } + if col.Data.Rows() == 0 { + continue + } + if v, ok := col.Data.(StateEncoder); ok { + w.ChainBuffer(v.EncodeState) + } + col.Data.WriteColumn(w) + } + return nil +} + // This constrains can prevent accidental OOM and allow early detection // of erroneous 
column or row count. // diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go b/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go index ad574368..9b7639bf 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go @@ -130,6 +130,12 @@ func (c ColArr[T]) EncodeColumn(b *Buffer) { c.Data.EncodeColumn(b) } +// WriteColumn implements ColInput. +func (c ColArr[T]) WriteColumn(w *Writer) { + c.Offsets.WriteColumn(w) + c.Data.WriteColumn(w) +} + // Append appends new row to column. func (c *ColArr[T]) Append(v []T) { c.Data.AppendArr(v) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_arr_go123.go b/vendor/github.com/ClickHouse/ch-go/proto/col_arr_go123.go new file mode 100644 index 00000000..c421b419 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_arr_go123.go @@ -0,0 +1,22 @@ +//go:build go1.23 + +package proto + +import "iter" + +// RowRange returns a [iter.Seq] iterator over i-th row. +func (c ColArr[T]) RowRange(i int) iter.Seq[T] { + var start int + end := int(c.Offsets[i]) + if i > 0 { + start = int(c.Offsets[i-1]) + } + + return func(yield func(T) bool) { + for idx := start; idx < end; idx++ { + if !yield(c.Data.Row(idx)) { + return + } + } + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go b/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go index 2ed9b8ea..d5f614de 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go @@ -1,6 +1,8 @@ package proto import ( + "reflect" + "strconv" "strings" "github.com/go-faster/errors" @@ -36,20 +38,8 @@ func (c *ColAuto) Infer(t ColumnType) error { switch t { case ColumnTypeNothing: c.Data = new(ColNothing) - case ColumnTypeNullable.Sub(ColumnTypeNothing): - c.Data = new(ColNothing).Nullable() - case ColumnTypeArray.Sub(ColumnTypeNothing): - c.Data = new(ColNothing).Array() case ColumnTypeString: c.Data = new(ColStr) - case ColumnTypeArray.Sub(ColumnTypeString): - c.Data = new(ColStr).Array() - case ColumnTypeNullable.Sub(ColumnTypeString): - c.Data = new(ColStr).Nullable() - case ColumnTypeLowCardinality.Sub(ColumnTypeString): - c.Data = new(ColStr).LowCardinality() - case ColumnTypeArray.Sub(ColumnTypeLowCardinality.Sub(ColumnTypeString)): - c.Data = new(ColStr).LowCardinality().Array() case ColumnTypeBool: c.Data = new(ColBool) case ColumnTypeDateTime: @@ -60,12 +50,50 @@ func (c *ColAuto) Infer(t ColumnType) error { c.Data = NewMap[string, string](new(ColStr), new(ColStr)) case ColumnTypeUUID: c.Data = new(ColUUID) - case ColumnTypeArray.Sub(ColumnTypeUUID): - c.Data = new(ColUUID).Array() - case ColumnTypeNullable.Sub(ColumnTypeUUID): - c.Data = new(ColUUID).Nullable() default: switch t.Base() { + case ColumnTypeArray: + inner := new(ColAuto) + if err := inner.Infer(t.Elem()); err != nil { + return errors.Wrap(err, "array") + } + innerValue := reflect.ValueOf(inner.Data) + arrayMethod := innerValue.MethodByName("Array") + if arrayMethod.IsValid() && arrayMethod.Type().NumOut() == 1 { + if col, ok := arrayMethod.Call(nil)[0].Interface().(Column); ok { + c.Data = col + c.DataType = t + return nil + } + } + case ColumnTypeNullable: + inner := new(ColAuto) + if err := inner.Infer(t.Elem()); err != nil { + return errors.Wrap(err, "nullable") + } + innerValue := reflect.ValueOf(inner.Data) + nullableMethod := innerValue.MethodByName("Nullable") + if nullableMethod.IsValid() && nullableMethod.Type().NumOut() == 1 { + if col, ok := 
nullableMethod.Call(nil)[0].Interface().(Column); ok { + c.Data = col + c.DataType = t + return nil + } + } + case ColumnTypeLowCardinality: + inner := new(ColAuto) + if err := inner.Infer(t.Elem()); err != nil { + return errors.Wrap(err, "low cardinality") + } + innerValue := reflect.ValueOf(inner.Data) + lowCardinalityMethod := innerValue.MethodByName("LowCardinality") + if lowCardinalityMethod.IsValid() && lowCardinalityMethod.Type().NumOut() == 1 { + if col, ok := lowCardinalityMethod.Call(nil)[0].Interface().(Column); ok { + c.Data = col + c.DataType = t + return nil + } + } case ColumnTypeDateTime: v := new(ColDateTime) if err := v.Infer(t); err != nil { @@ -74,6 +102,49 @@ func (c *ColAuto) Infer(t ColumnType) error { c.Data = v c.DataType = t return nil + case ColumnTypeDecimal: + var prec int + precStr, _, _ := strings.Cut(string(t.Elem()), ",") + if precStr != "" { + var err error + precStr = strings.TrimSpace(precStr) + prec, err = strconv.Atoi(precStr) + if err != nil { + return errors.Wrap(err, "decimal") + } + } else { + prec = 10 + } + switch { + case prec >= 1 && prec < 10: + c.Data = new(ColDecimal32) + case prec >= 10 && prec < 19: + c.Data = new(ColDecimal64) + case prec >= 19 && prec < 39: + c.Data = new(ColDecimal128) + case prec >= 39 && prec < 77: + c.Data = new(ColDecimal256) + default: + return errors.Errorf("decimal precision %d out of range", prec) + } + c.DataType = t + return nil + case ColumnTypeDecimal32: + c.Data = new(ColDecimal32) + c.DataType = t + return nil + case ColumnTypeDecimal64: + c.Data = new(ColDecimal64) + c.DataType = t + return nil + case ColumnTypeDecimal128: + c.Data = new(ColDecimal128) + c.DataType = t + return nil + case ColumnTypeDecimal256: + c.Data = new(ColDecimal256) + c.DataType = t + return nil case ColumnTypeEnum8, ColumnTypeEnum16: v := new(ColEnum) if err := v.Infer(t); err != nil { @@ -122,3 +193,7 @@ func (c ColAuto) Reset() { func (c ColAuto) EncodeColumn(b *Buffer) { c.Data.EncodeColumn(b) } + +func (c ColAuto) WriteColumn(w *Writer) { + c.Data.WriteColumn(w) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go index 70928c65..b297d927 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go @@ -4,154 +4,54 @@ package proto func inferGenerated(t ColumnType) Column { switch t { - case ColumnTypeArray.Sub(ColumnTypeFloat32): - return new(ColFloat32).Array() - case ColumnTypeNullable.Sub(ColumnTypeFloat32): - return new(ColFloat32).Nullable() case ColumnTypeFloat32: return new(ColFloat32) - case ColumnTypeArray.Sub(ColumnTypeFloat64): - return new(ColFloat64).Array() - case ColumnTypeNullable.Sub(ColumnTypeFloat64): - return new(ColFloat64).Nullable() case ColumnTypeFloat64: return new(ColFloat64) - case ColumnTypeArray.Sub(ColumnTypeIPv4): - return new(ColIPv4).Array() - case ColumnTypeNullable.Sub(ColumnTypeIPv4): - return new(ColIPv4).Nullable() case ColumnTypeIPv4: return new(ColIPv4) - case ColumnTypeArray.Sub(ColumnTypeIPv6): - return new(ColIPv6).Array() - case ColumnTypeNullable.Sub(ColumnTypeIPv6): - return new(ColIPv6).Nullable() case ColumnTypeIPv6: return new(ColIPv6) - case ColumnTypeArray.Sub(ColumnTypeDate): - return new(ColDate).Array() - case ColumnTypeNullable.Sub(ColumnTypeDate): - return new(ColDate).Nullable() case ColumnTypeDate: return new(ColDate) - case ColumnTypeArray.Sub(ColumnTypeDate32): - return new(ColDate32).Array() - case 
ColumnTypeNullable.Sub(ColumnTypeDate32): - return new(ColDate32).Nullable() case ColumnTypeDate32: return new(ColDate32) - case ColumnTypeArray.Sub(ColumnTypeInt8): - return new(ColInt8).Array() - case ColumnTypeNullable.Sub(ColumnTypeInt8): - return new(ColInt8).Nullable() case ColumnTypeInt8: return new(ColInt8) - case ColumnTypeArray.Sub(ColumnTypeUInt8): - return new(ColUInt8).Array() - case ColumnTypeNullable.Sub(ColumnTypeUInt8): - return new(ColUInt8).Nullable() case ColumnTypeUInt8: return new(ColUInt8) - case ColumnTypeArray.Sub(ColumnTypeInt16): - return new(ColInt16).Array() - case ColumnTypeNullable.Sub(ColumnTypeInt16): - return new(ColInt16).Nullable() case ColumnTypeInt16: return new(ColInt16) - case ColumnTypeArray.Sub(ColumnTypeUInt16): - return new(ColUInt16).Array() - case ColumnTypeNullable.Sub(ColumnTypeUInt16): - return new(ColUInt16).Nullable() case ColumnTypeUInt16: return new(ColUInt16) - case ColumnTypeArray.Sub(ColumnTypeInt32): - return new(ColInt32).Array() - case ColumnTypeNullable.Sub(ColumnTypeInt32): - return new(ColInt32).Nullable() case ColumnTypeInt32: return new(ColInt32) - case ColumnTypeArray.Sub(ColumnTypeUInt32): - return new(ColUInt32).Array() - case ColumnTypeNullable.Sub(ColumnTypeUInt32): - return new(ColUInt32).Nullable() case ColumnTypeUInt32: return new(ColUInt32) - case ColumnTypeArray.Sub(ColumnTypeInt64): - return new(ColInt64).Array() - case ColumnTypeNullable.Sub(ColumnTypeInt64): - return new(ColInt64).Nullable() case ColumnTypeInt64: return new(ColInt64) - case ColumnTypeArray.Sub(ColumnTypeUInt64): - return new(ColUInt64).Array() - case ColumnTypeNullable.Sub(ColumnTypeUInt64): - return new(ColUInt64).Nullable() case ColumnTypeUInt64: return new(ColUInt64) - case ColumnTypeArray.Sub(ColumnTypeInt128): - return new(ColInt128).Array() - case ColumnTypeNullable.Sub(ColumnTypeInt128): - return new(ColInt128).Nullable() case ColumnTypeInt128: return new(ColInt128) - case ColumnTypeArray.Sub(ColumnTypeUInt128): - return new(ColUInt128).Array() - case ColumnTypeNullable.Sub(ColumnTypeUInt128): - return new(ColUInt128).Nullable() case ColumnTypeUInt128: return new(ColUInt128) - case ColumnTypeArray.Sub(ColumnTypeInt256): - return new(ColInt256).Array() - case ColumnTypeNullable.Sub(ColumnTypeInt256): - return new(ColInt256).Nullable() case ColumnTypeInt256: return new(ColInt256) - case ColumnTypeArray.Sub(ColumnTypeUInt256): - return new(ColUInt256).Array() - case ColumnTypeNullable.Sub(ColumnTypeUInt256): - return new(ColUInt256).Nullable() case ColumnTypeUInt256: return new(ColUInt256) - case ColumnTypeArray.Sub(ColumnTypeFixedString.With("8")): - return new(ColFixedStr8).Array() - case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("8")): - return new(ColFixedStr8).Nullable() case ColumnTypeFixedString.With("8"): return new(ColFixedStr8) - case ColumnTypeArray.Sub(ColumnTypeFixedString.With("16")): - return new(ColFixedStr16).Array() - case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("16")): - return new(ColFixedStr16).Nullable() case ColumnTypeFixedString.With("16"): return new(ColFixedStr16) - case ColumnTypeArray.Sub(ColumnTypeFixedString.With("32")): - return new(ColFixedStr32).Array() - case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("32")): - return new(ColFixedStr32).Nullable() case ColumnTypeFixedString.With("32"): return new(ColFixedStr32) - case ColumnTypeArray.Sub(ColumnTypeFixedString.With("64")): - return new(ColFixedStr64).Array() - case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("64")): - return 
new(ColFixedStr64).Nullable() case ColumnTypeFixedString.With("64"): return new(ColFixedStr64) - case ColumnTypeArray.Sub(ColumnTypeFixedString.With("128")): - return new(ColFixedStr128).Array() - case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("128")): - return new(ColFixedStr128).Nullable() case ColumnTypeFixedString.With("128"): return new(ColFixedStr128) - case ColumnTypeArray.Sub(ColumnTypeFixedString.With("256")): - return new(ColFixedStr256).Array() - case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("256")): - return new(ColFixedStr256).Nullable() case ColumnTypeFixedString.With("256"): return new(ColFixedStr256) - case ColumnTypeArray.Sub(ColumnTypeFixedString.With("512")): - return new(ColFixedStr512).Array() - case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("512")): - return new(ColFixedStr512).Nullable() case ColumnTypeFixedString.With("512"): return new(ColFixedStr512) default: diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go index 3e998e46..38cc73d3 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go @@ -42,3 +42,11 @@ func (c *ColBool) DecodeColumn(r *Reader, rows int) error { *c = v return nil } + +// WriteColumn encodes ColBool rows to *Writer. +func (c ColBool) WriteColumn(w *Writer) { + if len(c) == 0 { + return + } + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go index 92cac707..c42966ea 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go @@ -34,3 +34,13 @@ func (c *ColBool) DecodeColumn(r *Reader, rows int) error { } return nil } + +// WriteColumn writes Bool rows to *Writer. +func (c ColBool) WriteColumn(w *Writer) { + if len(c) == 0 { + return + } + s := *(*slice)(unsafe.Pointer(&c)) // #nosec G103 + src := *(*[]byte)(unsafe.Pointer(&s)) // #nosec G103 + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date.go index 5bf75b3b..4483739a 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date.go @@ -20,7 +20,7 @@ func (c ColDate) Row(i int) time.Time { return c[i].Time() } -// LowCardinality returns LowCardinality for Enum8 . +// LowCardinality returns LowCardinality for Enum8. func (c *ColDate) LowCardinality() *ColLowCardinality[time.Time] { return &ColLowCardinality[time.Time]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go index 38f1a916..8daa68b7 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go @@ -20,7 +20,7 @@ func (c ColDate32) Row(i int) time.Time { return c[i].Time() } -// LowCardinality returns LowCardinality for Enum8 . +// LowCardinality returns LowCardinality for Enum8. 
func (c *ColDate32) LowCardinality() *ColLowCardinality[time.Time] { return &ColLowCardinality[time.Time]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go index 7e6ac3d6..3a1f6404 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go @@ -53,3 +53,7 @@ func (c ColDate32) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColDate32) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go index 2690a318..f4de4604 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColDate32) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColDate32) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go index 49bb89b5..7b9cfc33 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go @@ -53,3 +53,7 @@ func (c ColDate) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColDate) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go index 980d8b4c..022a7147 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColDate) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColDate) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 16 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go index 4243f2ba..f8f8d97f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go @@ -61,6 +61,10 @@ func (c ColDateTime) Row(i int) time.Time { return c.Data[i].Time().In(c.loc()) } +func (c *ColDateTime) AppendRaw(v DateTime) { + c.Data = append(c.Data, v) +} + func (c *ColDateTime) Append(v time.Time) { c.Data = append(c.Data, ToDateTime(v)) } @@ -75,7 +79,7 @@ func (c *ColDateTime) AppendArr(vs []time.Time) { c.Data = append(c.Data, dates...) } -// LowCardinality returns LowCardinality for Enum8 . +// LowCardinality returns LowCardinality for Enum8. 
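The generated WriteColumn methods above come in two flavors: the safe variants chain the existing EncodeColumn through the writer's buffer, while the unsafe variants reinterpret the column's slice header as raw bytes (scaling Len and Cap by the element size) and hand the result to ChainWrite without copying. A minimal, self-contained sketch of the same zero-copy idea using unsafe.Slice; illustrative only, not the vendored implementation:

package main

import (
	"fmt"
	"unsafe"
)

// rawBytes reinterprets a []int32 as its in-memory byte representation
// without copying, mirroring the slice-header trick in the unsafe
// WriteColumn generators (length scaled by the element size in bytes).
func rawBytes(v []int32) []byte {
	if len(v) == 0 {
		return nil
	}
	const size = 32 / 8
	return unsafe.Slice((*byte)(unsafe.Pointer(&v[0])), len(v)*size)
}

func main() {
	fmt.Println(rawBytes([]int32{1, 2})) // [1 0 0 0 2 0 0 0] on little-endian hosts
}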
func (c *ColDateTime) LowCardinality() *ColLowCardinality[time.Time] { return &ColLowCardinality[time.Time]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go index f4d96a49..12506e40 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go @@ -61,11 +61,10 @@ func (c *ColDateTime64) Infer(t ColumnType) error { if elem == "" { return errors.Errorf("invalid DateTime64: no elements in %q", t) } - elems := strings.SplitN(elem, ",", 2) - for i := range elems { - elems[i] = strings.Trim(elems[i], `' `) - } - n, err := strconv.ParseUint(elems[0], 10, 8) + pStr, locStr, hasloc := strings.Cut(elem, ",") + pStr = strings.Trim(pStr, `' `) + locStr = strings.Trim(locStr, `' `) + n, err := strconv.ParseUint(pStr, 10, 8) if err != nil { return errors.Wrap(err, "parse precision") } @@ -75,8 +74,8 @@ func (c *ColDateTime64) Infer(t ColumnType) error { } c.Precision = p c.PrecisionSet = true - if len(elems) > 1 { - loc, err := time.LoadLocation(elems[1]) + if hasloc { + loc, err := time.LoadLocation(locStr) if err != nil { return errors.Wrap(err, "invalid location") } @@ -126,6 +125,10 @@ func (c ColDateTime64) Raw() *ColDateTime64Raw { return &ColDateTime64Raw{ColDateTime64: c} } +func (c *ColDateTime64) Nullable() *ColNullable[time.Time] { + return &ColNullable[time.Time]{Values: c} +} + func (c *ColDateTime64) Array() *ColArr[time.Time] { return &ColArr[time.Time]{Data: c} } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go index ccff09d2..69ac2f84 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go @@ -53,3 +53,7 @@ func (c ColDateTime64) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColDateTime64) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go index 4eeeaf5a..22e5052e 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColDateTime64) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColDateTime64) WriteColumn(w *Writer) { + v := c.Data + if len(v) == 0 { + return + } + const size = 64 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go index 20e9aad8..2eca6eb3 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go @@ -53,3 +53,7 @@ func (c ColDateTime) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColDateTime) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go index 40a056c5..b4214756 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go +++ 
b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColDateTime) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColDateTime) WriteColumn(w *Writer) { + v := c.Data + if len(v) == 0 { + return + } + const size = 32 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go index 18811acd..775acbbb 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go @@ -42,7 +42,7 @@ func (c *ColDecimal128) AppendArr(vs []Decimal128) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Decimal128 . +// LowCardinality returns LowCardinality for Decimal128. func (c *ColDecimal128) LowCardinality() *ColLowCardinality[Decimal128] { return &ColLowCardinality[Decimal128]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go index 58c02eb2..fa8498d4 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go @@ -53,3 +53,7 @@ func (c ColDecimal128) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColDecimal128) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go index 1b2fe125..9a4070f1 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColDecimal128) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColDecimal128) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 128 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go index ad96b27a..7bb2ffaa 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go @@ -42,7 +42,7 @@ func (c *ColDecimal256) AppendArr(vs []Decimal256) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Decimal256 . +// LowCardinality returns LowCardinality for Decimal256. 
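Besides the streaming writers, ColDateTime64 gains a Nullable() helper above, so Nullable(DateTime64(...)) columns can be composed like the other column types. A rough usage sketch; WithPrecision, NewNullable and Null are assumed from ch-go's existing API and are not part of this diff:

package main

import (
	"fmt"
	"time"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	// Nullable(DateTime64(3)): WithPrecision/NewNullable/Null are assumed
	// from the existing ch-go API; only the Nullable() helper is new here.
	col := new(proto.ColDateTime64).WithPrecision(proto.PrecisionMilli).Nullable()
	col.Append(proto.NewNullable(time.Now()))
	col.Append(proto.Null[time.Time]())
	fmt.Println(col.Rows()) // 2
}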
func (c *ColDecimal256) LowCardinality() *ColLowCardinality[Decimal256] { return &ColLowCardinality[Decimal256]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go index 301b7b14..d7b37f79 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go @@ -53,3 +53,7 @@ func (c ColDecimal256) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColDecimal256) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go index b0d694ef..57419fb6 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColDecimal256) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColDecimal256) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 256 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go index 2c4f4eaf..e26ec6f5 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go @@ -42,7 +42,7 @@ func (c *ColDecimal32) AppendArr(vs []Decimal32) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Decimal32 . +// LowCardinality returns LowCardinality for Decimal32. func (c *ColDecimal32) LowCardinality() *ColLowCardinality[Decimal32] { return &ColLowCardinality[Decimal32]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go index 44cb9f7a..9935f75f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go @@ -53,3 +53,7 @@ func (c ColDecimal32) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColDecimal32) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go index eaed3dff..07845255 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColDecimal32) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColDecimal32) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go index c37ffcd3..bfb06b93 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go @@ -42,7 +42,7 @@ func (c *ColDecimal64) AppendArr(vs []Decimal64) { *c = append(*c, vs...) 
} -// LowCardinality returns LowCardinality for Decimal64 . +// LowCardinality returns LowCardinality for Decimal64. func (c *ColDecimal64) LowCardinality() *ColLowCardinality[Decimal64] { return &ColLowCardinality[Decimal64]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go index a0934c68..9bb72cce 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go @@ -53,3 +53,7 @@ func (c ColDecimal64) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColDecimal64) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go index f5ba1b2d..c4824fc1 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColDecimal64) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColDecimal64) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 64 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go index f4af963b..f798ec90 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go @@ -63,14 +63,12 @@ func (e *ColEnum) parse(t ColumnType) error { for _, elem := range strings.Split(elements, ",") { def := strings.TrimSpace(elem) // 'hello' = 1 - parts := strings.SplitN(def, "=", 2) - if len(parts) != 2 { + left, right, hascomma := strings.Cut(def, "=") + if !hascomma { return errors.Errorf("bad enum definition %q", def) } - var ( - left = strings.TrimSpace(parts[0]) // 'hello' - right = strings.TrimSpace(parts[1]) // 1 - ) + left = strings.TrimSpace(left) // 'hello' + right = strings.TrimSpace(right) // 1 idx, err := strconv.Atoi(right) if err != nil { return errors.Errorf("bad right side of definition %q", right) @@ -169,4 +167,8 @@ func (e *ColEnum) EncodeColumn(b *Buffer) { e.raw().EncodeColumn(b) } +func (e *ColEnum) WriteColumn(w *Writer) { + e.raw().WriteColumn(w) +} + func (e *ColEnum) Type() ColumnType { return e.t } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go index 3f99c642..406e2323 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go @@ -42,7 +42,7 @@ func (c *ColEnum16) AppendArr(vs []Enum16) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Enum16 . +// LowCardinality returns LowCardinality for Enum16. 
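Several of the type parsers in this update (DateTime64 precision and timezone above, Enum definitions here, and the Map key/value split further down) move from strings.SplitN to strings.Cut, which avoids the intermediate slice allocation. A small standalone illustration of the same parsing shape for one enum element; the literal is made up for the example:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	def := "'hello' = 1"
	left, right, ok := strings.Cut(def, "=")
	if !ok {
		panic("bad enum definition")
	}
	name := strings.Trim(strings.TrimSpace(left), "'") // hello
	idx, err := strconv.Atoi(strings.TrimSpace(right)) // 1
	if err != nil {
		panic(err)
	}
	fmt.Println(name, idx) // hello 1
}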
func (c *ColEnum16) LowCardinality() *ColLowCardinality[Enum16] { return &ColLowCardinality[Enum16]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go index bf3b0122..11b3ff08 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go @@ -53,3 +53,7 @@ func (c ColEnum16) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColEnum16) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go index 52757012..36bcabfa 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColEnum16) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColEnum16) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 16 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go index a063eaf8..c9c3e438 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go @@ -42,7 +42,7 @@ func (c *ColEnum8) AppendArr(vs []Enum8) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Enum8 . +// LowCardinality returns LowCardinality for Enum8. func (c *ColEnum8) LowCardinality() *ColLowCardinality[Enum8] { return &ColLowCardinality[Enum8]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go index edf5712c..e8747e3f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go @@ -42,3 +42,7 @@ func (c ColEnum8) EncodeColumn(b *Buffer) { b.Buf[i+start] = uint8(v[i]) } } + +func (c ColEnum8) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go index 09e6fe2b..7d3b3807 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go @@ -37,3 +37,12 @@ func (c ColEnum8) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColEnum8) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + src := *(*[]byte)(unsafe.Pointer(&v)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go index 982cfa1b..7ae816e6 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go @@ -86,6 +86,11 @@ func (c *ColFixedStr) DecodeColumn(r *Reader, rows int) error { return nil } +// WriteColumn writes ColFixedStr rows to *Writer. +func (c ColFixedStr) WriteColumn(w *Writer) { + w.ChainWrite(c.Buf) +} + // Array returns new Array(FixedString). 
func (c *ColFixedStr) Array() *ColArr[[]byte] { return &ColArr[[]byte]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go index cb769537..130b7be6 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go @@ -42,7 +42,7 @@ func (c *ColFixedStr128) AppendArr(vs [][128]byte) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for FixedStr128 . +// LowCardinality returns LowCardinality for FixedStr128. func (c *ColFixedStr128) LowCardinality() *ColLowCardinality[[128]byte] { return &ColLowCardinality[[128]byte]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go index edf7f9cd..3ecd33c0 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go @@ -53,3 +53,7 @@ func (c ColFixedStr128) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFixedStr128) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go index 46ee96c7..6cba7dbb 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFixedStr128) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFixedStr128) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 128 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go index adfc2de1..765a4182 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go @@ -42,7 +42,7 @@ func (c *ColFixedStr16) AppendArr(vs [][16]byte) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for FixedStr16 . +// LowCardinality returns LowCardinality for FixedStr16. 
func (c *ColFixedStr16) LowCardinality() *ColLowCardinality[[16]byte] { return &ColLowCardinality[[16]byte]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go index 4a9313ab..030b49ce 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go @@ -53,3 +53,7 @@ func (c ColFixedStr16) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFixedStr16) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go index 5d0dbeee..0b1e411c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFixedStr16) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFixedStr16) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 16 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go index 1e2d9559..da652618 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go @@ -42,7 +42,7 @@ func (c *ColFixedStr256) AppendArr(vs [][256]byte) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for FixedStr256 . +// LowCardinality returns LowCardinality for FixedStr256. 
func (c *ColFixedStr256) LowCardinality() *ColLowCardinality[[256]byte] { return &ColLowCardinality[[256]byte]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go index bb961f8b..a4b8a5bf 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go @@ -53,3 +53,7 @@ func (c ColFixedStr256) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFixedStr256) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go index 277ac598..318908ac 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFixedStr256) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFixedStr256) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 256 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go index 90adba9e..052bae3a 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go @@ -42,7 +42,7 @@ func (c *ColFixedStr32) AppendArr(vs [][32]byte) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for FixedStr32 . +// LowCardinality returns LowCardinality for FixedStr32. 
func (c *ColFixedStr32) LowCardinality() *ColLowCardinality[[32]byte] { return &ColLowCardinality[[32]byte]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go index cdaf62d0..7816cb5f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go @@ -53,3 +53,7 @@ func (c ColFixedStr32) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFixedStr32) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go index 3777e5e8..7e1386a2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFixedStr32) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFixedStr32) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 32 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go index 09837fa8..529ba31f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go @@ -42,7 +42,7 @@ func (c *ColFixedStr512) AppendArr(vs [][512]byte) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for FixedStr512 . +// LowCardinality returns LowCardinality for FixedStr512. 
func (c *ColFixedStr512) LowCardinality() *ColLowCardinality[[512]byte] { return &ColLowCardinality[[512]byte]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go index aa8ea319..b3cf5740 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go @@ -53,3 +53,7 @@ func (c ColFixedStr512) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFixedStr512) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go index 970ca0f0..58bd05c2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFixedStr512) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFixedStr512) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 512 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go index 38849ccd..ad317153 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go @@ -42,7 +42,7 @@ func (c *ColFixedStr64) AppendArr(vs [][64]byte) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for FixedStr64 . +// LowCardinality returns LowCardinality for FixedStr64. 
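For the FixedString columns the unsafe writers scale the slice header by the full row width in bytes (8 through 512) rather than a bit width divided by eight, because each element is an [N]byte array. The same relationship in a self-contained form:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	rows := [][16]byte{{'a'}, {'b'}}
	// Flatten the [][16]byte backing array into bytes, matching the
	// len(rows)*16 scaling used by the FixedStr16 writer above.
	raw := unsafe.Slice((*byte)(unsafe.Pointer(&rows[0])), len(rows)*16)
	fmt.Println(len(raw)) // 32
}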
func (c *ColFixedStr64) LowCardinality() *ColLowCardinality[[64]byte] { return &ColLowCardinality[[64]byte]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go index 89c1f24e..4af33c0f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go @@ -53,3 +53,7 @@ func (c ColFixedStr64) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFixedStr64) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go index 62ec09e5..34a10785 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFixedStr64) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFixedStr64) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 64 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go index a58723ee..a233df76 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go @@ -42,7 +42,7 @@ func (c *ColFixedStr8) AppendArr(vs [][8]byte) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for FixedStr8 . +// LowCardinality returns LowCardinality for FixedStr8. func (c *ColFixedStr8) LowCardinality() *ColLowCardinality[[8]byte] { return &ColLowCardinality[[8]byte]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go index 086ea6fc..d830f164 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go @@ -53,3 +53,7 @@ func (c ColFixedStr8) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFixedStr8) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go index 9991c06e..56cf28f7 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFixedStr8) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFixedStr8) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go index 7031f111..01a35f30 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go @@ -42,7 +42,7 @@ func (c *ColFloat32) AppendArr(vs []float32) { *c = append(*c, vs...) 
} -// LowCardinality returns LowCardinality for Float32 . +// LowCardinality returns LowCardinality for Float32. func (c *ColFloat32) LowCardinality() *ColLowCardinality[float32] { return &ColLowCardinality[float32]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go index f400aef2..dde651fb 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go @@ -54,3 +54,7 @@ func (c ColFloat32) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFloat32) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go index 2ded35f8..f1d20f40 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFloat32) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFloat32) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go index c210eb8d..4df408c0 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go @@ -42,7 +42,7 @@ func (c *ColFloat64) AppendArr(vs []float64) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Float64 . +// LowCardinality returns LowCardinality for Float64. func (c *ColFloat64) LowCardinality() *ColLowCardinality[float64] { return &ColLowCardinality[float64]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go index 68281aec..9bdaa816 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go @@ -54,3 +54,7 @@ func (c ColFloat64) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColFloat64) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go index f16fd396..16e29743 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColFloat64) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColFloat64) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 64 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go index 5e982c4f..3ff6e76f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go @@ -42,7 +42,7 @@ func (c *ColInt128) AppendArr(vs []Int128) { *c = append(*c, vs...) 
} -// LowCardinality returns LowCardinality for Int128 . +// LowCardinality returns LowCardinality for Int128. func (c *ColInt128) LowCardinality() *ColLowCardinality[Int128] { return &ColLowCardinality[Int128]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go index 5902d3f9..b38a58da 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go @@ -53,3 +53,7 @@ func (c ColInt128) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColInt128) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go index c5862fff..80985010 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColInt128) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColInt128) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 128 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go index 212801d9..d2d0795c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go @@ -42,7 +42,7 @@ func (c *ColInt16) AppendArr(vs []int16) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Int16 . +// LowCardinality returns LowCardinality for Int16. func (c *ColInt16) LowCardinality() *ColLowCardinality[int16] { return &ColLowCardinality[int16]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go index 75523a44..2c85f106 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go @@ -53,3 +53,7 @@ func (c ColInt16) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColInt16) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go index 6ba5e50c..b994fb41 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColInt16) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColInt16) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 16 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go index 5d7454b5..05c5073f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go @@ -42,7 +42,7 @@ func (c *ColInt256) AppendArr(vs []Int256) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Int256 . 
+// LowCardinality returns LowCardinality for Int256. func (c *ColInt256) LowCardinality() *ColLowCardinality[Int256] { return &ColLowCardinality[Int256]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go index 0b9f8f1c..9c0589d6 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go @@ -53,3 +53,7 @@ func (c ColInt256) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColInt256) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go index 2433bc92..3c49eadb 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColInt256) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColInt256) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 256 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go index 46b09585..63d4cddc 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go @@ -42,7 +42,7 @@ func (c *ColInt32) AppendArr(vs []int32) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Int32 . +// LowCardinality returns LowCardinality for Int32. func (c *ColInt32) LowCardinality() *ColLowCardinality[int32] { return &ColLowCardinality[int32]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go index 52f78c14..54c6a2c2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go @@ -53,3 +53,7 @@ func (c ColInt32) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColInt32) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go index b2e10256..8236c9d5 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColInt32) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColInt32) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go index 4c8875c1..01061c0c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go @@ -42,7 +42,7 @@ func (c *ColInt64) AppendArr(vs []int64) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Int64 . +// LowCardinality returns LowCardinality for Int64. 
func (c *ColInt64) LowCardinality() *ColLowCardinality[int64] { return &ColLowCardinality[int64]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go index 400367d3..e2dba725 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go @@ -53,3 +53,7 @@ func (c ColInt64) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColInt64) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go index 5c6f2658..aa15d1ea 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColInt64) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColInt64) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 64 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go index 98a71a28..32f30039 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go @@ -42,7 +42,7 @@ func (c *ColInt8) AppendArr(vs []int8) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for Int8 . +// LowCardinality returns LowCardinality for Int8. func (c *ColInt8) LowCardinality() *ColLowCardinality[int8] { return &ColLowCardinality[int8]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go index a79459d2..343b6132 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go @@ -42,3 +42,7 @@ func (c ColInt8) EncodeColumn(b *Buffer) { b.Buf[i+start] = uint8(v[i]) } } + +func (c ColInt8) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go index 1c62c7db..daa67152 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go @@ -37,3 +37,12 @@ func (c ColInt8) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColInt8) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + src := *(*[]byte)(unsafe.Pointer(&v)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go b/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go index 57bb2e39..c9ecb4c4 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go @@ -110,3 +110,7 @@ func (c *ColInterval) Reset() { func (c ColInterval) EncodeColumn(b *Buffer) { c.Values.EncodeColumn(b) } + +func (c ColInterval) WriteColumn(w *Writer) { + c.Values.WriteColumn(w) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go index 4c7a0bc9..7559e79c 100644 --- 
a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go @@ -42,7 +42,7 @@ func (c *ColIPv4) AppendArr(vs []IPv4) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for IPv4 . +// LowCardinality returns LowCardinality for IPv4. func (c *ColIPv4) LowCardinality() *ColLowCardinality[IPv4] { return &ColLowCardinality[IPv4]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go index 8b0b7902..cd687b2e 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go @@ -53,3 +53,7 @@ func (c ColIPv4) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColIPv4) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go index 5fc0b7c7..1039fc15 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColIPv4) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColIPv4) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go index 5907bd71..cff4ee95 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go @@ -42,7 +42,7 @@ func (c *ColIPv6) AppendArr(vs []IPv6) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for IPv6 . +// LowCardinality returns LowCardinality for IPv6. 
func (c *ColIPv6) LowCardinality() *ColLowCardinality[IPv6] { return &ColLowCardinality[IPv6]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go index 9a5870d2..a2d317f9 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go @@ -53,3 +53,7 @@ func (c ColIPv6) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColIPv6) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go index 5650b498..e4fe0a02 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColIPv6) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColIPv6) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 128 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_json_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_json_str.go new file mode 100644 index 00000000..069a5260 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_json_str.go @@ -0,0 +1,164 @@ +package proto + +import ( + "github.com/go-faster/errors" +) + +const JSONStringSerializationVersion uint64 = 1 + +// ColJSONStr represents String column. +// +// Use ColJSONBytes for []bytes ColumnOf implementation. +type ColJSONStr struct { + Str ColStr +} + +// Append string to column. +func (c *ColJSONStr) Append(v string) { + c.Str.Append(v) +} + +// AppendBytes append byte slice as string to column. +func (c *ColJSONStr) AppendBytes(v []byte) { + c.Str.AppendBytes(v) +} + +func (c *ColJSONStr) AppendArr(v []string) { + c.Str.AppendArr(v) +} + +// Compile-time assertions for ColJSONStr. +var ( + _ ColInput = ColJSONStr{} + _ ColResult = (*ColJSONStr)(nil) + _ Column = (*ColJSONStr)(nil) + _ ColumnOf[string] = (*ColJSONStr)(nil) + _ Arrayable[string] = (*ColJSONStr)(nil) + _ StateEncoder = (*ColJSONStr)(nil) + _ StateDecoder = (*ColJSONStr)(nil) +) + +// Type returns ColumnType of JSON. +func (ColJSONStr) Type() ColumnType { + return ColumnTypeJSON +} + +// Rows returns count of rows in column. +func (c ColJSONStr) Rows() int { + return c.Str.Rows() +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColJSONStr) Reset() { + c.Str.Reset() +} + +// EncodeState encodes the JSON serialization version +func (c *ColJSONStr) EncodeState(b *Buffer) { + b.PutUInt64(JSONStringSerializationVersion) +} + +// EncodeColumn encodes String rows to *Buffer. +func (c ColJSONStr) EncodeColumn(b *Buffer) { + c.Str.EncodeColumn(b) +} + +// WriteColumn writes JSON rows to *Writer. +func (c ColJSONStr) WriteColumn(w *Writer) { + c.Str.WriteColumn(w) +} + +// ForEach calls f on each string from column. +func (c ColJSONStr) ForEach(f func(i int, s string) error) error { + return c.Str.ForEach(f) +} + +// First returns the first row of the column. +func (c ColJSONStr) First() string { + return c.Str.First() +} + +// Row returns row with number i. +func (c ColJSONStr) Row(i int) string { + return c.Str.Row(i) +} + +// RowBytes returns row with number i as byte slice. 
+func (c ColJSONStr) RowBytes(i int) []byte { + return c.Str.RowBytes(i) +} + +// ForEachBytes calls f on each string from column as byte slice. +func (c ColJSONStr) ForEachBytes(f func(i int, b []byte) error) error { + return c.Str.ForEachBytes(f) +} + +// DecodeState decodes the JSON serialization version +func (c *ColJSONStr) DecodeState(r *Reader) error { + jsonSerializationVersion, err := r.UInt64() + if err != nil { + return errors.Wrap(err, "failed to read json serialization version") + } + + if jsonSerializationVersion != JSONStringSerializationVersion { + return errors.Errorf("received invalid JSON string serialization version %d. Setting \"output_format_native_write_json_as_string\" must be enabled.", jsonSerializationVersion) + } + + return nil +} + +// DecodeColumn decodes String rows from *Reader. +func (c *ColJSONStr) DecodeColumn(r *Reader, rows int) error { + return c.Str.DecodeColumn(r, rows) +} + +// LowCardinality returns LowCardinality(JSON). +func (c *ColJSONStr) LowCardinality() *ColLowCardinality[string] { + return c.Str.LowCardinality() +} + +// Array is helper that creates Array(JSON). +func (c *ColJSONStr) Array() *ColArr[string] { + return c.Str.Array() +} + +// Nullable is helper that creates Nullable(JSON). +func (c *ColJSONStr) Nullable() *ColNullable[string] { + return c.Str.Nullable() +} + +// ColJSONBytes is ColJSONStr wrapper to be ColumnOf for []byte. +type ColJSONBytes struct { + ColJSONStr +} + +// Row returns row with number i. +func (c ColJSONBytes) Row(i int) []byte { + return c.RowBytes(i) +} + +// Append byte slice to column. +func (c *ColJSONBytes) Append(v []byte) { + c.AppendBytes(v) +} + +// AppendArr append slice of byte slices to column. +func (c *ColJSONBytes) AppendArr(v [][]byte) { + for _, s := range v { + c.Append(s) + } +} + +// Array is helper that creates Array(JSON). +func (c *ColJSONBytes) Array() *ColArr[[]byte] { + return &ColArr[[]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable(JSON). +func (c *ColJSONBytes) Nullable() *ColNullable[[]byte] { + return &ColNullable[[]byte]{ + Values: c, + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go index ffed5809..4471fb89 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go @@ -230,6 +230,41 @@ func (c *ColLowCardinality[T]) EncodeColumn(b *Buffer) { } } +func (c *ColLowCardinality[T]) WriteColumn(w *Writer) { + // Using pointer receiver as Prepare() is expected to be called before + // encoding. + + if c.Rows() == 0 { + // Skipping encoding entirely. + return + } + + w.ChainBuffer(func(b *Buffer) { + // Meta encodes whether reader should update + // low cardinality metadata and keys column type. + meta := cardinalityUpdateAll | int64(c.key) + b.PutInt64(meta) + + // Writing index (dictionary). + b.PutInt64(int64(c.index.Rows())) + }) + c.index.WriteColumn(w) + + w.ChainBuffer(func(b *Buffer) { + b.PutInt64(int64(c.Rows())) + }) + switch c.key { + case KeyUInt8: + c.keys8.WriteColumn(w) + case KeyUInt16: + c.keys16.WriteColumn(w) + case KeyUInt32: + c.keys32.WriteColumn(w) + case KeyUInt64: + c.keys64.WriteColumn(w) + } +} + func (c *ColLowCardinality[T]) Reset() { for k := range c.kv { delete(c.kv, k) @@ -286,17 +321,6 @@ func (c ColLowCardinality[T]) Rows() int { // Prepare column for ingestion. 
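The new col_json_str.go above adds ColJSONStr and ColJSONBytes, which carry JSON values as plain strings and gate decoding on a string-serialization version (the server must have output_format_native_write_json_as_string enabled). A short usage sketch built only from the methods shown in the new file:

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var col proto.ColJSONStr
	// JSON documents are appended as strings or byte slices.
	col.Append(`{"user":"alice","visits":3}`)
	col.AppendBytes([]byte(`{"user":"bob","visits":1}`))
	fmt.Println(col.Type(), col.Rows()) // JSON 2
}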
func (c *ColLowCardinality[T]) Prepare() error { - // Select minimum possible size for key. - if n := len(c.Values); n < math.MaxUint8 { - c.key = KeyUInt8 - } else if n < math.MaxUint16 { - c.key = KeyUInt16 - } else if uint32(n) < math.MaxUint32 { - c.key = KeyUInt32 - } else { - c.key = KeyUInt64 - } - // Allocate keys slice. c.keys = append(c.keys[:0], make([]int, len(c.Values))...) if c.kv == nil { @@ -317,6 +341,17 @@ func (c *ColLowCardinality[T]) Prepare() error { c.keys[i] = idx } + // Select minimum possible size for key. + if n := last; n < math.MaxUint8 { + c.key = KeyUInt8 + } else if n < math.MaxUint16 { + c.key = KeyUInt16 + } else if uint32(n) < math.MaxUint32 { + c.key = KeyUInt32 + } else { + c.key = KeyUInt64 + } + // Fill key column with key indexes. switch c.key { case KeyUInt8: diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go index 665dc20c..99286345 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go @@ -155,3 +155,28 @@ func (c ColLowCardinalityRaw) EncodeColumn(b *Buffer) { b.PutInt64(int64(k.Rows())) k.EncodeColumn(b) } + +func (c ColLowCardinalityRaw) WriteColumn(w *Writer) { + if c.Rows() == 0 { + // Skipping encoding entirely. + return + } + + w.ChainBuffer(func(b *Buffer) { + // Meta encodes whether reader should update + // low cardinality metadata and keys column type. + meta := cardinalityUpdateAll | int64(c.Key) + b.PutInt64(meta) + + // Writing index (dictionary). + b.PutInt64(int64(c.Index.Rows())) + }) + c.Index.WriteColumn(w) + + // Sequence of values as indexes in dictionary. + k := c.Keys() + w.ChainBuffer(func(b *Buffer) { + b.PutInt64(int64(k.Rows())) + }) + k.WriteColumn(w) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_map.go b/vendor/github.com/ClickHouse/ch-go/proto/col_map.go index 90925fb2..e27781dd 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_map.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_map.go @@ -164,6 +164,16 @@ func (c ColMap[K, V]) EncodeColumn(b *Buffer) { c.Values.EncodeColumn(b) } +func (c ColMap[K, V]) WriteColumn(w *Writer) { + if c.Rows() == 0 { + return + } + + c.Offsets.WriteColumn(w) + c.Keys.WriteColumn(w) + c.Values.WriteColumn(w) +} + // Prepare ensures Preparable column propagation. func (c ColMap[K, V]) Prepare() error { if v, ok := c.Keys.(Preparable); ok { @@ -181,18 +191,18 @@ func (c ColMap[K, V]) Prepare() error { // Infer ensures Inferable column propagation. 
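The Prepare change above also fixes how the LowCardinality key width is chosen: it is now derived from the number of distinct dictionary entries (known only after deduplication) rather than the raw row count, so a large column with few distinct values keeps one-byte keys. The decision in isolation, mirroring the corrected logic:

package main

import (
	"fmt"
	"math"
)

// keyFor picks the smallest index width for a LowCardinality dictionary;
// it is driven by the number of distinct values, not by the row count.
func keyFor(distinct int) string {
	switch {
	case distinct < math.MaxUint8:
		return "UInt8"
	case distinct < math.MaxUint16:
		return "UInt16"
	case uint32(distinct) < math.MaxUint32:
		return "UInt32"
	default:
		return "UInt64"
	}
}

func main() {
	// A million rows with only 200 distinct values still use one-byte keys.
	fmt.Println(keyFor(200)) // UInt8
}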
func (c *ColMap[K, V]) Infer(t ColumnType) error { - elems := strings.Split(string(t.Elem()), ",") - if len(elems) != 2 { + keytype, valtype, hascomma := strings.Cut(string(t.Elem()), ",") + if !hascomma || strings.ContainsRune(valtype, ',') { return errors.New("invalid map type") } if v, ok := c.Keys.(Inferable); ok { - ct := ColumnType(strings.TrimSpace(elems[0])) + ct := ColumnType(strings.TrimSpace(keytype)) if err := v.Infer(ct); err != nil { return errors.Wrap(err, "infer data") } } if v, ok := c.Values.(Inferable); ok { - ct := ColumnType(strings.TrimSpace(elems[1])) + ct := ColumnType(strings.TrimSpace(valtype)) if err := v.Infer(ct); err != nil { return errors.Wrap(err, "infer data") } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_map_go123.go b/vendor/github.com/ClickHouse/ch-go/proto/col_map_go123.go new file mode 100644 index 00000000..e9d83c58 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_map_go123.go @@ -0,0 +1,25 @@ +//go:build go1.23 + +package proto + +import "iter" + +// RowRange returns a [iter.Seq2] iterator over i-th row. +func (c ColMap[K, V]) RowRange(i int) iter.Seq2[K, V] { + var start int + end := int(c.Offsets[i]) + if i > 0 { + start = int(c.Offsets[i-1]) + } + + return func(yield func(K, V) bool) { + for idx := start; idx < end; idx++ { + if !yield( + c.Keys.Row(idx), + c.Values.Row(idx), + ) { + return + } + } + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go b/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go index 1a825091..d72eeba6 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go @@ -71,3 +71,7 @@ func (c ColNothing) EncodeColumn(b *Buffer) { } b.PutRaw(make([]byte, c)) } + +func (c ColNothing) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go b/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go index 516245f0..fd3615d2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go @@ -117,6 +117,12 @@ func (c ColNullable[T]) Row(i int) Nullable[T] { } } +func (c *ColNullable[T]) Array() *ColArr[Nullable[T]] { + return &ColArr[Nullable[T]]{ + Data: c, + } +} + func (c *ColNullable[T]) Reset() { c.Nulls.Reset() c.Values.Reset() @@ -127,6 +133,11 @@ func (c ColNullable[T]) EncodeColumn(b *Buffer) { c.Values.EncodeColumn(b) } +func (c ColNullable[T]) WriteColumn(w *Writer) { + c.Nulls.WriteColumn(w) + c.Values.WriteColumn(w) +} + func (c ColNullable[T]) IsElemNull(i int) bool { if i < c.Rows() { return c.Nulls[i] == boolTrue diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_point.go b/vendor/github.com/ClickHouse/ch-go/proto/col_point.go index 0e1549ff..5d7834f8 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_point.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_point.go @@ -61,3 +61,8 @@ func (c ColPoint) EncodeColumn(b *Buffer) { c.X.EncodeColumn(b) c.Y.EncodeColumn(b) } + +func (c ColPoint) WriteColumn(w *Writer) { + c.X.WriteColumn(w) + c.Y.WriteColumn(w) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go b/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go index 325a17b3..d56e3571 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go @@ -82,3 +82,17 @@ func (c *ColRawOf[X]) DecodeColumn(r *Reader, rows int) error { } return nil } + +// 
WriteColumn write ColRawOf rows to *Writer. +func (c ColRawOf[X]) WriteColumn(w *Writer) { + if len(c) == 0 { + return + } + var x X + size := unsafe.Sizeof(x) // #nosec G103 + s := *(*slice)(unsafe.Pointer(&c)) // #nosec G103 + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) // #nosec G103 + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_str.go index 8f48ad7f..9786f0ee 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_str.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_str.go @@ -76,6 +76,20 @@ func (c ColStr) EncodeColumn(b *Buffer) { } } +// WriteColumn writes String rows to *Writer. +func (c ColStr) WriteColumn(w *Writer) { + buf := make([]byte, binary.MaxVarintLen64) + // Writing values from c.Buf directly might improve performance if [ColStr] contains a few rows of very long strings. + // However, most of the time it is quite opposite, so we copy data. + w.ChainBuffer(func(b *Buffer) { + for _, p := range c.Pos { + n := binary.PutUvarint(buf, uint64(p.End-p.Start)) + b.PutRaw(buf[:n]) + b.PutRaw(c.Buf[p.Start:p.End]) + } + }) +} + // ForEach calls f on each string from column. func (c ColStr) ForEach(f func(i int, s string) error) error { return c.ForEachBytes(func(i int, b []byte) error { diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go b/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go index ac7ad6c8..7ee1bef0 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go @@ -167,3 +167,9 @@ func (c ColTuple) EncodeColumn(b *Buffer) { v.EncodeColumn(b) } } + +func (c ColTuple) WriteColumn(w *Writer) { + for _, v := range c { + v.WriteColumn(w) + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go index e34f07e9..58b83a34 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go @@ -42,7 +42,7 @@ func (c *ColUInt128) AppendArr(vs []UInt128) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for UInt128 . +// LowCardinality returns LowCardinality for UInt128. 
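ColRawOf above and the unsafe integer variants below all reinterpret the column's backing array as raw bytes and chain it to the writer without copying. A standalone sketch of that reinterpretation, using the portable unsafe.Slice idiom instead of the package-internal slice header, with a hypothetical asBytes helper and assuming the platform byte order matches the native wire format:

```go
package main

import (
	"fmt"
	"unsafe"
)

// asBytes views a []uint32 as its raw backing bytes, the same zero-copy
// trick the unsafe WriteColumn variants use before calling Writer.ChainWrite.
func asBytes(v []uint32) []byte {
	if len(v) == 0 {
		return nil
	}
	const size = 32 / 8
	return unsafe.Slice((*byte)(unsafe.Pointer(&v[0])), len(v)*size)
}

func main() {
	col := []uint32{1, 2}
	b := asBytes(col)
	fmt.Println(len(b), b) // 8 bytes backing the two values, no copy made
}
```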
func (c *ColUInt128) LowCardinality() *ColLowCardinality[UInt128] { return &ColLowCardinality[UInt128]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go index bbe55dce..785cc9c4 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go @@ -53,3 +53,7 @@ func (c ColUInt128) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColUInt128) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go index 5989b5f3..4ee0aa00 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColUInt128) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColUInt128) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 128 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go index 7bc8ba63..4d41de4a 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go @@ -42,7 +42,7 @@ func (c *ColUInt16) AppendArr(vs []uint16) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for UInt16 . +// LowCardinality returns LowCardinality for UInt16. func (c *ColUInt16) LowCardinality() *ColLowCardinality[uint16] { return &ColLowCardinality[uint16]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go index 219f3a6c..3d45fbef 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go @@ -53,3 +53,7 @@ func (c ColUInt16) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColUInt16) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go index d98d9534..126c46ec 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColUInt16) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColUInt16) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 16 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go index b68a119e..0c7d9122 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go @@ -42,7 +42,7 @@ func (c *ColUInt256) AppendArr(vs []UInt256) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for UInt256 . +// LowCardinality returns LowCardinality for UInt256. 
func (c *ColUInt256) LowCardinality() *ColLowCardinality[UInt256] { return &ColLowCardinality[UInt256]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go index 68633e19..4dda2228 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go @@ -53,3 +53,7 @@ func (c ColUInt256) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColUInt256) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go index 02488d3f..df657cc9 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColUInt256) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColUInt256) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 256 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go index 41abca5b..4f205d22 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go @@ -42,7 +42,7 @@ func (c *ColUInt32) AppendArr(vs []uint32) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for UInt32 . +// LowCardinality returns LowCardinality for UInt32. func (c *ColUInt32) LowCardinality() *ColLowCardinality[uint32] { return &ColLowCardinality[uint32]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go index 0bc7de95..11ebb114 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go @@ -53,3 +53,7 @@ func (c ColUInt32) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColUInt32) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go index 3ddfa760..fad9cb13 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColUInt32) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColUInt32) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 32 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go index 4521cd45..f3471d18 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go @@ -42,7 +42,7 @@ func (c *ColUInt64) AppendArr(vs []uint64) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for UInt64 . +// LowCardinality returns LowCardinality for UInt64. 
func (c *ColUInt64) LowCardinality() *ColLowCardinality[uint64] { return &ColLowCardinality[uint64]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go index deea8a48..ada64d55 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go @@ -53,3 +53,7 @@ func (c ColUInt64) EncodeColumn(b *Buffer) { offset += size } } + +func (c ColUInt64) WriteColumn(w *Writer) { + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go index 664f80f0..bb73e18e 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go @@ -43,3 +43,18 @@ func (c ColUInt64) EncodeColumn(b *Buffer) { dst := b.Buf[offset:] copy(dst, src) } + +func (c ColUInt64) WriteColumn(w *Writer) { + v := c + if len(v) == 0 { + return + } + const size = 64 / 8 + + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + + src := *(*[]byte)(unsafe.Pointer(&s)) + w.ChainWrite(src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go index 02c05161..e34a0675 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go @@ -42,7 +42,7 @@ func (c *ColUInt8) AppendArr(vs []uint8) { *c = append(*c, vs...) } -// LowCardinality returns LowCardinality for UInt8 . +// LowCardinality returns LowCardinality for UInt8. func (c *ColUInt8) LowCardinality() *ColLowCardinality[uint8] { return &ColLowCardinality[uint8]{ index: c, diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go index ec5ff190..2acccc6a 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go @@ -31,3 +31,7 @@ func (c ColUInt8) EncodeColumn(b *Buffer) { } b.Buf = append(b.Buf, v...) } + +func (c ColUInt8) WriteColumn(w *Writer) { + w.ChainWrite([]byte(c)) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go index 8de94088..d74508b5 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go @@ -34,3 +34,12 @@ func (c ColUUID) EncodeColumn(b *Buffer) { } bswap.Swap64(b.Buf) // BE <-> LE } + +// WriteColumn encodes ColUUID rows to *Writer. +func (c ColUUID) WriteColumn(w *Writer) { + if len(c) == 0 { + return + } + // Can't write UUID as-is: bswap is required. + w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go index 18fa73fc..877bc4d4 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go @@ -47,3 +47,12 @@ func (c ColUUID) EncodeColumn(b *Buffer) { copy(dst, src) bswap.Swap64(dst) // BE <-> LE } + +// WriteColumn encodes ColUUID rows to *Writer. +func (c ColUUID) WriteColumn(w *Writer) { + if len(c) == 0 { + return + } + // Can't write UUID as-is: bswap is required. 
+ w.ChainBuffer(c.EncodeColumn) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/column.go b/vendor/github.com/ClickHouse/ch-go/proto/column.go index 1cdeb31b..4f0dbff0 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/column.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/column.go @@ -2,6 +2,7 @@ package proto import ( "fmt" + "strconv" "strings" "github.com/go-faster/errors" @@ -12,6 +13,7 @@ type ColInput interface { Type() ColumnType Rows() int EncodeColumn(b *Buffer) + WriteColumn(w *Writer) } // ColResult column. @@ -73,8 +75,8 @@ func (c ColumnType) Base() ColumnType { } var ( v = string(c) - start = strings.Index(v, "(") - end = strings.LastIndex(v, ")") + start = strings.IndexByte(v, '(') + end = strings.LastIndexByte(v, ')') ) if start <= 0 || end <= 0 || end < start { return c @@ -82,30 +84,64 @@ func (c ColumnType) Base() ColumnType { return c[:start] } +// reduces Decimal(P, ...) to Decimal32/Decimal64/Decimal128/Decimal256 +// returns c if any errors occur during conversion +func (c ColumnType) decimalDowncast() ColumnType { + if c.Base() != ColumnTypeDecimal { + return c + } + elem := c.Elem() + precStr, _, _ := strings.Cut(string(elem), ",") + precStr = strings.TrimSpace(precStr) + prec, err := strconv.Atoi(precStr) + if err != nil { + return c + } + switch { + case prec < 10: + return ColumnTypeDecimal32 + case prec < 19: + return ColumnTypeDecimal64 + case prec < 39: + return ColumnTypeDecimal128 + case prec < 77: + return ColumnTypeDecimal256 + default: + return c + } +} + // Conflicts reports whether two types conflict. func (c ColumnType) Conflicts(b ColumnType) bool { if c == b { return false } - { - a := c - if b.Base() == ColumnTypeEnum8 || b.Base() == ColumnTypeEnum16 { - a, b = b, a - } - switch { - case a.Base() == ColumnTypeEnum8 && b == ColumnTypeInt8: - return false - case a.Base() == ColumnTypeEnum16 && b == ColumnTypeInt16: - return false - } + cBase := c.Base() + bBase := b.Base() + if (cBase == ColumnTypeEnum8 && b == ColumnTypeInt8) || + (cBase == ColumnTypeEnum16 && b == ColumnTypeInt16) || + (bBase == ColumnTypeEnum8 && c == ColumnTypeInt8) || + (bBase == ColumnTypeEnum16 && c == ColumnTypeInt16) { + return false + } + if cBase == ColumnTypeDecimal || bBase == ColumnTypeDecimal { + return c.decimalDowncast() != b.decimalDowncast() } - if c.Base() != b.Base() { + + if cBase != bBase { return true } + switch cBase { + case ColumnTypeEnum8, ColumnTypeEnum16: + return false + } + if c.normalizeCommas() == b.normalizeCommas() { return false } - switch c.Base() { + switch cBase { + case ColumnTypeArray, ColumnTypeNullable, ColumnTypeLowCardinality: + return c.Elem().Conflicts(b.Elem()) case ColumnTypeDateTime, ColumnTypeDateTime64: // TODO(ernado): improve check return false @@ -149,8 +185,8 @@ func (c ColumnType) Elem() ColumnType { } var ( v = string(c) - start = strings.Index(v, "(") - end = strings.LastIndex(v, ")") + start = strings.IndexByte(v, '(') + end = strings.LastIndexByte(v, ')') ) if start <= 0 || end <= 0 || end < start { // No element. 
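With decimalDowncast in place, a parameterized Decimal(P, S) reported by the server no longer conflicts with the concrete DecimalN column the client uses: P < 10 maps to Decimal32, P < 19 to Decimal64, P < 39 to Decimal128 and P < 77 to Decimal256. A small sketch of the exported behaviour, using only the Conflicts API and column type constants shown above:

```go
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	// Matching precision ranges: no conflict.
	fmt.Println(proto.ColumnType("Decimal(9, 2)").Conflicts(proto.ColumnTypeDecimal32))    // false
	fmt.Println(proto.ColumnType("Decimal(18, 4)").Conflicts(proto.ColumnTypeDecimal64))   // false
	fmt.Println(proto.ColumnType("Decimal(38, 10)").Conflicts(proto.ColumnTypeDecimal128)) // false

	// Precision 40 downcasts to Decimal256, so it still conflicts with Decimal64.
	fmt.Println(proto.ColumnType("Decimal(40, 2)").Conflicts(proto.ColumnTypeDecimal64)) // true
}
```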
@@ -206,6 +242,7 @@ const ( ColumnTypeBool ColumnType = "Bool" ColumnTypeTuple ColumnType = "Tuple" ColumnTypeNullable ColumnType = "Nullable" + ColumnTypeDecimal ColumnType = "Decimal" ColumnTypeDecimal32 ColumnType = "Decimal32" ColumnTypeDecimal64 ColumnType = "Decimal64" ColumnTypeDecimal128 ColumnType = "Decimal128" @@ -213,6 +250,7 @@ const ( ColumnTypePoint ColumnType = "Point" ColumnTypeInterval ColumnType = "Interval" ColumnTypeNothing ColumnType = "Nothing" + ColumnTypeJSON ColumnType = "JSON" ) // colWrap wraps Column with type t. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/date.go b/vendor/github.com/ClickHouse/ch-go/proto/date.go index 3f850327..44a3d58c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/date.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/date.go @@ -33,6 +33,9 @@ func (d Date) String() string { // ToDate returns Date of time.Time. func ToDate(t time.Time) Date { + if t.IsZero() { + return 0 + } _, offset := t.Zone() return Date((t.Unix() + int64(offset)) / secInDay) } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/date32.go b/vendor/github.com/ClickHouse/ch-go/proto/date32.go index 6c1330f7..e67f69d2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/date32.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/date32.go @@ -25,6 +25,9 @@ func (d Date32) String() string { // ToDate32 returns Date32 of time.Time. func ToDate32(t time.Time) Date32 { + if t.IsZero() { + return 0 + } _, offset := t.Zone() return Date32((t.Unix() + int64(offset)) / secInDay) } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/datetime.go b/vendor/github.com/ClickHouse/ch-go/proto/datetime.go index 4917339e..a7f48e33 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/datetime.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/datetime.go @@ -15,9 +15,6 @@ func ToDateTime(t time.Time) DateTime { // Time returns DateTime as time.Time. func (d DateTime) Time() time.Time { - if d == 0 { - return time.Time{} - } // https://clickhouse.com/docs/en/sql-reference/data-types/datetime/#usage-remarks // ClickHouse stores UTC timestamps that are timezone-agnostic. return time.Unix(int64(d), 0) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/datetime64.go b/vendor/github.com/ClickHouse/ch-go/proto/datetime64.go index 6d2a9997..f60ba49b 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/datetime64.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/datetime64.go @@ -57,9 +57,6 @@ func ToDateTime64(t time.Time, p Precision) DateTime64 { // Time returns DateTime64 as time.Time. func (d DateTime64) Time(p Precision) time.Time { - if d == 0 { - return time.Time{} - } nsec := int64(d) * p.Scale() return time.Unix(nsec/1e9, nsec%1e9) } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/feature.go b/vendor/github.com/ClickHouse/ch-go/proto/feature.go index b06deac7..68cdffa5 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/feature.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/feature.go @@ -32,6 +32,7 @@ const ( FeatureAddendum Feature = 54458 FeatureParameters Feature = 54459 FeatureServerQueryTimeInProgress Feature = 54460 + FeatureJSONStrings Feature = 54475 ) // Version reports protocol version when Feature was introduced. 
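The date changes above make the zero value explicit on both ends: ToDate/ToDate32 now map a zero time.Time to day 0 instead of a negative day count computed from year 1, and DateTime(0).Time()/DateTime64(0).Time() now return the Unix epoch rather than a zero time.Time. A short sketch of the resulting behaviour:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	// A zero time.Time converts to Date/Date32 zero (the epoch day).
	var zero time.Time
	fmt.Println(proto.ToDate(zero) == 0, proto.ToDate32(zero) == 0) // true true

	// DateTime(0).Time() now yields the Unix epoch instead of time.Time{}.
	fmt.Println(proto.DateTime(0).Time().UTC())
}
```

The same group of hunks also registers FeatureJSONStrings (protocol revision 54475), which gates the JSON-as-string read path used by ColJSONStr.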
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/feature_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/feature_enum.go index 95b453f3..280aa077 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/feature_enum.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/feature_enum.go @@ -7,8 +7,8 @@ import ( "strings" ) -const _FeatureName = "TempTablesBlockInfoTimezoneQuotaKeyInClientInfoDisplayNameVersionPatchServerLogsColumnDefaultsMetadataClientWriteInfoSettingsSerializedAsStringsInterServerSecretOpenTelemetryXForwardedForInClientInfoRefererInClientInfoDistributedDepthQueryStartTimeProfileEventsParallelReplicasCustomSerializationQuotaKeyParametersServerQueryTimeInProgress" -const _FeatureLowerName = "temptablesblockinfotimezonequotakeyinclientinfodisplaynameversionpatchserverlogscolumndefaultsmetadataclientwriteinfosettingsserializedasstringsinterserversecretopentelemetryxforwardedforinclientinforefererinclientinfodistributeddepthquerystarttimeprofileeventsparallelreplicascustomserializationquotakeyparametersserverquerytimeinprogress" +const _FeatureName = "TempTablesBlockInfoTimezoneQuotaKeyInClientInfoDisplayNameVersionPatchServerLogsColumnDefaultsMetadataClientWriteInfoSettingsSerializedAsStringsInterServerSecretOpenTelemetryXForwardedForInClientInfoRefererInClientInfoDistributedDepthQueryStartTimeProfileEventsParallelReplicasCustomSerializationQuotaKeyParametersServerQueryTimeInProgressJSONStrings" +const _FeatureLowerName = "temptablesblockinfotimezonequotakeyinclientinfodisplaynameversionpatchserverlogscolumndefaultsmetadataclientwriteinfosettingsserializedasstringsinterserversecretopentelemetryxforwardedforinclientinforefererinclientinfodistributeddepthquerystarttimeprofileeventsparallelreplicascustomserializationquotakeyparametersserverquerytimeinprogressjsonstrings" var _FeatureMap = map[Feature]string{ 50264: _FeatureName[0:10], @@ -33,6 +33,7 @@ var _FeatureMap = map[Feature]string{ 54458: _FeatureName[296:304], 54459: _FeatureName[304:314], 54460: _FeatureName[314:339], + 54475: _FeatureName[339:350], } func (i Feature) String() string { @@ -68,9 +69,10 @@ func _FeatureNoOp() { _ = x[FeatureQuotaKey-(54458)] _ = x[FeatureParameters-(54459)] _ = x[FeatureServerQueryTimeInProgress-(54460)] + _ = x[FeatureJSONStrings-(54475)] } -var _FeatureValues = []Feature{FeatureTempTables, FeatureBlockInfo, FeatureTimezone, FeatureQuotaKeyInClientInfo, FeatureDisplayName, FeatureVersionPatch, FeatureServerLogs, FeatureColumnDefaultsMetadata, FeatureClientWriteInfo, FeatureSettingsSerializedAsStrings, FeatureInterServerSecret, FeatureOpenTelemetry, FeatureXForwardedForInClientInfo, FeatureRefererInClientInfo, FeatureDistributedDepth, FeatureQueryStartTime, FeatureProfileEvents, FeatureParallelReplicas, FeatureCustomSerialization, FeatureQuotaKey, FeatureParameters, FeatureServerQueryTimeInProgress} +var _FeatureValues = []Feature{FeatureTempTables, FeatureBlockInfo, FeatureTimezone, FeatureQuotaKeyInClientInfo, FeatureDisplayName, FeatureVersionPatch, FeatureServerLogs, FeatureColumnDefaultsMetadata, FeatureClientWriteInfo, FeatureSettingsSerializedAsStrings, FeatureInterServerSecret, FeatureOpenTelemetry, FeatureXForwardedForInClientInfo, FeatureRefererInClientInfo, FeatureDistributedDepth, FeatureQueryStartTime, FeatureProfileEvents, FeatureParallelReplicas, FeatureCustomSerialization, FeatureQuotaKey, FeatureParameters, FeatureServerQueryTimeInProgress, FeatureJSONStrings} var _FeatureNameToValueMap = map[string]Feature{ _FeatureName[0:10]: FeatureTempTables, @@ -117,6 +119,8 @@ var 
_FeatureNameToValueMap = map[string]Feature{ _FeatureLowerName[304:314]: FeatureParameters, _FeatureName[314:339]: FeatureServerQueryTimeInProgress, _FeatureLowerName[314:339]: FeatureServerQueryTimeInProgress, + _FeatureName[339:350]: FeatureJSONStrings, + _FeatureLowerName[339:350]: FeatureJSONStrings, } var _FeatureNames = []string{ @@ -142,6 +146,7 @@ var _FeatureNames = []string{ _FeatureName[296:304], _FeatureName[304:314], _FeatureName[314:339], + _FeatureName[339:350], } // FeatureString retrieves an enum value from the enum constants string name. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/writer.go b/vendor/github.com/ClickHouse/ch-go/proto/writer.go new file mode 100644 index 00000000..c3ba4c4a --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/writer.go @@ -0,0 +1,73 @@ +package proto + +import ( + "io" + "net" +) + +// Writer is a column writer. +// +// It helps to reduce memory footprint by writing column using vector I/O. +type Writer struct { + conn io.Writer + + buf *Buffer + bufOffset int + needCut bool + + vec net.Buffers +} + +// NewWriter creates new [Writer]. +func NewWriter(conn io.Writer, buf *Buffer) *Writer { + w := &Writer{ + conn: conn, + buf: buf, + vec: make(net.Buffers, 0, 16), + } + return w +} + +// ChainWrite adds buffer to the vector to write later. +// +// Passed byte slice may be captured until [Writer.Flush] is called. +func (w *Writer) ChainWrite(data []byte) { + w.cutBuffer() + w.vec = append(w.vec, data) +} + +// ChainBuffer creates a temporary buffer and adds it to the vector to write later. +// +// Data is not written immediately, call [Writer.Flush] to flush data. +// +// NB: do not retain buffer. +func (w *Writer) ChainBuffer(cb func(*Buffer)) { + cb(w.buf) +} + +func (w *Writer) cutBuffer() { + newOffset := len(w.buf.Buf) + data := w.buf.Buf[w.bufOffset:newOffset:newOffset] + if len(data) == 0 { + return + } + w.bufOffset = newOffset + w.vec = append(w.vec, data) +} + +func (w *Writer) reset() { + w.bufOffset = 0 + w.needCut = false + w.buf.Reset() + // Do not hold references, to avoid memory leaks. + clear(w.vec) + w.vec = w.vec[:0] +} + +// Flush flushes all data to writer. 
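The new Writer above batches column data for vector I/O: small fragments are staged in the shared Buffer via ChainBuffer, already-contiguous payloads are chained as-is via ChainWrite, and nothing reaches the connection until Flush. A minimal sketch using only the exported API defined above, with a bytes.Buffer standing in for the network connection:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var conn bytes.Buffer // stand-in for the real connection

	w := proto.NewWriter(&conn, new(proto.Buffer))

	// Small header-like values go through the shared scratch buffer.
	w.ChainBuffer(func(b *proto.Buffer) {
		b.PutInt64(42)
	})

	// Large payloads are chained without copying; the slice is retained
	// until Flush hands everything to the connection in one vectored write.
	payload := []byte("column body bytes")
	w.ChainWrite(payload)

	n, err := w.Flush()
	fmt.Println(n, err, conn.Len()) // 8 bytes of header plus the payload
}
```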
+func (w *Writer) Flush() (n int64, err error) { + w.cutBuffer() + n, err = w.vec.WriteTo(w.conn) + w.reset() + return n, err +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore b/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore index 108aa399..2e8e75ba 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore @@ -27,6 +27,7 @@ _testmain.go coverage.txt .idea/** +.vscode/** dev/* .run/** @@ -36,3 +37,6 @@ vendor .terraform.lock.hcl **/.terraform* pipeline.auto.tfvars +*.tfvars + +.env diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/.golangci.yaml b/vendor/github.com/ClickHouse/clickhouse-go/v2/.golangci.yaml index 23e116a4..44520afc 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/.golangci.yaml +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/.golangci.yaml @@ -66,6 +66,6 @@ linters: - gofmt - govet - ineffassign - - importas + - imports - misspell - staticcheck diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md index bcfe72fd..6337527c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md @@ -1,3 +1,464 @@ +# v2.34.0, 2025-04-01 + +## What's Changed + +### Enhancements 🎉 +* allow appending to Decimal column with `string` by @rutaka-n in https://github.com/ClickHouse/clickhouse-go/pull/1532 +* Enhancements for decimal strings by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1535 +### Bug fixes 🐛 +* Add length check to `FixedString` to prevent panic by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1534 +### Other Changes 🛠 +* chore: replace github.com/pkg/errors with stdlib by @hazzik in https://github.com/ClickHouse/clickhouse-go/pull/1530 + +## New Contributors +* @hazzik made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1530 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.33.1...v2.34.0 + +# v2.33.1, 2025-03-18 + +## What's Changed +### Bug fixes 🐛 +* fix concurrent map write errors with context by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1523 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.33.0...v2.33.1 + +# v2.33.0, 2025-03-07 + +## What's Changed +### Enhancements 🎉 +* Add handling for nested structs in named tuples by @Exca-DK in https://github.com/ClickHouse/clickhouse-go/pull/1500 +* Add option to control MaxConnsPerHost for http.Transport by @holycheater in https://github.com/ClickHouse/clickhouse-go/pull/1517 + +### Bug fixes 🐛 +* fix ConnOpenRandom strategy, issue: #1509 by @PalanQu in https://github.com/ClickHouse/clickhouse-go/pull/1510 +* fix: preserve nil elements in map by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1516 + + +## New Contributors +* @PalanQu made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1510 +* @Exca-DK made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1500 +* @holycheater made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1517 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.32.2...v2.33.0 + +# v2.32.2, 2025-02-20 + +## What's Changed +### Bug Fixes 🐛 +* fix: decode prefix of nested columns in Variant by @SpencerTorres in 
https://github.com/ClickHouse/clickhouse-go/pull/1506 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.32.1...v2.32.2 + +# v2.32.1, 2025-02-17 + +## What's Changed +### Bug Fixes 🐛 +* fix: enable Array() support for Variant, Dynamic, JSON by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1501 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.32.0...v2.32.1 + +# v2.32.0, 2025-02-14 + +## What's Changed + +### Enhancements 🎉 +* Add LZ4HC compression method by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1499 + +### Performance 🏎️ +* Removed ~1MB of memory usage in compressor by @pablomatiasgomez and @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1499 + +### Bug fixes 🐛 +* Fixed potential security vulnerability in compression block header length overflow by @santrancisco https://github.com/ClickHouse/clickhouse-go/pull/1499 + +### Other Changes 🛠 +* update compressor functions by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1499 +* change container provider, verify close conn error in tests by @pablomatiasgomez in https://github.com/ClickHouse/clickhouse-go/pull/1497 + +## New Contributors +* @pablomatiasgomez made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1497 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.31.0...v2.32.0 + +# v2.31.0, 2025-02-10 + +## What's Changed + +### Enhancements 🎉 +* `clickhouse.JSON` Serializer interface by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1491 +* use unsafe.Slice/unsafe.StringData by @serprex in https://github.com/ClickHouse/clickhouse-go/pull/1493 + +### Other Changes 🛠 +* JSON read/write benchmarks by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1490 + +## New Contributors +* @serprex made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1493 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.30.3...2.31.0 + +# v2.30.3, 2025-02-03 + +## What's Changed +### Other Changes 🛠 +* fix enum parse logic when its last index is zero by @lobachpavel in https://github.com/ClickHouse/clickhouse-go/pull/1487 + +## New Contributors +* @lobachpavel made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1487 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.30.2...v2.30.3 + +# v2.30.2, 2025-01-30 + +## What's Changed +### Bug fixes 🐛 +* Handle json tag without name override by @matevzmihalic in https://github.com/ClickHouse/clickhouse-go/pull/1478 +* fix: JSON NestedMap + add tests by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1482 + +## New Contributors +* @matevzmihalic made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1478 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.30.1...v2.30.2 + +# v2.30.1, 2025-01-17 + +## What's Changed +### Enhancements 🎉 +* [improvement] Some performance related changes to evaluate by @mdonkers in https://github.com/ClickHouse/clickhouse-go/pull/1426 +* Add Variant Type by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1453 +* Add Dynamic Type by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1454 +* Add JSON Type by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1455 +* stdlib sql return precision for DateTime64 by @SpencerTorres in 
https://github.com/ClickHouse/clickhouse-go/pull/1469 + +### Fixes 🐛 +* [FIX] Minor updates by @barkhayot in https://github.com/ClickHouse/clickhouse-go/pull/1451 +* Fix file descriptor leak in release utility script by @fengjun2016 in https://github.com/ClickHouse/clickhouse-go/pull/1460 +* Remove forced string conversions for Tuple by @SpencerTorres in https://github.com/ClickHouse/clickhouse-go/pull/1465 + +### Other Changes 🛠 +* [Chore] Use defer to close rows in examples by @hayotbisonai in https://github.com/ClickHouse/clickhouse-go/pull/1431 +* Regenerate certificates used in tests by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1441 + + +## New Contributors +* @hayotbisonai made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1431 +* @barkhayot made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1451 +* @fengjun2016 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1460 +* @SpencerTorres made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1453 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.30.0...v2.30.1 + +# v2.30.0, 2024-10-16 + +## What's Changed +### Enhancements 🎉 +* Extended support for HTTP proxy in driver options by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1424 +* Default implementation of column.IterableOrderedMap by @earwin in https://github.com/ClickHouse/clickhouse-go/pull/1417 +### Fixes 🐛 +* Fix serialization for slices of OrderedMap/IterableOrderedMap (#1365) by @earwin in https://github.com/ClickHouse/clickhouse-go/pull/1418 +* Retry on broken pipe in batch by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1423 +### Other Changes 🛠 +* Add 'clickhouse-go-rows-utils' to third-party libraries by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1413 + +## New Contributors +* @earwin made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1418 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.29.0...v2.30.0 + +# v2.29.0, 2024-09-24 + +## What's Changed +### Enhancements 🎉 +* Add ability to handle context cancellations for TCP protocol by @tinybit in https://github.com/ClickHouse/clickhouse-go/pull/1389 +### Other Changes 🛠 +* Add Examples for batch.Column(n).AppendRow in columnar_insert.go by @achmad-dev in https://github.com/ClickHouse/clickhouse-go/pull/1410 + +## New Contributors +* @achmad-dev made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1410 +* @tinybit made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1389 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.28.3...v2.29.0 + +# v2.28.3, 2024-09-12 + +## What's Changed +### Other Changes 🛠 +* Revert the minimum required Go version to 1.21 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1405 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.28.2...v2.28.3 + +# v2.28.2, 2024-08-30 + +## What's Changed +### Fixes 🐛 +* Validate connection in bad state before query execution in the stdlib database/sql driver by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1396 +### Other Changes 🛠 +* Update README with newer Go versions by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1393 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.28.1...v2.28.2 + +# v2.28.1, 
2024-08-27 + +## What's Changed +### Fixes 🐛 +* Recognize empty strings as a valid enum key by @genzgd in https://github.com/ClickHouse/clickhouse-go/pull/1387 +### Other Changes 🛠 +* ClickHouse 24.8 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1385 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.28.0...v2.28.1 + +# v2.28.0, 2024-08-23 + +## What's Changed +### Fixes 🐛 +* Fix Enum column definition parse logic to match ClickHouse spec by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1380 +* Fix support custom serialization in Nested type by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1381 +* Fix panic on nil map append by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1383 +### Other Changes 🛠 +* Remove test coverage for deprecated Object('JSON') type by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1377 +* Remove JSON type use from a context use example by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1379 +* Make sure non-secure port is used during readiness check by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1382 +* Deprecate Go 1.21 ended support and require Go 1.22 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1378 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.27.2...v2.28.0 + +# v2.27.2, 2024-08-20 + +## What's Changed +### Enhancements 🎉 +* Optimize Date/Date32 scan by @ShoshinNikita in https://github.com/ClickHouse/clickhouse-go/pull/1374 +### Fixes 🐛 +* Fix column list parsing for multiline INSERT statements by @Fiery-Fenix in https://github.com/ClickHouse/clickhouse-go/pull/1373 + +## New Contributors +* @Fiery-Fenix made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1373 +* @ShoshinNikita made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1374 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.27.1...v2.27.2 + +# v2.27.1, 2024-08-05 + +## What's Changed +### Fixes 🐛 +* Fix INSERT statement normalization match backtick table name by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1366 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.27.0...v2.27.1 + +# v2.27.0, 2024-08-01 + +## Breaking change notice + +v2.25.0 was released with a breaking change in https://github.com/ClickHouse/clickhouse-go/pull/1306. Please review your implementation. 
+ +## What's Changed +### Enhancements 🎉 +* Unpack value of indirect types in array column to support nested structures in interfaced slices/arrays by @jmaicher in https://github.com/ClickHouse/clickhouse-go/pull/1350 +### Fixes 🐛 +* Common HTTP insert query normalization by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1341 +### Other Changes 🛠 +* Update examples std json by @xjeway in https://github.com/ClickHouse/clickhouse-go/pull/1240 +* ClickHouse 24.6 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1352 +* ClickHouse 24.7 release by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1363 +* Update CHANGELOG with a breaking change note by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1364 + +## New Contributors +* @xjeway made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1240 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.26.0...v2.27.0 + +# v2.26.0, 2024-06-25 + +## What's Changed +### Enhancements 🎉 +* Reintroduce the random connection strategy by @larry-cdn77 in https://github.com/ClickHouse/clickhouse-go/pull/1313 +* Make custom debug log function on-par with the built-in one by @vespian in https://github.com/ClickHouse/clickhouse-go/pull/1317 +* Remove date overflow check as it's normalised by ClickHouse server by @gogingersnap777 in https://github.com/ClickHouse/clickhouse-go/pull/1315 +* Batch: impl `Columns() []column.Interface` method by @egsam98 in https://github.com/ClickHouse/clickhouse-go/pull/1277 +### Fixes 🐛 +* Fix rows.Close do not return too early by @yujiarista in https://github.com/ClickHouse/clickhouse-go/pull/1314 +* Setting `X-Clickhouse-SSL-Certificate-Auth` header correctly given `X-ClickHouse-Key` by @gogingersnap777 in https://github.com/ClickHouse/clickhouse-go/pull/1316 +* Retry on network errors and fix retries on async inserts with `database/sql` interface by @tommyzli in https://github.com/ClickHouse/clickhouse-go/pull/1330 +* BatchInsert parentheses issue fix by @ramzes642 in https://github.com/ClickHouse/clickhouse-go/pull/1327 +### Other Changes 🛠 +* ClickHouse 24.5 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1319 +* Align `allow_suspicious_low_cardinality_types` and `allow_suspicious_low_cardinality_types ` settings in tests due to ClickHouse Cloud incompatibility by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1331 +* Use HTTPs scheme in std connection failover tests by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1332 + +## New Contributors +* @larry-cdn77 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1313 +* @vespian made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1317 +* @gogingersnap777 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1315 +* @yujiarista made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1314 +* @egsam98 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1277 +* @tommyzli made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1330 +* @ramzes642 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1327 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.25.0...v2.26.0 + +# v2.25.0, 2024-05-28 + +## What's Changed +### Breaking Changes 🚨 +* Add a compatibility layer for a database/sql driver to work with sql.NullString 
and ClickHouse nullable column by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1306 +### Other Changes 🛠 +* Use Go 1.22 in head tests by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1305 +* Skip flaky 1127 test by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1307 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.24.0...v2.25.0 + +# v2.24.0, 2024-05-08 + +## What's Changed +### Enhancements 🎉 +* Always compress responses when the client compression is on by @zhkvia in https://github.com/ClickHouse/clickhouse-go/pull/1286 +* Optional flag to close query with flush by @hongker in https://github.com/ClickHouse/clickhouse-go/pull/1276 +### Fixes 🐛 +* Fix prepare batch does not break on `values` substring in table name by @Wang in https://github.com/ClickHouse/clickhouse-go/pull/1290 +* Fix nil checks when appending slices of pointers by @markandrus in https://github.com/ClickHouse/clickhouse-go/pull/1283 +### Other Changes 🛠 +* Don't recreate keys from LC columns from direct stream by @genzgd in https://github.com/ClickHouse/clickhouse-go/pull/1291 + +## New Contributors +* @zhkvia made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1286 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.23.2...v2.24.0 + +# v2.23.2, 2024-04-25 + +## What's Changed +### Fixes 🐛 +* Fixed panic on concurrent context key map write by @Wang in https://github.com/ClickHouse/clickhouse-go/pull/1284 +### Other Changes 🛠 +* Fix ClickHouse Terraform provider version by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1285 + +## New Contributors +* @Wang made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1284 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.23.1...v2.23.2 + +# v2.23.1, 2024-04-15 + +## What's Changed +### Fixes 🐛 +* Zero-value timestamp to be formatted as toDateTime(0) in bind by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1260 +### Other Changes 🛠 +* Update #1127 test case to reproduce a progress handle when exception is thrown by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1259 +* Set max parallel for GH jobs by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1261 +* Ensure test container termination by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1274 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.23.0...v2.23.1 + +# v2.23.0, 2024-03-27 + +## What's Changed +### Enhancements 🎉 +* Implement `ConnBeginTx` as replacement for deprecated `Begin` by @FelipeLema in https://github.com/ClickHouse/clickhouse-go/pull/1255 +### Other Changes 🛠 +* Align error message assertion to new missing custom setting error formatting by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1256 +* CI chores by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1258 + +## New Contributors +* @FelipeLema made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1255 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.4...v2.23.0 + +# v2.22.4, 2024-03-25 + +## What's Changed +### Fixes 🐛 +* Fix column name with parantheses handle in prepare batch by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1252 +### Other Changes 🛠 +* Fix TestBatchAppendRows work different on cloud by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1251 + + 
+**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.3...v2.22.4 + +# v2.22.3, 2024-03-25 + +## What's Changed +### Fixes 🐛 +* Fix panic on tuple scan on []any by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1249 +### Other Changes 🛠 +* Error channel deadlock fix test case by @threadedstream in https://github.com/ClickHouse/clickhouse-go/pull/1239 +* Add a test case for #1127 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1242 +* Run cloud/head jobs when label by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1250 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.2...v2.22.3 + +# v2.22.2, 2024-03-18 + +## What's Changed +### Fixes 🐛 +* Fix for Map columns with Enums by @leklund in https://github.com/ClickHouse/clickhouse-go/pull/1236 + +## New Contributors +* @leklund made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1236 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.1...v2.22.2 + +# v2.22.1, 2024-03-18 + +## What's Changed +### Fixes 🐛 +* Make errors channel buffered inside query() by @threadedstream in https://github.com/ClickHouse/clickhouse-go/pull/1237 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.22.0...v2.22.1 + +# v2.20.0, 2024-02-28 + +## What's Changed +### Enhancements 🎉 +* Support [n]byte/[]byte type Scan/Append to FixedString column by @rogeryk in https://github.com/ClickHouse/clickhouse-go/pull/1205 +### Other Changes 🛠 +* Enable cloud tests by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1202 +* Removed LowCardinality(UInt64) tests that caused allow_suspicious_low_cardinality_types related error by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1206 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.19.0...v2.20.0 + +# v2.19.0, 2024-02-26 + +## What's Changed +### Enhancements 🎉 +* handle ctx.Done() in acquire by @threadedstream in https://github.com/ClickHouse/clickhouse-go/pull/1199 +### Fixes 🐛 +* Fix panic on format nil *fmt.Stringer type value by @zaneli in https://github.com/ClickHouse/clickhouse-go/pull/1200 +### Other Changes 🛠 +* Update Go/ClickHouse versions by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1201 + +## New Contributors +* @threadedstream made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1199 +* @zaneli made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1200 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.18.0...v2.19.0 + +# v2.18.0, 2024-02-01 + +## What's Changed +### Enhancements 🎉 +* Add WithAllocBufferColStrProvider string column allocator for batch insert performance boost by @hongker in https://github.com/ClickHouse/clickhouse-go/pull/1181 +### Fixes 🐛 +* Fix bind for seconds scale DateTime by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1184 +### Other Changes 🛠 +* resolves #1163 debugF function is not respected by @omurbekjk in https://github.com/ClickHouse/clickhouse-go/pull/1166 + +## New Contributors +* @omurbekjk made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1166 +* @hongker made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1181 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.17.1...v2.18.0 + # v2.17.1, 2023-12-27 ## What's Changed @@ -7,8 +468,8 @@ ## New 
Contributors * @nityanandagohain made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1168 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.17.0...v2.17.1 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.17.0...v2.17.1 + # v2.17.0, 2023-12-21 ## What's Changed @@ -23,8 +484,8 @@ * @yogasw made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1156 * @aramperes made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1153 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.16.0...v2.17.0 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.16.0...v2.17.0 + # v2.16.0, 2023-12-01 ## What's Changed @@ -37,8 +498,8 @@ * @phil-schreiber made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1148 * @deankarn made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1144 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.15.0...v2.16.0 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.15.0...v2.16.0 + # v2.14.3, 2023-10-12 ## What's Changed @@ -53,8 +514,8 @@ * @rdaniels6813 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1111 * @rutaka-n made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1095 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.2...v2.14.3 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.2...v2.14.3 + # v2.14.2, 2023-10-04 ## What's Changed @@ -71,8 +532,8 @@ * @beck917 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1103 * @srikanthccv made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1085 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.1...v2.14.2 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.1...v2.14.2 + # v2.14.1, 2023-09-14 ## What's Changed @@ -82,8 +543,8 @@ ## New Contributors * @hanjm made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1084 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.0...v2.14.1 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.0...v2.14.1 + # v2.14.0, 2023-09-12 ## What's Changed @@ -104,8 +565,8 @@ * @alrs made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1081 * @testwill made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1080 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.4...v2.14 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.4...v2.14 + # v2.13.4, 2023-08-30 ## What's Changed @@ -113,8 +574,8 @@ * fix(proto): add TCP protocol version in query packet by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1077 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.3...v2.13.4 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.3...v2.13.4 + # v2.13.3, 2023-08-23 ## What's Changed @@ -122,8 +583,8 @@ * fix(column.json): fix bool type handling by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1073 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.2...v2.13.3 - +**Full Changelog**: 
https://github.com/ClickHouse/clickhouse-go/compare/v2.13.2...v2.13.3 + # v2.13.2, 2023-08-18 ## What's Changed @@ -133,8 +594,8 @@ * Test against latest and head CH by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1060 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.1...v2.13.2 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.1...v2.13.2 + # v2.13.1, 2023-08-17 ## What's Changed @@ -142,8 +603,8 @@ * fix: native format Date32 representation by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1069 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.0...v2.13.1 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.0...v2.13.1 + # v2.13.0, 2023-08-10 ## What's Changed @@ -158,8 +619,8 @@ ## New Contributors * @ValManP made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1051 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.12.1...v2.13.0 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.12.1...v2.13.0 + # v2.12.1, 2023-08-02 ## What's Changed @@ -178,8 +639,8 @@ * @jmaicher made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1046 * @RoryCrispin made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1056 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.12.0...v2.12.1 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.12.0...v2.12.1 + # v2.12.0, 2023-07-27 ## What's Changed @@ -191,8 +652,8 @@ ## New Contributors * @sentanos made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1042 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.11.0...v2.12.0 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.11.0...v2.12.0 + # v2.11.0, 2023-07-20 ## What's Changed @@ -209,8 +670,8 @@ * @djosephsen made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/941 * @anjmao made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1029 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.10.1...v2.11.0 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.10.1...v2.11.0 + # v2.10.1, 2023-06-06 ## What's Changed @@ -221,8 +682,8 @@ ## New Contributors * @kokizzu made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1006 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.10.0...v2.10.1 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.10.0...v2.10.1 + # v2.10.0, 2023-05-17 ## What's Changed @@ -236,8 +697,8 @@ ## New Contributors * @stephaniehingtgen made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/998 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.9.3...v2.10.0 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.9.3...v2.10.0 + # v2.9.2, 2023-05-08 ## What's Changed @@ -250,8 +711,8 @@ * @candiduslynx made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/984 * @slvrtrn made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/987 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.9.1...v2.9.2 - +**Full Changelog**: 
https://github.com/ClickHouse/clickhouse-go/compare/v2.9.1...v2.9.2 + # v2.9.1, 2023-04-24 ## What's Changed @@ -264,8 +725,8 @@ * @czubocha made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/977 * @hexchain made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/975 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.9.0...v2.9.1 - +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.9.0...v2.9.1 + # v2.9.0, 2023-04-13 ## What's Changed @@ -283,174 +744,174 @@ ## New Contributors * @xiaochaoren1 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/964 -**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.8.3...v2.9.0 - -## 2.8.3, 2023-04-03 - -### Bug fixes - -- Revert: Expire idle connections no longer acquired during lifetime [#958](https://github.com/ClickHouse/clickhouse-go/pull/958) by @jkaflik - -## 2.8.2, 2023-03-31 - -### Bug fixes - -- Expire idle connections no longer acquired during lifetime [#945](https://github.com/ClickHouse/clickhouse-go/pull/945) by @jkaflik - -## 2.8.1, 2023-03-29 - -### Bug fixes - -- Fix idle connection check for TLS connections [#951](https://github.com/ClickHouse/clickhouse-go/pull/951) by @jkaflik & @alekar - -## 2.8.0, 2023-03-27 - -### New features - -- Support customized "url path" in http connection [#938](https://github.com/ClickHouse/clickhouse-go/pull/938) by @crisismaple -- Allow Auth.Database option to be empty [#926](https://github.com/ClickHouse/clickhouse-go/pull/938) by @v4run - -### Chores - -- Bump github.com/stretchr/testify from 1.8.1 to 1.8.2 [#933](https://github.com/ClickHouse/clickhouse-go/pull/933) -- fix: small typo in the text of an error [#936](https://github.com/ClickHouse/clickhouse-go/pull/936) by @lspgn -- Improved bug template [#916](https://github.com/ClickHouse/clickhouse-go/pull/916) by @mshustov - -## 2.7.0, 2023-03-08 - -### New features - -- Date type with user location [#923](https://github.com/ClickHouse/clickhouse-go/pull/923) by @jkaflik -- Add AppendRow function to BatchColumn [#927](https://github.com/ClickHouse/clickhouse-go/pull/927) by @pikot - -### Bug fixes - -- fix: fix connect.compression's format verb [#924](https://github.com/ClickHouse/clickhouse-go/pull/924) by @mind1949 -- Add extra padding for strings shorter than FixedColumn length [#910](https://github.com/ClickHouse/clickhouse-go/pull/910) by @jkaflik - -### Chore - -- Bump github.com/andybalholm/brotli from 1.0.4 to 1.0.5 [#911](https://github.com/ClickHouse/clickhouse-go/pull/911) -- Bump github.com/paulmach/orb from 0.8.0 to 0.9.0 [#912](https://github.com/ClickHouse/clickhouse-go/pull/912) -- Bump golang.org/x/net from 0.0.0-20220722155237-a158d28d115b to 0.7.0 [#928](https://github.com/ClickHouse/clickhouse-go/pull/928) - -## 2.6.5, 2023-02-28 - -### Bug fixes - -- Fix array parameter formatting in binding mechanism [#921](https://github.com/ClickHouse/clickhouse-go/pull/921) by @genzgd - -## 2.6.4, 2023-02-23 - -### Bug fixes - -- Fixed concurrency issue in stdConnOpener [#918](https://github.com/ClickHouse/clickhouse-go/pull/918) by @jkaflik - -## 2.6.3, 2023-02-22 - -### Bug fixes - -- Fixed `lib/binary/string_safe.go` for non 64bit arch [#914](https://github.com/ClickHouse/clickhouse-go/pull/914) by @atoulme - -## 2.6.2, 2023-02-20 - -### Bug fixes - -- Fix decimal encoding with non-standard exponential representation [#909](https://github.com/ClickHouse/clickhouse-go/pull/909) by 
@vogrelord -- Add extra padding for strings shorter than FixedColumn length [#910](https://github.com/ClickHouse/clickhouse-go/pull/910) by @jkaflik - -### Chore - -- Remove Yandex ClickHouse image from Makefile [#895](https://github.com/ClickHouse/clickhouse-go/pull/895) by @alexey-milovidov -- Remove duplicate of error handling [#898](https://github.com/ClickHouse/clickhouse-go/pull/898) by @Astemirdum -- Bump github.com/ClickHouse/ch-go from 0.51.2 to 0.52.1 [#901](https://github.com/ClickHouse/clickhouse-go/pull/901) - -## 2.6.1, 2023-02-13 - -### Bug fixes - -- Do not reuse expired connections (`ConnMaxLifetime`) [#892](https://github.com/ClickHouse/clickhouse-go/pull/892) by @iamluc -- Extend default dial timeout value to 30s [#893](https://github.com/ClickHouse/clickhouse-go/pull/893) by @jkaflik -- Compression name fixed in sendQuery log [#884](https://github.com/ClickHouse/clickhouse-go/pull/884) by @fredngr - -## 2.6.0, 2023-01-27 - -### New features - -- Client info specification implementation [#876](https://github.com/ClickHouse/clickhouse-go/pull/876) by @jkaflik - -### Bug fixes - -- Better handling for broken connection errors in the std interface [#879](https://github.com/ClickHouse/clickhouse-go/pull/879) by @n-oden - -### Chore - -- Document way to provide table or database identifier with query parameters [#875](https://github.com/ClickHouse/clickhouse-go/pull/875) by @jkaflik -- Bump github.com/ClickHouse/ch-go from 0.51.0 to 0.51.2 [#881](https://github.com/ClickHouse/clickhouse-go/pull/881) - -## 2.5.1, 2023-01-10 - -### Bug fixes - -- Flag connection as closed on broken pipe [#871](https://github.com/ClickHouse/clickhouse-go/pull/871) by @n-oden - -## 2.5.0, 2023-01-10 - -### New features - -- Buffered compression column by column for a native protocol. Introduces the `MaxCompressionBuffer` option - max size (bytes) of compression buffer during column-by-column compression (default 10MiB) [#808](https://github.com/ClickHouse/clickhouse-go/pull/808) by @gingerwizard and @jkaflik -- Support custom types that implement `sql.Scanner` interface (e.g. `type customString string`) [#850](https://github.com/ClickHouse/clickhouse-go/pull/850) by @DarkDrim -- Append query options to the context instead of overwriting [#860](https://github.com/ClickHouse/clickhouse-go/pull/860) by @aaron276h -- Query parameters support [#854](https://github.com/ClickHouse/clickhouse-go/pull/854) by @jkaflik -- Expose `DialStrategy` function to the user for custom connection routing. [#855](https://github.com/ClickHouse/clickhouse-go/pull/855) by @jkaflik - -### Bug fixes - -- Close connection on `Cancel`. This is to make sure context timed out/canceled connection is not reused further [#764](https://github.com/ClickHouse/clickhouse-go/pull/764) by @gingerwizard -- Fully parse `secure` and `skip_verify` in DSN query parameters. [#862](https://github.com/ClickHouse/clickhouse-go/pull/862) by @n-oden - -### Chore - -- Added tests covering read-only user queries [#837](https://github.com/ClickHouse/clickhouse-go/pull/837) by @jkaflik -- Agreed on a batch append fail semantics [#853](https://github.com/ClickHouse/clickhouse-go/pull/853) by @jkaflik - -## 2.4.3, 2022-11-30 -### Bug Fixes -* Fix in batch concurrency - batch could panic if used in separate go routines.
-The issue was originally detected due to the use of a batch in a go routine and Abort being called after the connection was released on the batch. This would invalidate the connection which had been subsequently reassigned.
-This issue could occur as soon as the conn is released (this can happen in a number of places e.g. after Send or an Append error), and it potentially returns to the pool for use in another go routine. Subsequent releases could then occur e.g., the user calls Abort mainly but also Send would do it. The result is the connection being closed in the release function while another batch or query potentially used it.
-This release includes a guard to prevent release from being called more than once on a batch. It assumes that batches are not thread-safe - they aren't (only connections are). -## 2.4.2, 2022-11-24 -### Bug Fixes -- Don't panic on `Send()` on batch after invalid `Append`. [#830](https://github.com/ClickHouse/clickhouse-go/pull/830) -- Fix JSON issue with `nil` if column order is inconsisent. [#824](https://github.com/ClickHouse/clickhouse-go/pull/824) - -## 2.4.1, 2022-11-23 -### Bug Fixes -- Patch release to fix "Regression - escape character was not considered when comparing column names". [#828](https://github.com/ClickHouse/clickhouse-go/issues/828) - -## 2.4.0, 2022-11-22 -### New Features -- Support for Nullables in Tuples. [#821](https://github.com/ClickHouse/clickhouse-go/pull/821) [#817](https://github.com/ClickHouse/clickhouse-go/pull/817) -- Use headers for auth and not url if SSL. [#811](https://github.com/ClickHouse/clickhouse-go/pull/811) -- Support additional headers. [#811](https://github.com/ClickHouse/clickhouse-go/pull/811) -- Support int64 for DateTime. [#807](https://github.com/ClickHouse/clickhouse-go/pull/807) -- Support inserting Enums as int8/int16/int. [#802](https://github.com/ClickHouse/clickhouse-go/pull/802) -- Print error if unsupported server. [#792](https://github.com/ClickHouse/clickhouse-go/pull/792) -- Allow block buffer size to tuned for performance - see `BlockBufferSize`. [#776](https://github.com/ClickHouse/clickhouse-go/pull/776) -- Support custom datetime in Scan. [#767](https://github.com/ClickHouse/clickhouse-go/pull/767) -- Support insertion of an orderedmap. [#763](https://github.com/ClickHouse/clickhouse-go/pull/763) - -### Bug Fixes -- Decompress errors over HTTP. [#792](https://github.com/ClickHouse/clickhouse-go/pull/792) -- Use `timezone` vs `timeZone` so we work on older versions. [#781](https://github.com/ClickHouse/clickhouse-go/pull/781) -- Ensure only columns specified in INSERT are required in batch. [#790](https://github.com/ClickHouse/clickhouse-go/pull/790) -- Respect order of columns in insert for batch. [#790](https://github.com/ClickHouse/clickhouse-go/pull/790) -- Handle double pointers for Nullable columns when batch inserting. [#774](https://github.com/ClickHouse/clickhouse-go/pull/774) -- Use nil for `LowCardinality(Nullable(X))`. [#768](https://github.com/ClickHouse/clickhouse-go/pull/768) - -### Breaking Changes -- Align timezone handling with spec. [#776](https://github.com/ClickHouse/clickhouse-go/pull/766), specifically: - - If parsing strings for datetime, datetime64 or dates we assume the locale is Local (i.e. the client) if not specified in the string. - - The server (or column tz) is used for datetime and datetime64 rendering. For date/date32, these have no tz info in the server. 
For now, they will be rendered as UTC - consistent with the clickhouse-client - - Addresses bind when no location is set +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.8.3...v2.9.0 + +## 2.8.3, 2023-04-03 + +### Bug fixes + +- Revert: Expire idle connections no longer acquired during lifetime [#958](https://github.com/ClickHouse/clickhouse-go/pull/958) by @jkaflik + +## 2.8.2, 2023-03-31 + +### Bug fixes + +- Expire idle connections no longer acquired during lifetime [#945](https://github.com/ClickHouse/clickhouse-go/pull/945) by @jkaflik + +## 2.8.1, 2023-03-29 + +### Bug fixes + +- Fix idle connection check for TLS connections [#951](https://github.com/ClickHouse/clickhouse-go/pull/951) by @jkaflik & @alekar + +## 2.8.0, 2023-03-27 + +### New features + +- Support customized "url path" in http connection [#938](https://github.com/ClickHouse/clickhouse-go/pull/938) by @crisismaple +- Allow Auth.Database option to be empty [#926](https://github.com/ClickHouse/clickhouse-go/pull/938) by @v4run + +### Chores + +- Bump github.com/stretchr/testify from 1.8.1 to 1.8.2 [#933](https://github.com/ClickHouse/clickhouse-go/pull/933) +- fix: small typo in the text of an error [#936](https://github.com/ClickHouse/clickhouse-go/pull/936) by @lspgn +- Improved bug template [#916](https://github.com/ClickHouse/clickhouse-go/pull/916) by @mshustov + +## 2.7.0, 2023-03-08 + +### New features + +- Date type with user location [#923](https://github.com/ClickHouse/clickhouse-go/pull/923) by @jkaflik +- Add AppendRow function to BatchColumn [#927](https://github.com/ClickHouse/clickhouse-go/pull/927) by @pikot + +### Bug fixes + +- fix: fix connect.compression's format verb [#924](https://github.com/ClickHouse/clickhouse-go/pull/924) by @mind1949 +- Add extra padding for strings shorter than FixedColumn length [#910](https://github.com/ClickHouse/clickhouse-go/pull/910) by @jkaflik + +### Chore + +- Bump github.com/andybalholm/brotli from 1.0.4 to 1.0.5 [#911](https://github.com/ClickHouse/clickhouse-go/pull/911) +- Bump github.com/paulmach/orb from 0.8.0 to 0.9.0 [#912](https://github.com/ClickHouse/clickhouse-go/pull/912) +- Bump golang.org/x/net from 0.0.0-20220722155237-a158d28d115b to 0.7.0 [#928](https://github.com/ClickHouse/clickhouse-go/pull/928) + +## 2.6.5, 2023-02-28 + +### Bug fixes + +- Fix array parameter formatting in binding mechanism [#921](https://github.com/ClickHouse/clickhouse-go/pull/921) by @genzgd + +## 2.6.4, 2023-02-23 + +### Bug fixes + +- Fixed concurrency issue in stdConnOpener [#918](https://github.com/ClickHouse/clickhouse-go/pull/918) by @jkaflik + +## 2.6.3, 2023-02-22 + +### Bug fixes + +- Fixed `lib/binary/string_safe.go` for non 64bit arch [#914](https://github.com/ClickHouse/clickhouse-go/pull/914) by @atoulme + +## 2.6.2, 2023-02-20 + +### Bug fixes + +- Fix decimal encoding with non-standard exponential representation [#909](https://github.com/ClickHouse/clickhouse-go/pull/909) by @vogrelord +- Add extra padding for strings shorter than FixedColumn length [#910](https://github.com/ClickHouse/clickhouse-go/pull/910) by @jkaflik + +### Chore + +- Remove Yandex ClickHouse image from Makefile [#895](https://github.com/ClickHouse/clickhouse-go/pull/895) by @alexey-milovidov +- Remove duplicate of error handling [#898](https://github.com/ClickHouse/clickhouse-go/pull/898) by @Astemirdum +- Bump github.com/ClickHouse/ch-go from 0.51.2 to 0.52.1 [#901](https://github.com/ClickHouse/clickhouse-go/pull/901) + +## 2.6.1, 2023-02-13 + +### Bug fixes + +- 
Do not reuse expired connections (`ConnMaxLifetime`) [#892](https://github.com/ClickHouse/clickhouse-go/pull/892) by @iamluc +- Extend default dial timeout value to 30s [#893](https://github.com/ClickHouse/clickhouse-go/pull/893) by @jkaflik +- Compression name fixed in sendQuery log [#884](https://github.com/ClickHouse/clickhouse-go/pull/884) by @fredngr + +## 2.6.0, 2023-01-27 + +### New features + +- Client info specification implementation [#876](https://github.com/ClickHouse/clickhouse-go/pull/876) by @jkaflik + +### Bug fixes + +- Better handling for broken connection errors in the std interface [#879](https://github.com/ClickHouse/clickhouse-go/pull/879) by @n-oden + +### Chore + +- Document way to provide table or database identifier with query parameters [#875](https://github.com/ClickHouse/clickhouse-go/pull/875) by @jkaflik +- Bump github.com/ClickHouse/ch-go from 0.51.0 to 0.51.2 [#881](https://github.com/ClickHouse/clickhouse-go/pull/881) + +## 2.5.1, 2023-01-10 + +### Bug fixes + +- Flag connection as closed on broken pipe [#871](https://github.com/ClickHouse/clickhouse-go/pull/871) by @n-oden + +## 2.5.0, 2023-01-10 + +### New features + +- Buffered compression column by column for a native protocol. Introduces the `MaxCompressionBuffer` option - max size (bytes) of compression buffer during column-by-column compression (default 10MiB) [#808](https://github.com/ClickHouse/clickhouse-go/pull/808) by @gingerwizard and @jkaflik +- Support custom types that implement `sql.Scanner` interface (e.g. `type customString string`) [#850](https://github.com/ClickHouse/clickhouse-go/pull/850) by @DarkDrim +- Append query options to the context instead of overwriting [#860](https://github.com/ClickHouse/clickhouse-go/pull/860) by @aaron276h +- Query parameters support [#854](https://github.com/ClickHouse/clickhouse-go/pull/854) by @jkaflik +- Expose `DialStrategy` function to the user for custom connection routing. [#855](https://github.com/ClickHouse/clickhouse-go/pull/855) by @jkaflik + +### Bug fixes + +- Close connection on `Cancel`. This is to make sure context timed out/canceled connection is not reused further [#764](https://github.com/ClickHouse/clickhouse-go/pull/764) by @gingerwizard +- Fully parse `secure` and `skip_verify` in DSN query parameters. [#862](https://github.com/ClickHouse/clickhouse-go/pull/862) by @n-oden + +### Chore + +- Added tests covering read-only user queries [#837](https://github.com/ClickHouse/clickhouse-go/pull/837) by @jkaflik +- Agreed on a batch append fail semantics [#853](https://github.com/ClickHouse/clickhouse-go/pull/853) by @jkaflik + +## 2.4.3, 2022-11-30 +### Bug Fixes +* Fix in batch concurrency - batch could panic if used in separate go routines.
+The issue was originally detected due to the use of a batch in a go routine and Abort being called after the connection was released on the batch. This would invalidate the connection which had been subsequently reassigned.
+This issue could occur as soon as the conn is released (this can happen in a number of places e.g. after Send or an Append error), and it potentially returns to the pool for use in another go routine. Subsequent releases could then occur e.g., the user calls Abort mainly but also Send would do it. The result is the connection being closed in the release function while another batch or query potentially used it.
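Editor's aside (not part of the upstream changelog): the safe pattern implied by this fix is to keep a batch's entire lifecycle inside a single goroutine and to share only the connection between goroutines. A minimal sketch, with a hypothetical table and column name:

```go
package main

import (
	"context"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// insertIDs confines the batch to one goroutine; only conn may be shared.
func insertIDs(ctx context.Context, conn driver.Conn, ids []uint64) error {
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO example_table (id)") // hypothetical table
	if err != nil {
		return err
	}
	for _, id := range ids {
		if err := batch.Append(id); err != nil {
			return err
		}
	}
	return batch.Send() // Send (or Abort) exactly once, from this same goroutine
}
```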
+This release includes a guard to prevent release from being called more than once on a batch. It assumes that batches are not thread-safe - they aren't (only connections are). +## 2.4.2, 2022-11-24 +### Bug Fixes +- Don't panic on `Send()` on batch after invalid `Append`. [#830](https://github.com/ClickHouse/clickhouse-go/pull/830) +- Fix JSON issue with `nil` if column order is inconsistent. [#824](https://github.com/ClickHouse/clickhouse-go/pull/824) + +## 2.4.1, 2022-11-23 +### Bug Fixes +- Patch release to fix "Regression - escape character was not considered when comparing column names". [#828](https://github.com/ClickHouse/clickhouse-go/issues/828) + +## 2.4.0, 2022-11-22 +### New Features +- Support for Nullables in Tuples. [#821](https://github.com/ClickHouse/clickhouse-go/pull/821) [#817](https://github.com/ClickHouse/clickhouse-go/pull/817) +- Use headers for auth and not url if SSL. [#811](https://github.com/ClickHouse/clickhouse-go/pull/811) +- Support additional headers. [#811](https://github.com/ClickHouse/clickhouse-go/pull/811) +- Support int64 for DateTime. [#807](https://github.com/ClickHouse/clickhouse-go/pull/807) +- Support inserting Enums as int8/int16/int. [#802](https://github.com/ClickHouse/clickhouse-go/pull/802) +- Print error if unsupported server. [#792](https://github.com/ClickHouse/clickhouse-go/pull/792) +- Allow block buffer size to tuned for performance - see `BlockBufferSize`. [#776](https://github.com/ClickHouse/clickhouse-go/pull/776) +- Support custom datetime in Scan. [#767](https://github.com/ClickHouse/clickhouse-go/pull/767) +- Support insertion of an orderedmap. [#763](https://github.com/ClickHouse/clickhouse-go/pull/763) + +### Bug Fixes +- Decompress errors over HTTP. [#792](https://github.com/ClickHouse/clickhouse-go/pull/792) +- Use `timezone` vs `timeZone` so we work on older versions. [#781](https://github.com/ClickHouse/clickhouse-go/pull/781) +- Ensure only columns specified in INSERT are required in batch. [#790](https://github.com/ClickHouse/clickhouse-go/pull/790) +- Respect order of columns in insert for batch. [#790](https://github.com/ClickHouse/clickhouse-go/pull/790) +- Handle double pointers for Nullable columns when batch inserting. [#774](https://github.com/ClickHouse/clickhouse-go/pull/774) +- Use nil for `LowCardinality(Nullable(X))`. [#768](https://github.com/ClickHouse/clickhouse-go/pull/768) + +### Breaking Changes +- Align timezone handling with spec. [#776](https://github.com/ClickHouse/clickhouse-go/pull/766), specifically: + - If parsing strings for datetime, datetime64 or dates we assume the locale is Local (i.e. the client) if not specified in the string. + - The server (or column tz) is used for datetime and datetime64 rendering. For date/date32, these have no tz info in the server. 
For now, they will be rendered as UTC - consistent with the clickhouse-client + - Addresses bind when no location is set diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/Makefile b/vendor/github.com/ClickHouse/clickhouse-go/v2/Makefile index acd3a74f..28c63515 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/Makefile +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/Makefile @@ -1,5 +1,5 @@ CLICKHOUSE_VERSION ?= latest -CLICKHOUSE_TEST_TIMEOUT ?= 120s +CLICKHOUSE_TEST_TIMEOUT ?= 240s CLICKHOUSE_QUORUM_INSERT ?= 1 up: diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md index ef08eb0a..ed83cf67 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md @@ -2,28 +2,6 @@ Golang SQL database client for [ClickHouse](https://clickhouse.com/). -## Versions - -There are two version of this client, v1 and v2, available as separate branches. - -**v1 is now in a state of a maintenance - we will only accept PRs for bug and security fixes.** - -Users should use v2 which is production ready and [significantly faster than v1](#benchmark). - -v2 has breaking changes for users migrating from v1. These were not properly tracked prior to this client being officially supported. We endeavour to track known differences [here](https://github.com/ClickHouse/clickhouse-go/blob/main/v1_v2_CHANGES.md) and resolve where possible. - -## Supported ClickHouse Versions - -The client is tested against the currently [supported versions](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md) of ClickHouse - -## Supported Golang Versions - -| Client Version | Golang Versions | -|----------------|-----------------| -| => 2.0 <= 2.2 | 1.17, 1.18 | -| >= 2.3 | 1.18.4+, 1.19 | -| >= 2.14 | 1.20, 1.21 | - ## Key features * Uses ClickHouse native format for optimal performance. Utilises low level [ch-go](https://github.com/ClickHouse/ch-go) client for encoding/decoding and compression (versions >= 2.3.0). @@ -55,6 +33,22 @@ Support for the ClickHouse protocol advanced features using `Context`: * Profile info * Profile events + +## Supported ClickHouse Versions + +The client is tested against the currently [supported versions](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md) of ClickHouse + +## Supported Golang Versions + +| Client Version | Golang Versions | +|----------------|------------------------| +| => 2.0 <= 2.2 | 1.17, 1.18 | +| >= 2.3 | 1.18.4+, 1.19 | +| >= 2.14 | 1.20, 1.21 | +| >= 2.19 | 1.21, 1.22 | +| >= 2.28 | 1.22, 1.23 | +| >= 2.29 | 1.21, 1.22, 1.23, 1.24 | + ## Documentation [https://clickhouse.com/docs/en/integrations/go](https://clickhouse.com/docs/en/integrations/go) @@ -151,8 +145,9 @@ conn.SetConnMaxLifetime(time.Hour) * username/password - auth credentials * database - select the current default database * dial_timeout - a duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix such as "300ms", "1s". Valid time units are "ms", "s", "m". (default 30s) -* connection_open_strategy - round_robin/in_order (default in_order). - * round_robin - choose a round-robin server from the set +* connection_open_strategy - random/round_robin/in_order (default in_order). 
+ * random - choose random server from the set + * round_robin - choose a round-robin server from the set * in_order - first live server is chosen in specified order * debug - enable debug output (boolean value) * compress - compress - specify the compression algorithm - “none” (default), `zstd`, `lz4`, `gzip`, `deflate`, `br`. If set to `true`, `lz4` will be used. @@ -165,6 +160,7 @@ conn.SetConnMaxLifetime(time.Hour) * read_timeout - a duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix such as "300ms", "1s". Valid time units are "ms", "s", "m" (default 5m). * max_compression_buffer - max size (bytes) of compression buffer during column by column compression (default 10MiB) * client_info_product - optional list (comma separated) of product name and version pair separated with `/`. This value will be pass a part of client info. e.g. `client_info_product=my_app/1.0,my_module/0.1` More details in [Client info](#client-info) section. +* http_proxy - HTTP proxy address SSL/TLS parameters: @@ -179,6 +175,8 @@ clickhouse://username:password@host1:9000,host2:9000/database?dial_timeout=200ms ### HTTP Support (Experimental) +**Note**: using HTTP protocol is possible only with `database/sql` interface. + The native format can be used over the HTTP protocol. This is useful in scenarios where users need to proxy traffic e.g. using [ChProxy](https://www.chproxy.org/) or via load balancers. This can be achieved by modifying the DSN to specify the HTTP protocol. @@ -208,7 +206,19 @@ conn := clickhouse.OpenDB(&clickhouse.Options{ }) ``` -**Note**: using HTTP protocol is possible only with `database/sql` interface. +#### Proxy support + +HTTP proxy can be set in the DSN string by specifying the `http_proxy` parameter. +(make sure to URL encode the proxy address) + +```sh +http://host1:8123,host2:8123/database?dial_timeout=200ms&max_execution_time=60&http_proxy=http%3A%2F%2Fproxy%3A8080 +``` + +If you are using `clickhouse.OpenDB`, set the `HTTProxy` field in the `clickhouse.Options`. + +An alternative way is to enable proxy by setting the `HTTP_PROXY` (for HTTP) or `HTTPS_PROXY` (for HTTPS) environment variables. +See more details in the [Go documentation](https://pkg.go.dev/net/http#ProxyFromEnvironment). ## Compression @@ -319,7 +329,7 @@ go get -u github.com/ClickHouse/clickhouse-go/v2 * [batch struct](examples/clickhouse_api/append_struct.go) * [columnar](examples/clickhouse_api/columnar_insert.go) * [scan struct](examples/clickhouse_api/scan_struct.go) -* [query parameters](examples/clickhouse_api/query_parameters.go) (deprecated in favour of native query parameters) +* [query parameters](examples/clickhouse_api/query_parameters.go) * [bind params](examples/clickhouse_api/bind.go) (deprecated in favour of native query parameters) * [client info](examples/clickhouse_api/client_info.go) @@ -332,6 +342,10 @@ go get -u github.com/ClickHouse/clickhouse-go/v2 * [bind params](examples/std/bind.go) (deprecated in favour of native query parameters) * [client info](examples/std/client_info.go) +## Third-party libraries + +* [clickhouse-go-rows-utils](https://github.com/EpicStep/clickhouse-go-rows-utils) - utilities that simplify working with rows. + ## ClickHouse alternatives - ch-go Versions of this client >=2.3.x utilise [ch-go](https://github.com/ClickHouse/ch-go) for their low level encoding/decoding. This low lever client provides a high performance columnar interface and should be used in performance critical use cases. 
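Editor's illustration (not taken from the examples directory): a minimal sketch of the row-oriented native API described above, with a hypothetical address, credentials and query.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	// Hypothetical address and credentials; compression mirrors the DSN options above.
	conn, err := clickhouse.Open(&clickhouse.Options{
		Addr:        []string{"127.0.0.1:9000"},
		Auth:        clickhouse.Auth{Database: "default", Username: "default"},
		Compression: &clickhouse.Compression{Method: clickhouse.CompressionLZ4},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	rows, err := conn.Query(context.Background(), "SELECT number FROM system.numbers LIMIT 5")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var n uint64
		if err := rows.Scan(&n); err != nil {
			log.Fatal(err)
		}
		fmt.Println(n)
	}
}
```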
This client provides more familar row orientated and `database/sql` semantics at the cost of some performance. diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md index 6712539e..fe1a746a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md @@ -2,7 +2,7 @@ The following table aims to capture the Golang types supported for each ClickHou Whilst each ClickHouse type often has a logical Golang type, we aim to support implicit conversions where possible and provided no precision loss will be incurred - thus alleviating the need for users to ensure their data aligns perfectly with ClickHouse types. -This effort is ongoing and can be seperated in to insertion (`Append`/`AppendRow`) and read time (via a `Scan`). Should you need support for a specific conversion, please raise an issue. +This effort is ongoing and can be separated in to insertion (`Append`/`AppendRow`) and read time (via a `Scan`). Should you need support for a specific conversion, please raise an issue. ## Append Support diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/batch.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/batch.go new file mode 100644 index 00000000..cf376f38 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/batch.go @@ -0,0 +1,56 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package clickhouse + +import ( + "fmt" + "regexp" + "strings" +) + +var normalizeInsertQueryMatch = regexp.MustCompile(`(?i)(INSERT\s+INTO\s+([^(]+)(?:\s*\([^()]*(?:\([^()]*\)[^()]*)*\))?)(?:\s*VALUES)?`) +var truncateFormat = regexp.MustCompile(`(?i)\sFORMAT\s+[^\s]+`) +var truncateValues = regexp.MustCompile(`\sVALUES\s.*$`) +var extractInsertColumnsMatch = regexp.MustCompile(`(?si)INSERT INTO .+\s\((?P.+)\)$`) + +func extractNormalizedInsertQueryAndColumns(query string) (normalizedQuery string, tableName string, columns []string, err error) { + query = truncateFormat.ReplaceAllString(query, "") + query = truncateValues.ReplaceAllString(query, "") + + matches := normalizeInsertQueryMatch.FindStringSubmatch(query) + if len(matches) == 0 { + err = fmt.Errorf("invalid INSERT query: %s", query) + return + } + + normalizedQuery = fmt.Sprintf("%s FORMAT Native", matches[1]) + tableName = strings.TrimSpace(matches[2]) + + columns = make([]string, 0) + matches = extractInsertColumnsMatch.FindStringSubmatch(matches[1]) + if len(matches) == 2 { + columns = strings.Split(matches[1], ",") + for i := range columns { + // refers to https://clickhouse.com/docs/en/sql-reference/syntax#identifiers + // we can use identifiers with double quotes or backticks, for example: "id", `id`, but not both, like `"id"`. 
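+				// e.g. (illustrative) a column written as "id" or `id` in the INSERT statement is normalized to the bare identifier id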
+ columns[i] = strings.Trim(strings.Trim(strings.TrimSpace(columns[i]), "\""), "`") + } + } + + return +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go index 7f5ac393..f6373078 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go @@ -59,8 +59,10 @@ func DateNamed(name string, value time.Time, scale TimeUnit) driver.NamedDateVal } } -var bindNumericRe = regexp.MustCompile(`\$[0-9]+`) -var bindPositionalRe = regexp.MustCompile(`[^\\][?]`) +var ( + bindNumericRe = regexp.MustCompile(`\$[0-9]+`) + bindPositionalRe = regexp.MustCompile(`[^\\][?]`) +) func bind(tz *time.Location, query string, args ...any) (string, error) { if len(args) == 0 { @@ -245,6 +247,12 @@ func bindNamed(tz *time.Location, query string, args ...any) (_ string, err erro func formatTime(tz *time.Location, scale TimeUnit, value time.Time) (string, error) { switch value.Location().String() { case "Local", "": + // It's required to pass timestamp as string due to decimal overflow for higher precision, + // but zero-value string "toDateTime('0')" will be not parsed by ClickHouse. + if value.Unix() == 0 { + return "toDateTime(0)", nil + } + switch scale { case Seconds: return fmt.Sprintf("toDateTime('%d')", value.Unix()), nil @@ -262,7 +270,7 @@ func formatTime(tz *time.Location, scale TimeUnit, value time.Time) (string, err return fmt.Sprintf("toDateTime64('%s', %d)", value.Format(fmt.Sprintf("2006-01-02 15:04:05.%0*d", int(scale*3), 0)), int(scale*3)), nil } if scale == Seconds { - return value.Format(fmt.Sprintf("toDateTime('2006-01-02 15:04:05', '%s')", value.Location().String())), nil + return fmt.Sprintf("toDateTime('%s', '%s')", value.Format("2006-01-02 15:04:05"), value.Location().String()), nil } return fmt.Sprintf("toDateTime64('%s', %d, '%s')", value.Format(fmt.Sprintf("2006-01-02 15:04:05.%0*d", int(scale*3), 0)), int(scale*3), value.Location().String()), nil } @@ -304,6 +312,11 @@ func format(tz *time.Location, scale TimeUnit, v any) (string, error) { } return fmt.Sprintf("[%s]", val), nil case fmt.Stringer: + if v := reflect.ValueOf(v); v.Kind() == reflect.Pointer && + v.IsNil() && + v.Type().Elem().Implements(reflect.TypeOf((*fmt.Stringer)(nil)).Elem()) { + return "NULL", nil + } return quote(v.String()), nil case column.OrderedMap: values := make([]string, 0) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/chcol.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/chcol.go new file mode 100644 index 00000000..65b65f4c --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/chcol.go @@ -0,0 +1,70 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package clickhouse + +import "github.com/ClickHouse/clickhouse-go/v2/lib/chcol" + +// Re-export chcol types/funcs to top level clickhouse package + +type ( + // Variant represents a ClickHouse Variant type that can hold multiple possible types + Variant = chcol.Variant + // Dynamic is an alias for the Variant type + Dynamic = chcol.Dynamic + // JSON represents a ClickHouse JSON type that can hold multiple possible types + JSON = chcol.JSON + + // JSONSerializer interface allows a struct to be manually converted to an optimized JSON structure instead of relying + // on recursive reflection. + // Note that the struct must be a pointer in order for the interface to be matched, reflection will be used otherwise. + JSONSerializer = chcol.JSONSerializer + // JSONDeserializer interface allows a struct to load its data from an optimized JSON structure instead of relying + // on recursive reflection to set its fields. + JSONDeserializer = chcol.JSONDeserializer +) + +// NewVariant creates a new Variant with the given value +func NewVariant(v any) Variant { + return chcol.NewVariant(v) +} + +// NewVariantWithType creates a new Variant with the given value and ClickHouse type +func NewVariantWithType(v any, chType string) Variant { + return chcol.NewVariantWithType(v, chType) +} + +// NewDynamic creates a new Dynamic with the given value +func NewDynamic(v any) Dynamic { + return chcol.NewDynamic(v) +} + +// NewDynamicWithType creates a new Dynamic with the given value and ClickHouse type +func NewDynamicWithType(v any, chType string) Dynamic { + return chcol.NewDynamicWithType(v, chType) +} + +// NewJSON creates a new empty JSON value +func NewJSON() *JSON { + return chcol.NewJSON() +} + +// ExtractJSONPathAs is a convenience function for asserting a path to a specific type. +// The underlying value is also extracted from its Dynamic wrapper if present. 
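+// For example (path and target type chosen purely for illustration):
+//	name, ok := ExtractJSONPathAs[string](obj, "user.name")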
+func ExtractJSONPathAs[T any](o *JSON, path string) (valueAs T, ok bool) { + return chcol.ExtractJSONPathAs[T](o, path) +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go index a565247a..f5399f87 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "math/rand" "sync/atomic" "time" @@ -239,7 +240,10 @@ func DefaultDialStrategy(ctx context.Context, connID int, opt *Options, dial Dia case ConnOpenInOrder: num = i case ConnOpenRoundRobin: - num = (int(connID) + i) % len(opt.Addr) + num = (connID + i) % len(opt.Addr) + case ConnOpenRandom: + random := rand.Int() + num = (random + i) % len(opt.Addr) } if r, err = dial(ctx, opt.Addr[num], opt); err == nil { @@ -265,6 +269,8 @@ func (ch *clickhouse) acquire(ctx context.Context) (conn *connect, err error) { select { case <-timer.C: return nil, ErrAcquireConnTimeout + case <-ctx.Done(): + return nil, ctx.Err() case ch.open <- struct{}{}: } select { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go index d64b04eb..fe15e6fa 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go @@ -20,15 +20,16 @@ package clickhouse import ( "context" "crypto/tls" + "errors" "fmt" "net" + "net/http" "net/url" "strconv" "strings" "time" "github.com/ClickHouse/ch-go/compress" - "github.com/pkg/errors" ) type CompressionMethod byte @@ -41,6 +42,8 @@ func (c CompressionMethod) String() string { return "zstd" case CompressionLZ4: return "lz4" + case CompressionLZ4HC: + return "lz4hc" case CompressionGZIP: return "gzip" case CompressionDeflate: @@ -55,6 +58,7 @@ func (c CompressionMethod) String() string { const ( CompressionNone = CompressionMethod(compress.None) CompressionLZ4 = CompressionMethod(compress.LZ4) + CompressionLZ4HC = CompressionMethod(compress.LZ4HC) CompressionZSTD = CompressionMethod(compress.ZSTD) CompressionGZIP = CompressionMethod(0x95) CompressionDeflate = CompressionMethod(0x96) @@ -65,6 +69,7 @@ var compressionMap = map[string]CompressionMethod{ "none": CompressionNone, "zstd": CompressionZSTD, "lz4": CompressionLZ4, + "lz4hc": CompressionLZ4HC, "gzip": CompressionGZIP, "deflate": CompressionDeflate, "br": CompressionBrotli, @@ -78,7 +83,7 @@ type Auth struct { // has_control_character type Compression struct { Method CompressionMethod - // this only applies to zlib and brotli compression algorithms + // this only applies to lz4, lz4hc, zlib, and brotli compression algorithms Level int } @@ -87,6 +92,7 @@ type ConnOpenStrategy uint8 const ( ConnOpenInOrder ConnOpenStrategy = iota ConnOpenRoundRobin + ConnOpenRandom ) type Protocol int @@ -120,6 +126,8 @@ type DialResult struct { conn *connect } +type HTTPProxy func(*http.Request) (*url.URL, error) + type Options struct { Protocol Protocol ClientInfo ClientInfo @@ -141,8 +149,12 @@ type Options struct { FreeBufOnConnRelease bool // drop preserved memory buffer after each query HttpHeaders map[string]string // set additional headers on HTTP requests HttpUrlPath string // set additional URL path for HTTP requests + HttpMaxConnsPerHost int // MaxConnsPerHost for http.Transport BlockBufferSize uint8 // default 2 - can be overwritten on query - MaxCompressionBuffer int // default 
10485760 - measured in bytes i.e. 10MiB + MaxCompressionBuffer int // default 10485760 - measured in bytes i.e. + + // HTTPProxy specifies an HTTP proxy URL to use for requests made by the client. + HTTPProxyURL *url.URL scheme string ReadTimeout time.Duration @@ -199,7 +211,7 @@ func (o *Options) fromDSN(in string) error { case "compress_level": level, err := strconv.ParseInt(params.Get(v), 10, 8) if err != nil { - return errors.Wrap(err, "compress_level invalid value") + return fmt.Errorf("compress_level invalid value: %w", err) } if o.Compression == nil { @@ -215,7 +227,7 @@ func (o *Options) fromDSN(in string) error { case "max_compression_buffer": max, err := strconv.Atoi(params.Get(v)) if err != nil { - return errors.Wrap(err, "max_compression_buffer invalid value") + return fmt.Errorf("max_compression_buffer invalid value: %w", err) } o.MaxCompressionBuffer = max case "dial_timeout": @@ -265,23 +277,25 @@ func (o *Options) fromDSN(in string) error { o.ConnOpenStrategy = ConnOpenInOrder case "round_robin": o.ConnOpenStrategy = ConnOpenRoundRobin + case "random": + o.ConnOpenStrategy = ConnOpenRandom } case "max_open_conns": maxOpenConns, err := strconv.Atoi(params.Get(v)) if err != nil { - return errors.Wrap(err, "max_open_conns invalid value") + return fmt.Errorf("max_open_conns invalid value: %w", err) } o.MaxOpenConns = maxOpenConns case "max_idle_conns": maxIdleConns, err := strconv.Atoi(params.Get(v)) if err != nil { - return errors.Wrap(err, "max_idle_conns invalid value") + return fmt.Errorf("max_idle_conns invalid value: %w", err) } o.MaxIdleConns = maxIdleConns case "conn_max_lifetime": connMaxLifetime, err := time.ParseDuration(params.Get(v)) if err != nil { - return errors.Wrap(err, "conn_max_lifetime invalid value") + return fmt.Errorf("conn_max_lifetime invalid value: %w", err) } o.ConnMaxLifetime = connMaxLifetime case "username": @@ -299,6 +313,12 @@ func (o *Options) fromDSN(in string) error { version, }) } + case "http_proxy": + proxyURL, err := url.Parse(params.Get(v)) + if err != nil { + return fmt.Errorf("clickhouse [dsn parse]: http_proxy: %s", err) + } + o.HTTPProxyURL = proxyURL default: switch p := strings.ToLower(params.Get(v)); p { case "true": diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go index 698905e7..1686e902 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go @@ -46,6 +46,9 @@ func (r *rows) Next() (result bool) { } next: if r.row >= r.block.Rows() { + if r.stream == nil { + return false + } select { case err := <-r.errors: if err != nil { @@ -95,27 +98,43 @@ func (r *rows) Columns() []string { } func (r *rows) Close() error { - active := 2 + if r.errors == nil && r.stream == nil { + return r.err + } + + if r.errors == nil { + for range r.stream { + } + return nil + } + + if r.stream == nil { + for err := range r.errors { + r.err = err + } + return r.err + } + + errorsClosed := false + streamClosed := false for { select { case _, ok := <-r.stream: if !ok { - active-- - if active == 0 { - return r.err - } + streamClosed = true } case err, ok := <-r.errors: if err != nil { r.err = err } if !ok { - active-- - if active == 0 { - return r.err - } + errorsClosed = true } } + + if errorsClosed && streamClosed { + return r.err + } } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go 
b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go index 7ab67066..c6871d4a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go @@ -25,6 +25,8 @@ import ( "fmt" "io" "log" + "math/rand" + "net" "os" "reflect" "strings" @@ -46,7 +48,11 @@ type stdConnOpener struct { func (o *stdConnOpener) Driver() driver.Driver { var debugf = func(format string, v ...any) {} if o.opt.Debug { - debugf = log.New(os.Stdout, "[clickhouse-std] ", 0).Printf + if o.opt.Debugf != nil { + debugf = o.opt.Debugf + } else { + debugf = log.New(os.Stdout, "[clickhouse-std] ", 0).Printf + } } return &stdDriver{debugf: debugf} } @@ -83,12 +89,19 @@ func (o *stdConnOpener) Connect(ctx context.Context) (_ driver.Conn, err error) case ConnOpenInOrder: num = i case ConnOpenRoundRobin: - num = (int(connID) + i) % len(o.opt.Addr) + num = (connID + i) % len(o.opt.Addr) + case ConnOpenRandom: + random := rand.Int() + num = (random + i) % len(o.opt.Addr) } if conn, err = dialFunc(ctx, o.opt.Addr[num], connID, o.opt); err == nil { var debugf = func(format string, v ...any) {} if o.opt.Debug { - debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse-std][conn=%d][%s] ", num, o.opt.Addr[num]), 0).Printf + if o.opt.Debugf != nil { + debugf = o.opt.Debugf + } else { + debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse-std][conn=%d][%s] ", num, o.opt.Addr[num]), 0).Printf + } } return &stdDriver{ conn: conn, @@ -102,6 +115,8 @@ func (o *stdConnOpener) Connect(ctx context.Context) (_ driver.Conn, err error) return nil, err } +var _ driver.Connector = (*stdConnOpener)(nil) + func init() { var debugf = func(format string, v ...any) {} sql.Register("clickhouse", &stdDriver{debugf: debugf}) @@ -110,7 +125,10 @@ func init() { // isConnBrokenError returns true if the error class indicates that the // db connection is no longer usable and should be marked bad func isConnBrokenError(err error) bool { - if errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) { + if errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) { + return true + } + if _, ok := err.(*net.OpError); ok { return true } return false @@ -125,7 +143,11 @@ func Connector(opt *Options) driver.Connector { var debugf = func(format string, v ...any) {} if o.Debug { - debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf + if o.Debugf != nil { + debugf = o.Debugf + } else { + debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf + } } return &stdConnOpener{ opt: o, @@ -149,7 +171,11 @@ func OpenDB(opt *Options) *sql.DB { settings = append(settings, "SetConnMaxLifetime") } if opt.Debug { - debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf + if opt.Debugf != nil { + debugf = opt.Debugf + } else { + debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf + } } if len(settings) != 0 { return sql.OpenDB(&stdConnOpener{ @@ -180,6 +206,12 @@ type stdDriver struct { debugf func(format string, v ...any) } +var _ driver.Conn = (*stdDriver)(nil) +var _ driver.ConnBeginTx = (*stdDriver)(nil) +var _ driver.ExecerContext = (*stdDriver)(nil) +var _ driver.QueryerContext = (*stdDriver)(nil) +var _ driver.ConnPrepareContext = (*stdDriver)(nil) + func (std *stdDriver) Open(dsn string) (_ driver.Conn, err error) { var opt Options if err := opt.fromDSN(dsn); err != nil { @@ -195,6 +227,8 @@ func (std *stdDriver) Open(dsn string) (_ driver.Conn, err error) { return (&stdConnOpener{opt: o, debugf: 
debugf}).Connect(context.Background()) } +var _ driver.Driver = (*stdDriver)(nil) + func (std *stdDriver) ResetSession(ctx context.Context) error { if std.conn.isBad() { std.debugf("Resetting session because connection is bad") @@ -203,9 +237,36 @@ func (std *stdDriver) ResetSession(ctx context.Context) error { return nil } -func (std *stdDriver) Ping(ctx context.Context) error { return std.conn.ping(ctx) } +var _ driver.SessionResetter = (*stdDriver)(nil) -func (std *stdDriver) Begin() (driver.Tx, error) { return std, nil } +func (std *stdDriver) Ping(ctx context.Context) error { + if std.conn.isBad() { + std.debugf("Ping: connection is bad") + return driver.ErrBadConn + } + + return std.conn.ping(ctx) +} + +var _ driver.Pinger = (*stdDriver)(nil) + +func (std *stdDriver) Begin() (driver.Tx, error) { + if std.conn.isBad() { + std.debugf("Begin: connection is bad") + return nil, driver.ErrBadConn + } + + return std, nil +} + +func (std *stdDriver) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if std.conn.isBad() { + std.debugf("BeginTx: connection is bad") + return nil, driver.ErrBadConn + } + + return std, nil +} func (std *stdDriver) Commit() error { if std.commit == nil { @@ -232,13 +293,26 @@ func (std *stdDriver) Rollback() error { return nil } +var _ driver.Tx = (*stdDriver)(nil) + func (std *stdDriver) CheckNamedValue(nv *driver.NamedValue) error { return nil } +var _ driver.NamedValueChecker = (*stdDriver)(nil) + func (std *stdDriver) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - if options := queryOptions(ctx); options.async.ok { - return driver.RowsAffected(0), std.conn.asyncInsert(ctx, query, options.async.wait, rebind(args)...) + if std.conn.isBad() { + std.debugf("ExecContext: connection is bad") + return nil, driver.ErrBadConn + } + + var err error + if asyncOpt := queryOptionsAsync(ctx); asyncOpt.ok { + err = std.conn.asyncInsert(ctx, query, asyncOpt.wait, rebind(args)...) + } else { + err = std.conn.exec(ctx, query, rebind(args)...) } - if err := std.conn.exec(ctx, query, rebind(args)...); err != nil { + + if err != nil { if isConnBrokenError(err) { std.debugf("ExecContext got a fatal error, resetting connection: %v\n", err) return nil, driver.ErrBadConn @@ -250,6 +324,11 @@ func (std *stdDriver) ExecContext(ctx context.Context, query string, args []driv } func (std *stdDriver) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + if std.conn.isBad() { + std.debugf("QueryContext: connection is bad") + return nil, driver.ErrBadConn + } + r, err := std.conn.query(ctx, func(*connect, error) {}, query, rebind(args)...) 
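+	// isConnBrokenError maps fatal transport errors (EOF, broken pipe, reset, net.OpError) to driver.ErrBadConn so database/sql can retry on a fresh connection.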
if isConnBrokenError(err) { std.debugf("QueryContext got a fatal error, resetting connection: %v\n", err) @@ -270,6 +349,11 @@ func (std *stdDriver) Prepare(query string) (driver.Stmt, error) { } func (std *stdDriver) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if std.conn.isBad() { + std.debugf("PrepareContext: connection is bad") + return nil, driver.ErrBadConn + } + batch, err := std.conn.prepareBatch(ctx, query, ldriver.PrepareBatchOptions{}, func(*connect, error) {}, func(context.Context) (*connect, error) { return nil, nil }) if err != nil { if isConnBrokenError(err) { @@ -324,7 +408,10 @@ func (s *stdBatch) ExecContext(ctx context.Context, args []driver.NamedValue) (d return s.Exec(values) } +var _ driver.StmtExecContext = (*stdBatch)(nil) + func (s *stdBatch) Query(args []driver.Value) (driver.Rows, error) { + // Note: not implementing driver.StmtQueryContext accordingly return nil, errors.New("only Exec method supported in batch mode") } @@ -343,6 +430,8 @@ func (r *stdRows) ColumnTypeScanType(idx int) reflect.Type { return r.rows.block.Columns[idx].ScanType() } +var _ driver.RowsColumnTypeScanType = (*stdRows)(nil) + func (r *stdRows) ColumnTypeDatabaseTypeName(idx int) string { return string(r.rows.block.Columns[idx].Type()) } @@ -356,15 +445,27 @@ func (r *stdRows) ColumnTypePrecisionScale(idx int) (precision, scale int64, ok switch col := r.rows.block.Columns[idx].(type) { case *column.Decimal: return col.Precision(), col.Scale(), true + case *column.DateTime64: + p, ok := col.Precision() + return p, 0, ok case interface{ Base() column.Interface }: switch col := col.Base().(type) { case *column.Decimal: return col.Precision(), col.Scale(), true + case *column.DateTime64: + p, ok := col.Precision() + return p, 0, ok } } return 0, 0, false } +var _ driver.Rows = (*stdRows)(nil) +var _ driver.RowsNextResultSet = (*stdRows)(nil) +var _ driver.RowsColumnTypeDatabaseTypeName = (*stdRows)(nil) +var _ driver.RowsColumnTypeNullable = (*stdRows)(nil) +var _ driver.RowsColumnTypePrecisionScale = (*stdRows)(nil) + func (r *stdRows) Next(dest []driver.Value) error { if len(r.rows.block.Columns) != len(dest) { err := fmt.Errorf("expected %d destination arguments in Next, not %d", len(r.rows.block.Columns), len(dest)) @@ -386,6 +487,21 @@ func (r *stdRows) Next(dest []driver.Value) error { } dest[i] = v default: + // We don't know what is the destination type at this stage, + // but destination type might be a sql.Null* type that expects to receive a value + // instead of a pointer to a value. ClickHouse-go returns pointers to values for nullable columns. + // + // This is a compatibility layer to make sure that the driver works with the standard library. + // Due to reflection used it has a performance cost. 
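+			// e.g. a Nullable(String) value arrives here as *string and is dereferenced so a destination like sql.NullString receives a plain string (or nil).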
+ if nullable { + if value == nil { + dest[i] = nil + continue + } + rv := reflect.ValueOf(value) + value = rv.Elem().Interface() + } + dest[i] = value } } @@ -413,6 +529,8 @@ func (r *stdRows) NextResultSet() error { return nil } +var _ driver.RowsNextResultSet = (*stdRows)(nil) + func (r *stdRows) Close() error { err := r.rows.Close() if err != nil { @@ -420,3 +538,5 @@ func (r *stdRows) Close() error { } return err } + +var _ driver.Rows = (*stdRows)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go index d7b7c242..07856f45 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go @@ -29,8 +29,8 @@ const ClientName = "clickhouse-go" const ( ClientVersionMajor = 2 - ClientVersionMinor = 17 - ClientVersionPatch = 1 + ClientVersionMinor = 34 + ClientVersionPatch = 0 ClientTCPProtocolVersion = proto.DBMS_TCP_PROTOCOL_VERSION ) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go index 5c2c34b6..9dee9fc3 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go @@ -20,16 +20,17 @@ package clickhouse import ( "context" "crypto/tls" + "errors" "fmt" "io" "log" "net" "os" + "sync" "syscall" "time" "github.com/ClickHouse/clickhouse-go/v2/resources" - "github.com/pkg/errors" "github.com/ClickHouse/ch-go/compress" chproto "github.com/ClickHouse/ch-go/proto" @@ -42,6 +43,7 @@ func dial(ctx context.Context, addr string, num int, opt *Options) (*connect, er conn net.Conn debugf = func(format string, v ...any) {} ) + switch { case opt.DialContext != nil: conn, err = opt.DialContext(ctx, addr) @@ -53,24 +55,40 @@ func dial(ctx context.Context, addr string, num int, opt *Options) (*connect, er conn, err = net.DialTimeout("tcp", addr, opt.DialTimeout) } } + if err != nil { return nil, err } + if opt.Debug { if opt.Debugf != nil { - debugf = opt.Debugf + debugf = func(format string, v ...any) { + opt.Debugf( + "[clickhouse][conn=%d][%s] "+format, + append([]interface{}{num, conn.RemoteAddr()}, v...)..., + ) + } } else { debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse][conn=%d][%s]", num, conn.RemoteAddr()), 0).Printf } } - compression := CompressionNone + + var ( + compression CompressionMethod + compressor *compress.Writer + ) if opt.Compression != nil { switch opt.Compression.Method { - case CompressionLZ4, CompressionZSTD, CompressionNone: + case CompressionLZ4, CompressionLZ4HC, CompressionZSTD, CompressionNone: compression = opt.Compression.Method default: return nil, fmt.Errorf("unsupported compression method for native protocol") } + + compressor = compress.NewWriter(compress.Level(opt.Compression.Level), compress.Method(opt.Compression.Method)) + } else { + compression = CompressionNone + compressor = compress.NewWriter(compress.LevelZero, compress.None) } var ( @@ -85,15 +103,17 @@ func dial(ctx context.Context, addr string, num int, opt *Options) (*connect, er structMap: &structMap{}, compression: compression, connectedAt: time.Now(), - compressor: compress.NewWriter(), + compressor: compressor, readTimeout: opt.ReadTimeout, blockBufferSize: opt.BlockBufferSize, maxCompressionBuffer: opt.MaxCompressionBuffer, } ) + if err := connect.handshake(opt.Auth.Database, opt.Auth.Username, opt.Auth.Password); err != nil { return nil, err } + if connect.revision >= 
proto.DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM { if err := connect.sendAddendum(); err != nil { return nil, err @@ -104,6 +124,7 @@ func dial(ctx context.Context, addr string, num int, opt *Options) (*connect, er if num == 1 && !resources.ClientMeta.IsSupportedClickHouseVersion(connect.server.Version) { debugf("[handshake] WARNING: version %v of ClickHouse is not supported by this client - client supports %v", connect.server.Version, resources.ClientMeta.SupportedVersions()) } + return connect, nil } @@ -126,6 +147,8 @@ type connect struct { readTimeout time.Duration blockBufferSize uint8 maxCompressionBuffer int + readerMutex sync.Mutex + closeMutex sync.Mutex } func (c *connect) settings(querySettings Settings) []proto.Setting { @@ -148,15 +171,16 @@ func (c *connect) settings(querySettings Settings) []proto.Setting { for k, v := range c.opt.Settings { settings = append(settings, settingToProtoSetting(k, v)) } + for k, v := range querySettings { settings = append(settings, settingToProtoSetting(k, v)) } + return settings } func (c *connect) isBad() bool { - switch { - case c.closed: + if c.isClosed() { return true } @@ -167,19 +191,44 @@ func (c *connect) isBad() bool { if err := c.connCheck(); err != nil { return true } + return false } +func (c *connect) isClosed() bool { + c.closeMutex.Lock() + defer c.closeMutex.Unlock() + + return c.closed +} + +func (c *connect) setClosed() { + c.closeMutex.Lock() + defer c.closeMutex.Unlock() + + c.closed = true +} + func (c *connect) close() error { + c.closeMutex.Lock() if c.closed { + c.closeMutex.Unlock() return nil } c.closed = true - c.buffer = nil - c.reader = nil + c.closeMutex.Unlock() + if err := c.conn.Close(); err != nil { return err } + + c.buffer = nil + c.compressor = nil + + c.readerMutex.Lock() + c.reader = nil + c.readerMutex.Unlock() + return nil } @@ -188,6 +237,7 @@ func (c *connect) progress() (*Progress, error) { if err := progress.Decode(c.reader, c.revision); err != nil { return nil, err } + c.debugf("[progress] %s", &progress) return &progress, nil } @@ -197,6 +247,7 @@ func (c *connect) exception() error { if err := e.Decode(c.reader); err != nil { return err } + c.debugf("[exception] %s", e.Error()) return &e } @@ -204,8 +255,8 @@ func (c *connect) exception() error { func (c *connect) compressBuffer(start int) error { if c.compression != CompressionNone && len(c.buffer.Buf) > 0 { data := c.buffer.Buf[start:] - if err := c.compressor.Compress(compress.Method(c.compression), data); err != nil { - return errors.Wrap(err, "compress") + if err := c.compressor.Compress(data); err != nil { + return fmt.Errorf("compress: %w", err) } c.buffer.Buf = append(c.buffer.Buf[:start], c.compressor.Data...) 
} @@ -213,6 +264,12 @@ func (c *connect) compressBuffer(start int) error { } func (c *connect) sendData(block *proto.Block, name string) error { + if c.isClosed() { + err := errors.New("attempted sending on closed connection") + c.debugf("[send data] err: %v", err) + return err + } + c.debugf("[send data] compression=%q", c.compression) c.buffer.PutByte(proto.ClientData) c.buffer.PutString(name) @@ -222,6 +279,7 @@ func (c *connect) sendData(block *proto.Block, name string) error { if err := block.EncodeHeader(c.buffer, c.revision); err != nil { return err } + for i := range block.Columns { if err := block.EncodeColumn(c.buffer, c.revision, i); err != nil { return err @@ -237,42 +295,59 @@ func (c *connect) sendData(block *proto.Block, name string) error { compressionOffset = 0 } } + if err := c.compressBuffer(compressionOffset); err != nil { return err } + if err := c.flush(); err != nil { switch { case errors.Is(err, syscall.EPIPE): c.debugf("[send data] pipe is broken, closing connection") - c.closed = true + c.setClosed() case errors.Is(err, io.EOF): c.debugf("[send data] unexpected EOF, closing connection") - c.closed = true + c.setClosed() default: c.debugf("[send data] unexpected error: %v", err) } return err } + defer func() { c.buffer.Reset() }() + return nil } func (c *connect) readData(ctx context.Context, packet byte, compressible bool) (*proto.Block, error) { + if c.isClosed() { + err := errors.New("attempted reading on closed connection") + c.debugf("[read data] err: %v", err) + return nil, err + } + + if c.reader == nil { + err := errors.New("attempted reading on nil reader") + c.debugf("[read data] err: %v", err) + return nil, err + } + if _, err := c.reader.Str(); err != nil { c.debugf("[read data] str error: %v", err) return nil, err } + if compressible && c.compression != CompressionNone { c.reader.EnableCompression() defer c.reader.DisableCompression() } - opts := queryOptions(ctx) + userLocation := queryOptionsUserLocation(ctx) location := c.server.Timezone - if opts.userLocation != nil { - location = opts.userLocation + if userLocation != nil { + location = userLocation } block := proto.Block{Timezone: location} @@ -280,6 +355,7 @@ func (c *connect) readData(ctx context.Context, packet byte, compressible bool) c.debugf("[read data] decode error: %v", err) return nil, err } + block.Packet = packet c.debugf("[read data] compression=%q. block: columns=%d, rows=%d", c.compression, len(block.Columns), block.Rows()) return &block, nil @@ -290,10 +366,12 @@ func (c *connect) flush() error { // Nothing to flush. 
return nil } + n, err := c.conn.Write(c.buffer.Buf) if err != nil { - return errors.Wrap(err, "write") + return fmt.Errorf("write: %w", err) } + if n != len(c.buffer.Buf) { return errors.New("wrote less than expected") } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go index 38329900..41090d24 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go @@ -19,40 +19,28 @@ package clickhouse import ( "context" + "errors" "fmt" "os" "regexp" - "strings" + "slices" + "syscall" "time" - "github.com/pkg/errors" - "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) -var splitInsertRe = regexp.MustCompile(`(?i)\sVALUES\s*\(`) -var columnMatch = regexp.MustCompile(`.*\((?P.+)\)$`) +var insertMatch = regexp.MustCompile(`(?i)(INSERT\s+INTO\s+[^( ]+(?:\s*\([^()]*(?:\([^()]*\)[^()]*)*\))?)(?:\s*VALUES)?`) +var columnMatch = regexp.MustCompile(`INSERT INTO .+\s\((?P.+)\)$`) func (c *connect) prepareBatch(ctx context.Context, query string, opts driver.PrepareBatchOptions, release func(*connect, error), acquire func(context.Context) (*connect, error)) (driver.Batch, error) { - //defer func() { - // if err := recover(); err != nil { - // fmt.Printf("panic occurred on %d:\n", c.num) - // } - //}() - query = splitInsertRe.Split(query, -1)[0] - colMatch := columnMatch.FindStringSubmatch(query) - var columns []string - if len(colMatch) == 2 { - columns = strings.Split(colMatch[1], ",") - for i := range columns { - columns[i] = strings.Trim(strings.TrimSpace(columns[i]), "`\"") - } - } - if !strings.HasSuffix(strings.TrimSpace(strings.ToUpper(query)), "VALUES") { - query += " VALUES" + query, _, queryColumns, verr := extractNormalizedInsertQueryAndColumns(query) + if verr != nil { + return nil, verr } + options := queryOptions(ctx) if deadline, ok := ctx.Deadline(); ok { c.conn.SetDeadline(deadline) @@ -71,19 +59,20 @@ func (c *connect) prepareBatch(ctx context.Context, query string, opts driver.Pr return nil, err } // resort batch to specified columns - if err = block.SortColumns(columns); err != nil { + if err = block.SortColumns(queryColumns); err != nil { return nil, err } b := &batch{ - ctx: ctx, - query: query, - conn: c, - block: block, - released: false, - connRelease: release, - connAcquire: acquire, - onProcess: onProcess, + ctx: ctx, + query: query, + conn: c, + block: block, + released: false, + connRelease: release, + connAcquire: acquire, + onProcess: onProcess, + closeOnFlush: opts.CloseOnFlush, } if opts.ReleaseConnection { @@ -94,16 +83,17 @@ func (c *connect) prepareBatch(ctx context.Context, query string, opts driver.Pr } type batch struct { - err error - ctx context.Context - query string - conn *connect - sent bool // sent signalize that batch is send to ClickHouse. - released bool // released signalize that conn was returned to pool and can't be used. - block *proto.Block - connRelease func(*connect, error) - connAcquire func(context.Context) (*connect, error) - onProcess *onProcess + err error + ctx context.Context + query string + conn *connect + sent bool // sent signalize that batch is send to ClickHouse. + released bool // released signalize that conn was returned to pool and can't be used. 
+ closeOnFlush bool // closeOnFlush signalize that batch should close query and release conn when use Flush + block *proto.Block + connRelease func(*connect, error) + connAcquire func(context.Context) (*connect, error) + onProcess *onProcess } func (b *batch) release(err error) { @@ -131,14 +121,51 @@ func (b *batch) Append(v ...any) error { if b.err != nil { return b.err } + + if len(v) > 0 { + if r, ok := v[0].(*rows); ok { + return b.appendRowsBlocks(r) + } + } + if err := b.block.Append(v...); err != nil { - b.err = errors.Wrap(ErrBatchInvalid, err.Error()) + b.err = fmt.Errorf("%w: %w", ErrBatchInvalid, err) b.release(err) return err } return nil } +// appendRowsBlocks is an experimental feature that allows rows blocks be appended directly to the batch. +// This API is not stable and may be changed in the future. +// See: tests/batch_block_test.go +func (b *batch) appendRowsBlocks(r *rows) error { + var lastReadLock *proto.Block + var blockNum int + + for r.Next() { + if lastReadLock == nil { // make sure the first block is logged + b.conn.debugf("[batch.appendRowsBlocks] blockNum = %d", blockNum) + } + + // rows.Next() will read the next block from the server only if the current block is empty + // only if new block is available we should flush the current block + // the last block will be handled by the batch.Send() method + if lastReadLock != nil && lastReadLock != r.block { + if err := b.Flush(); err != nil { + return err + } + blockNum++ + b.conn.debugf("[batch.appendRowsBlocks] blockNum = %d", blockNum) + } + + b.block = r.block + lastReadLock = r.block + } + + return nil +} + func (b *batch) AppendStruct(v any) error { if b.err != nil { return b.err @@ -259,8 +286,15 @@ func (b *batch) Flush() error { } if b.block.Rows() != 0 { if err := b.conn.sendData(b.block, ""); err != nil { + // broken pipe/conn reset aren't generally recoverable on retry + if errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) { + b.release(err) + } return err } + if b.closeOnFlush { + b.release(b.closeQuery()) + } } b.block.Reset() return nil @@ -270,6 +304,10 @@ func (b *batch) Rows() int { return b.block.Rows() } +func (b *batch) Columns() []column.Interface { + return slices.Clone(b.block.Columns) +} + func (b *batch) closeQuery() error { if err := b.conn.sendData(&proto.Block{}, ""); err != nil { return err diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go index a5d9e9c2..bc9d11c0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go @@ -24,9 +24,9 @@ import ( "compress/zlib" "context" "database/sql/driver" + "errors" "fmt" "io" - "io/ioutil" "log" "mime/multipart" "net" @@ -43,7 +43,6 @@ import ( chproto "github.com/ClickHouse/ch-go/proto" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" "github.com/andybalholm/brotli" - "github.com/pkg/errors" ) const ( @@ -76,49 +75,32 @@ type HTTPReaderWriter struct { method CompressionMethod } -func (rw HTTPReaderWriter) read(res *http.Response) ([]byte, error) { +// NewReader will return a reader that will decompress data if needed. 
+func (rw *HTTPReaderWriter) NewReader(res *http.Response) (io.Reader, error) { enc := res.Header.Get("Content-Encoding") if !res.Uncompressed && rw.method.String() == enc { switch rw.method { case CompressionGZIP: reader := rw.reader.(*gzip.Reader) - defer reader.Close() if err := reader.Reset(res.Body); err != nil { return nil, err } - body, err := ioutil.ReadAll(reader) - if err != nil { - return nil, err - } - return body, nil + return reader, nil case CompressionDeflate: - reader := rw.reader.(io.ReadCloser) - defer reader.Close() - if err := rw.reader.(flate.Resetter).Reset(res.Body, nil); err != nil { - return nil, err - } - body, err := ioutil.ReadAll(reader) - if err != nil { + reader := rw.reader + if err := reader.(flate.Resetter).Reset(res.Body, nil); err != nil { return nil, err } - return body, nil + return reader, nil case CompressionBrotli: reader := rw.reader.(*brotli.Reader) if err := reader.Reset(res.Body); err != nil { return nil, err } - body, err := ioutil.ReadAll(reader) - if err != nil { - return nil, err - } - return body, nil + return reader, nil } } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - return body, nil + return res.Body, nil } func (rw *HTTPReaderWriter) reset(pw *io.PipeWriter) io.WriteCloser { @@ -141,7 +123,12 @@ func dialHttp(ctx context.Context, addr string, num int, opt *Options) (*httpCon var debugf = func(format string, v ...any) {} if opt.Debug { if opt.Debugf != nil { - debugf = opt.Debugf + debugf = func(format string, v ...any) { + opt.Debugf( + "[clickhouse][conn=%d][%s] "+format, + append([]interface{}{num, addr}, v...)..., + ) + } } else { debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse][conn=%d][%s]", num, addr), 0).Printf } @@ -179,6 +166,9 @@ func dialHttp(ctx context.Context, addr string, num int, opt *Options) (*httpCon headers["X-ClickHouse-User"] = opt.Auth.Username if len(opt.Auth.Password) > 0 { headers["X-ClickHouse-Key"] = opt.Auth.Password + headers["X-ClickHouse-SSL-Certificate-Auth"] = "off" + } else { + headers["X-ClickHouse-SSL-Certificate-Auth"] = "on" } } @@ -211,12 +201,18 @@ func dialHttp(ctx context.Context, addr string, num int, opt *Options) (*httpCon query.Set("default_format", "Native") u.RawQuery = query.Encode() + httpProxy := http.ProxyFromEnvironment + if opt.HTTPProxyURL != nil { + httpProxy = http.ProxyURL(opt.HTTPProxyURL) + } + t := &http.Transport{ - Proxy: http.ProxyFromEnvironment, + Proxy: httpProxy, DialContext: (&net.Dialer{ Timeout: opt.DialTimeout, }).DialContext, MaxIdleConns: 1, + MaxConnsPerHost: opt.HttpMaxConnsPerHost, IdleConnTimeout: opt.ConnMaxLifetime, ResponseHeaderTimeout: opt.ReadTimeout, TLSClientConfig: opt.TLS, @@ -235,7 +231,7 @@ func dialHttp(ctx context.Context, addr string, num int, opt *Options) (*httpCon url: u, buffer: new(chproto.Buffer), compression: opt.Compression.Method, - blockCompressor: compress.NewWriter(), + blockCompressor: compress.NewWriter(compress.Level(opt.Compression.Level), compress.Method(opt.Compression.Method)), compressionPool: compressionPool, blockBufferSize: opt.BlockBufferSize, headers: headers, @@ -261,7 +257,7 @@ func dialHttp(ctx context.Context, addr string, num int, opt *Options) (*httpCon url: u, buffer: new(chproto.Buffer), compression: opt.Compression.Method, - blockCompressor: compress.NewWriter(), + blockCompressor: compress.NewWriter(compress.Level(opt.Compression.Level), compress.Method(opt.Compression.Method)), compressionPool: compressionPool, location: location, blockBufferSize: 
opt.BlockBufferSize, @@ -382,19 +378,18 @@ func (h *httpConnect) writeData(block *proto.Block) error { if h.compression == CompressionLZ4 || h.compression == CompressionZSTD { // Performing compression. Supported and requires data := h.buffer.Buf[start:] - if err := h.blockCompressor.Compress(compress.Method(h.compression), data); err != nil { - return errors.Wrap(err, "compress") + if err := h.blockCompressor.Compress(data); err != nil { + return fmt.Errorf("compress: %w", err) } h.buffer.Buf = append(h.buffer.Buf[:start], h.blockCompressor.Data...) } return nil } -func (h *httpConnect) readData(ctx context.Context, reader *chproto.Reader) (*proto.Block, error) { - opts := queryOptions(ctx) +func (h *httpConnect) readData(reader *chproto.Reader, timezone *time.Location) (*proto.Block, error) { location := h.location - if opts.userLocation != nil { - location = opts.userLocation + if timezone != nil { + location = timezone } block := proto.Block{Timezone: location} @@ -437,27 +432,21 @@ func (h *httpConnect) sendQuery(ctx context.Context, query string, options *Quer func (h *httpConnect) readRawResponse(response *http.Response) (body []byte, err error) { rw := h.compressionPool.Get() - defer response.Body.Close() defer h.compressionPool.Put(rw) - if body, err = rw.read(response); err != nil { + + reader, err := rw.NewReader(response) + if err != nil { return nil, err } if h.compression == CompressionLZ4 || h.compression == CompressionZSTD { - result := make([]byte, len(body)) - reader := chproto.NewReader(bytes.NewReader(body)) - reader.EnableCompression() - defer reader.DisableCompression() - for { - b, err := reader.ReadByte() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return nil, err - } - result = append(result, b) - } - return result, nil + chReader := chproto.NewReader(reader) + chReader.EnableCompression() + reader = chReader + } + + body, err = io.ReadAll(reader) + if err != nil && !errors.Is(err, io.EOF) { + return nil, err } return body, nil } @@ -550,14 +539,13 @@ func (h *httpConnect) executeRequest(req *http.Request) (*http.Response, error) if err != nil { return nil, err } - if resp.StatusCode != http.StatusOK { + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() msg, err := h.readRawResponse(resp) - if err != nil { return nil, fmt.Errorf("clickhouse [execute]:: %d code: failed to read the response: %w", resp.StatusCode, err) } - return nil, fmt.Errorf("clickhouse [execute]:: %d code: %s", resp.StatusCode, string(msg)) } return resp, nil diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go index 4a748de6..3e197f0b 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go @@ -20,7 +20,6 @@ package clickhouse import ( "context" "io" - "io/ioutil" ) func (h *httpConnect) asyncInsert(ctx context.Context, query string, wait bool, args ...any) error { @@ -43,7 +42,7 @@ func (h *httpConnect) asyncInsert(ctx context.Context, query string, wait bool, if res != nil { defer res.Body.Close() // we don't care about result, so just discard it to reuse connection - _, _ = io.Copy(ioutil.Discard, res.Body) + _, _ = io.Copy(io.Discard, res.Body) } return err diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go index d64faeb3..b4b27920 100644 --- 
a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go @@ -19,63 +19,53 @@ package clickhouse import ( "context" - "errors" "fmt" + "io" + "slices" + "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" - "io" - "io/ioutil" - "regexp" - "strings" ) -// \x60 represents a backtick -var httpInsertRe = regexp.MustCompile(`(?i)^INSERT INTO\s+\x60?([\w.^\(]+)\x60?\s*(\([^\)]*\))?`) - // release is ignored, because http used by std with empty release function. // Also opts ignored because all options unused in http batch. func (h *httpConnect) prepareBatch(ctx context.Context, query string, opts driver.PrepareBatchOptions, release func(*connect, error), acquire func(context.Context) (*connect, error)) (driver.Batch, error) { - matches := httpInsertRe.FindStringSubmatch(query) - if len(matches) < 3 { - return nil, errors.New("cannot get table name from query") - } - tableName := matches[1] - var rColumns []string - if matches[2] != "" { - colMatch := strings.TrimSuffix(strings.TrimPrefix(matches[2], "("), ")") - rColumns = strings.Split(colMatch, ",") - for i := range rColumns { - rColumns[i] = strings.Trim(strings.TrimSpace(rColumns[i]), "`") - } + query, tableName, queryColumns, err := extractNormalizedInsertQueryAndColumns(query) + if err != nil { + return nil, err } - query = "INSERT INTO " + tableName + " FORMAT Native" - queryTableSchema := "DESCRIBE TABLE " + tableName - r, err := h.query(ctx, release, queryTableSchema) + + describeTableQuery := fmt.Sprintf("DESCRIBE TABLE %s", tableName) + r, err := h.query(ctx, release, describeTableQuery) if err != nil { return nil, err } block := &proto.Block{} - // get Table columns and types columns := make(map[string]string) var colNames []string for r.Next() { var ( - colName string - colType string - ignore string + colName string + colType string + default_type string + ignore string ) - if err = r.Scan(&colName, &colType, &ignore, &ignore, &ignore, &ignore, &ignore); err != nil { + if err = r.Scan(&colName, &colType, &default_type, &ignore, &ignore, &ignore, &ignore); err != nil { return nil, err } + // these column types cannot be specified in INSERT queries + if default_type == "MATERIALIZED" || default_type == "ALIAS" { + continue + } colNames = append(colNames, colName) columns[colName] = colType } - switch len(rColumns) { + switch len(queryColumns) { case 0: for _, colName := range colNames { if err = block.AddColumn(colName, column.Type(columns[colName])); err != nil { @@ -84,7 +74,7 @@ func (h *httpConnect) prepareBatch(ctx context.Context, query string, opts drive } default: // user has requested specific columns so only include these - for _, colName := range rColumns { + for _, colName := range queryColumns { if colType, ok := columns[colName]; ok { if err = block.AddColumn(colName, column.Type(colType)); err != nil { return nil, err @@ -194,6 +184,7 @@ func (b *httpBatch) Send() (err error) { headers["Content-Encoding"] = b.conn.compression.String() case CompressionZSTD, CompressionLZ4: options.settings["decompress"] = "1" + options.settings["compress"] = "1" } go func() { @@ -224,7 +215,7 @@ func (b *httpBatch) Send() (err error) { if res != nil { defer res.Body.Close() // we don't care about result, so just discard it to reuse connection - _, _ = io.Copy(ioutil.Discard, res.Body) + _, _ = io.Copy(io.Discard, res.Body) } return err @@ -234,4 +225,8 @@ 
func (b *httpBatch) Rows() int { return b.block.Rows() } +func (b *httpBatch) Columns() []column.Interface { + return slices.Clone(b.block.Columns) +} + var _ driver.Batch = (*httpBatch)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_exec.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_exec.go index 0af9de0b..75198eb1 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_exec.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_exec.go @@ -20,7 +20,6 @@ package clickhouse import ( "context" "io" - "io/ioutil" ) func (h *httpConnect) exec(ctx context.Context, query string, args ...any) error { @@ -34,7 +33,7 @@ func (h *httpConnect) exec(ctx context.Context, query string, args ...any) error if res != nil { defer res.Body.Close() // we don't care about result, so just discard it to reuse connection - _, _ = io.Copy(ioutil.Discard, res.Body) + _, _ = io.Copy(io.Discard, res.Body) } return err diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_query.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_query.go index 3716a285..7ee12002 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_query.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_query.go @@ -18,12 +18,12 @@ package clickhouse import ( - "bytes" "context" "errors" + "io" + chproto "github.com/ClickHouse/ch-go/proto" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" - "io" ) // release is ignored, because http used by std with empty release function @@ -50,52 +50,48 @@ func (h *httpConnect) query(ctx context.Context, release func(*connect, error), if err != nil { return nil, err } - defer res.Body.Close() - // detect compression from http Content-Encoding header - note user will need to have set enable_http_compression - // for CH to respond with compressed data - we don't set this automatically as they might not have permissions - var body []byte - //adding Accept-Encoding:gzip on your request means response won’t be automatically decompressed per https://github.com/golang/go/blob/master/src/net/http/transport.go#L182-L190 - - rw := h.compressionPool.Get() - body, err = rw.read(res) - bufferSize := h.blockBufferSize - if options.blockBufferSize > 0 { - // allow block buffer sze to be overridden per query - bufferSize = options.blockBufferSize - } - var ( - errCh = make(chan error) - stream = make(chan *proto.Block, bufferSize) - ) - if len(body) == 0 { - // queries with no results can get an empty body - go func() { - close(stream) - close(errCh) - }() + if res.ContentLength == 0 { + block := &proto.Block{} return &rows{ - err: nil, - stream: stream, - errors: errCh, - block: &proto.Block{}, - columns: []string{}, + block: block, + columns: block.ColumnsNames(), structMap: &structMap{}, }, nil } + + rw := h.compressionPool.Get() + // The HTTPReaderWriter.NewReader will create a reader that will decompress it if needed, + // cause adding Accept-Encoding:gzip on your request means response won’t be automatically decompressed + // per https://github.com/golang/go/blob/master/src/net/http/transport.go#L182-L190. + // Note user will need to have set enable_http_compression for CH to respond with compressed data. we don't set this + // automatically as they might not have permissions. 
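HTTPReaderWriter.NewReader above now hands the caller an io.Reader instead of buffering the whole response body. A rough standard-library sketch of the same idea (the real code additionally reuses pooled readers via Reset; newBodyReader is an illustrative stand-in, not driver API):

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
)

// wrap the body in a streaming decompressor when the encoding calls for it,
// otherwise pass the body through untouched
func newBodyReader(body io.Reader, contentEncoding string) (io.Reader, error) {
    if contentEncoding != "gzip" {
        return body, nil
    }
    return gzip.NewReader(body)
}

func main() {
    // fabricate a gzip-compressed "response body"
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    zw.Write([]byte("native block bytes"))
    zw.Close()

    r, err := newBodyReader(&buf, "gzip")
    if err != nil {
        panic(err)
    }
    out, _ := io.ReadAll(r) // the caller decides how much to read, and when
    fmt.Println(string(out))
}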
+ reader, err := rw.NewReader(res) if err != nil { + res.Body.Close() + h.compressionPool.Put(rw) return nil, err } - h.compressionPool.Put(rw) - reader := chproto.NewReader(bytes.NewReader(body)) - block, err := h.readData(ctx, reader) - if err != nil { + chReader := chproto.NewReader(reader) + block, err := h.readData(chReader, options.userLocation) + if err != nil && !errors.Is(err, io.EOF) { + res.Body.Close() + h.compressionPool.Put(rw) return nil, err } + bufferSize := h.blockBufferSize + if options.blockBufferSize > 0 { + // allow block buffer sze to be overridden per query + bufferSize = options.blockBufferSize + } + var ( + errCh = make(chan error) + stream = make(chan *proto.Block, bufferSize) + ) go func() { for { - block, err := h.readData(ctx, reader) + block, err := h.readData(chReader, options.userLocation) if err != nil { // ch-go wraps EOF errors if !errors.Is(err, io.EOF) { @@ -110,10 +106,15 @@ func (h *httpConnect) query(ctx context.Context, release func(*connect, error), case stream <- block: } } + res.Body.Close() + h.compressionPool.Put(rw) close(stream) close(errCh) }() + if block == nil { + block = &proto.Block{} + } return &rows{ block: block, stream: stream, diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_process.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_process.go index 967e2ffc..ca452a71 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_process.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_process.go @@ -19,9 +19,11 @@ package clickhouse import ( "context" + "errors" "fmt" - "github.com/ClickHouse/clickhouse-go/v2/lib/proto" "io" + + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) type onProcess struct { @@ -33,51 +35,137 @@ type onProcess struct { } func (c *connect) firstBlock(ctx context.Context, on *onProcess) (*proto.Block, error) { + // if context is already timedout/cancelled — we're done + select { + case <-ctx.Done(): + c.cancel() + return nil, ctx.Err() + default: + } + + // do reads in background + resultCh := make(chan *proto.Block, 1) + errCh := make(chan error, 1) + + go func() { + block, err := c.firstBlockImpl(ctx, on) + if err != nil { + errCh <- err + return + } + resultCh <- block + }() + + // select on context or read channels (results/errors) + select { + case <-ctx.Done(): + c.cancel() + return nil, ctx.Err() + + case err := <-errCh: + return nil, err + + case block := <-resultCh: + return block, nil + } +} + +func (c *connect) firstBlockImpl(ctx context.Context, on *onProcess) (*proto.Block, error) { + c.readerMutex.Lock() + defer c.readerMutex.Unlock() + for { - select { - case <-ctx.Done(): - c.cancel() - return nil, ctx.Err() - default: + if c.reader == nil { + return nil, errors.New("unexpected state: c.reader is nil") } + packet, err := c.reader.ReadByte() if err != nil { return nil, err } + switch packet { case proto.ServerData: return c.readData(ctx, packet, true) + case proto.ServerEndOfStream: c.debugf("[end of stream]") return nil, io.EOF + default: if err := c.handle(ctx, packet, on); err != nil { + // handling error, return return nil, err } + + // handled okay, read next byte } } } func (c *connect) process(ctx context.Context, on *onProcess) error { + // if context is already timedout/cancelled — we're done + select { + case <-ctx.Done(): + c.cancel() + return ctx.Err() + default: + } + + // do reads in background + errCh := make(chan error, 1) + doneCh := make(chan bool, 1) + + go func() { + err := c.processImpl(ctx, on) + if err != nil { + errCh <- err + return 
+ } + + doneCh <- true + }() + + // select on context or read channel (errors) + select { + case <-ctx.Done(): + c.cancel() + return ctx.Err() + + case err := <-errCh: + return err + + case <-doneCh: + return nil + } +} + +func (c *connect) processImpl(ctx context.Context, on *onProcess) error { + c.readerMutex.Lock() + defer c.readerMutex.Unlock() + for { - select { - case <-ctx.Done(): - c.cancel() - return ctx.Err() - default: + if c.reader == nil { + return errors.New("unexpected state: c.reader is nil") } + packet, err := c.reader.ReadByte() if err != nil { return err } + switch packet { case proto.ServerEndOfStream: c.debugf("[end of stream]") return nil } + if err := c.handle(ctx, packet, on); err != nil { + // handling error, return return err } + + // handled okay, read next byte } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_query.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_query.go index 0fef95ca..dbd03939 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_query.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_query.go @@ -65,7 +65,7 @@ func (c *connect) query(ctx context.Context, release func(*connect, error), quer bufferSize = options.blockBufferSize } var ( - errors = make(chan error) + errors = make(chan error, 1) stream = make(chan *proto.Block, bufferSize) ) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/context.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/context.go index 67cd2c8d..7f4d7c30 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/context.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/context.go @@ -19,6 +19,7 @@ package clickhouse import ( "context" + "maps" "time" "github.com/ClickHouse/clickhouse-go/v2/ext" @@ -43,12 +44,13 @@ type CustomSetting struct { type Parameters map[string]string type ( QueryOption func(*QueryOptions) error + AsyncOptions struct { + ok bool + wait bool + } QueryOptions struct { - span trace.SpanContext - async struct { - ok bool - wait bool - } + span trace.SpanContext + async AsyncOptions queryID string quotaKey string events struct { @@ -163,26 +165,68 @@ func ignoreExternalTables() QueryOption { } } +// Context returns a derived context with the given ClickHouse QueryOptions. +// Existing QueryOptions will be overwritten per option if present. +// The QueryOptions Settings map will be initialized if nil. func Context(parent context.Context, options ...QueryOption) context.Context { - opt := queryOptions(parent) + var opt QueryOptions + if ctxOpt, ok := parent.Value(_contextOptionKey).(QueryOptions); ok { + opt = ctxOpt + } + for _, f := range options { f(&opt) } + + if opt.settings == nil { + opt.settings = make(Settings) + } + return context.WithValue(parent, _contextOptionKey, opt) } +// queryOptions returns a mutable copy of the QueryOptions struct within the given context. +// If ClickHouse context was not provided, an empty struct with a valid Settings map is returned. +// If the context has a deadline greater than 1s then max_execution_time setting is appended. 
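The Context helper above is now documented as the way to attach per-query options, and queryOptions clones the Settings map before mutating it. A short usage sketch, assuming the exported WithSettings/WithQueryID option helpers from the public v2 API (the connection itself is omitted):

package main

import (
    "context"
    "time"

    "github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // settings and the query ID travel on the context; queryOptions() later
    // works on a clone, e.g. when it adds max_execution_time because the
    // deadline is more than a second away
    ctx = clickhouse.Context(ctx,
        clickhouse.WithSettings(clickhouse.Settings{"max_block_size": 10000}),
        clickhouse.WithQueryID("vendor-bump-example"),
    )
    _ = ctx // pass to Query/Exec on a clickhouse connection as usual
}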
func queryOptions(ctx context.Context) QueryOptions { - if o, ok := ctx.Value(_contextOptionKey).(QueryOptions); ok { - if deadline, ok := ctx.Deadline(); ok { - if sec := time.Until(deadline).Seconds(); sec > 1 { - o.settings["max_execution_time"] = int(sec + 5) - } + var opt QueryOptions + + if ctxOpt, ok := ctx.Value(_contextOptionKey).(QueryOptions); ok { + opt = ctxOpt.clone() + } else { + opt = QueryOptions{ + settings: make(Settings), } - return o } - return QueryOptions{ - settings: make(Settings), + + deadline, ok := ctx.Deadline() + if !ok { + return opt + } + + if sec := time.Until(deadline).Seconds(); sec > 1 { + opt.settings["max_execution_time"] = int(sec + 5) } + + return opt +} + +// queryOptionsAsync returns the AsyncOptions struct within the given context's QueryOptions. +func queryOptionsAsync(ctx context.Context) AsyncOptions { + if opt, ok := ctx.Value(_contextOptionKey).(QueryOptions); ok { + return opt.async + } + + return AsyncOptions{} +} + +// queryOptionsUserLocation returns the *time.Location within the given context's QueryOptions. +func queryOptionsUserLocation(ctx context.Context) *time.Location { + if opt, ok := ctx.Value(_contextOptionKey).(QueryOptions); ok { + return opt.userLocation + } + + return nil } func (q *QueryOptions) onProcess() *onProcess { @@ -211,3 +255,31 @@ func (q *QueryOptions) onProcess() *onProcess { }, } } + +// clone returns a copy of QueryOptions where Settings and Parameters are safely mutable. +func (q *QueryOptions) clone() QueryOptions { + c := QueryOptions{ + span: q.span, + async: q.async, + queryID: q.queryID, + quotaKey: q.quotaKey, + events: q.events, + settings: nil, + parameters: nil, + external: q.external, + blockBufferSize: q.blockBufferSize, + userLocation: q.userLocation, + } + + if q.settings != nil { + c.settings = make(Settings, len(q.settings)) + maps.Copy(c.settings, q.settings) + } + + if q.parameters != nil { + c.parameters = make(Parameters, len(q.parameters)) + maps.Copy(c.parameters, q.parameters) + } + + return c +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list b/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list index 01d27626..b1c3263d 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list @@ -1 +1,216 @@ +Aaron Harlap +Abraham Adberstein +Aleksandr Petrukhin +Aleksandr Razumov +Alex Bocharov +Alex Salt +Alex Yang +Alex Zaytsev +Alexander Chumakov +Alexander Obukhov +Alexey Milovidov +Alexey Palazhchenko +Alvaro Tuso +Andrey Ustinov +Andžej Maciusovič +Antoine Toulme +Anton Kozlov +Aram Peres <6775216+aramperes@users.noreply.github.com> +Ashish Gaurav +Ather Shu +Barkhayot <157342287+hayotbisonai@users.noreply.github.com> +Beck Xu +Benjamin Rupp +Cem Sancak +Chao Wang +CharlesFeng +Chris Duncan +Daguang <28806852+DGuang21@users.noreply.github.com> +Dale McDiarmid +Dale Mcdiarmid +Damir Sayfutdinov +Dan Walters +Daniel Bershatsky +Danila Migalin +Danny.Dunn +Darío +Dave Josephsen +Dean Karn +Denis Gukov +Denis Krivak +Denys +Derek Perkins +Dmitry Markov +Dmitry Ponomarev +Dmitry Ponomarev +Earwin +Egor Samotoev <38672780+egsam98@users.noreply.github.com> +Egor.Gorlin +Eric Thomas +Eugene Formanenko +Evan Au +Ewan +Exca-DK +Felipe Lema +Florian Lehner +Fredz <513317651@qq.com> +Félix Mattrat +Geoff Genz +GitHub Action +Gregory Petrosyan +Guoqiang +Ian McGraw +Ildarov Gazimagomed <60438666+threadedstream@users.noreply.github.com> +Ivan +Ivan Blinkov +Ivan Blinkov +Ivan 
Ivanov +Ivan Yurochko +Ivan Zhukov +Jake Sylvestre +Jakub Chábek +James Hartig +Jan Was +Jeehoon Kim +Jeway <152489546+xjeway@users.noreply.github.com> +Jimmie Han +John Troy +Jon Aquino +Julian Maicher +Kevin Joiner <10265309+KevinJoiner@users.noreply.github.com> +Kirill Shvakov +Kiswono Prayogo +Kuba Kaflik +LI Tao +LIU Chao <42240939+xiaochaoren1@users.noreply.github.com> +Larry Snizek <72978202+larry-cdn77@users.noreply.github.com> +Lars Lehtonen +Leo Di Donato +Louis +Luc Vieillescazes +Lukas Eklund +LuoJi Zhu +Maksim Sokolnikov +Marek Vavrusa +Marek Vavruša +Marek Vavruša +Marek Vavruša +Mark Andrus Roberts +Mark Roberts +Matevz Mihalic +Max Justus Spransy +Michael Vigovsky +Michail Safronov +Miel Donkers +Miel Donkers +Miel Donkers +Mikhail Shustov +Nathan J Mehl <70606471+n-oden@users.noreply.github.com> +Nay Linn +Nikita Mikhaylov Nityananda Gohain +Oleg Strokachuk +Oleksandr Nikitin +Omurbek +Pablo Matias Gomez +PalanQu +Pavel Lobach +Paweł Rozlach <2124609+vespian@users.noreply.github.com> +Philip Dubé +Philip Dubé +Philipp Schreiber +Richard Artoul +Rim Zaidullin +Robert Sköld +Robin Hahling +Roman Usachev +Rory Crispin +Ross Rothenstine +Ross Rothenstine +Ryan +Selboo +Serge Klochkov <3175289+slvrtrn@users.noreply.github.com> +Sergei Sobolev +Sergey Melekhin +Sergey Salnikov +Sergey Salnikov +Shoshin Nikita +Shunsuke Otani +Spencer Torres +Srikanth Chekuri +Stepan Rabotkin <36516357+EpicStep@users.noreply.github.com> +Stepan Rabotkin +Stepan Rabotkin +Stephane Moreau +Stephanie Hingtgen +Taras Matsyk +Thibault Deutsch +Tomasz Czubocha +Tommy Li +Tsimafei Bredau +Valerii Pozdiaev <12141268+ValManP@users.noreply.github.com> +Varun Vasan V +Vespertinus +Vincent Bernat +Vitaly Orlov +Vyacheslav Stepanov +YenchangChan <43897067+YenchangChan@users.noreply.github.com> +Yoga Setiawan +Yury Korolev +Yury Yurochko +Zachary <136084364+gogingersnap777@users.noreply.github.com> +Zhehao Wu +a-dot +achmad-dev <83978538+achmad-dev@users.noreply.github.com> +albertlockett +alex +anton troyanov +astduman <41344369+Astemirdum@users.noreply.github.com> +barkhayot +caleb.xiang <90543061+cxiang03@users.noreply.github.com> +candiduslynx +chengzhi +chenlujjj <953546398@qq.com> +coldWater +count +daguang +daguang +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> +derN3rd +dmitry kuzmin +fengberlin +fengyun.rui +gaetan.rizio +guangwu +hexchain +hongker +hulb +ianmcgraw +ilker moral +jiyongwang +kshvakov +neverlee +nevseliev +ortyomka +pavel raskin +restrry +rogeryk +rtkaratekid <42547811+rtkaratekid@users.noreply.github.com> +sentanos +sundy-li <543950155@qq.com> +vahid sohrabloo +vasily.popov +viktorzaharov +vl4deee11 <44677024+vl4deee11@users.noreply.github.com> +vl4deee11 +vl4deee11 +vladislav doster +vladislav promzelev +vogrelord +vpromzelev +vvoronin +yuankun +yujiarista <127893837+yujiarista@users.noreply.github.com> +zxc111 +zxc9007 +李盼 diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string.go index f94ef288..abe6618a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/binary/string.go @@ -18,22 +18,9 @@ package binary import ( - "reflect" "unsafe" ) -// Copied from https://github.com/m3db/m3/blob/master/src/x/unsafe/string.go#L62 - func unsafeStr2Bytes(str string) []byte { - if len(str) == 0 { - return nil - } - var scratch []byte - { - slice := (*reflect.SliceHeader)(unsafe.Pointer(&scratch)) - 
slice.Len = len(str) - slice.Cap = len(str) - slice.Data = (*reflect.StringHeader)(unsafe.Pointer(&str)).Data - } - return scratch + return unsafe.Slice(unsafe.StringData(str), len(str)) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/dynamic.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/dynamic.go new file mode 100644 index 00000000..17d1e266 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/dynamic.go @@ -0,0 +1,33 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package chcol + +type Dynamic = Variant + +// NewDynamic creates a new Dynamic with the given value +func NewDynamic(v any) Dynamic { + return Dynamic{value: v} +} + +// NewDynamicWithType creates a new Dynamic with the given value and ClickHouse type +func NewDynamicWithType(v any, chType string) Dynamic { + return Dynamic{ + value: v, + chType: chType, + } +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/json.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/json.go new file mode 100644 index 00000000..943043a7 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/json.go @@ -0,0 +1,145 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package chcol + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "slices" + "strings" +) + +// JSONSerializer interface allows a struct to be manually converted to an optimized JSON structure instead of relying +// on recursive reflection. +// Note that the struct must be a pointer in order for the interface to be matched, reflection will be used otherwise. +type JSONSerializer interface { + SerializeClickHouseJSON() (*JSON, error) +} + +// JSONDeserializer interface allows a struct to load its data from an optimized JSON structure instead of relying +// on recursive reflection to set its fields. +type JSONDeserializer interface { + DeserializeClickHouseJSON(*JSON) error +} + +// ExtractJSONPathAs is a convenience function for asserting a path to a specific type. 
+// The underlying value is also extracted from its Dynamic wrapper if present. +func ExtractJSONPathAs[T any](o *JSON, path string) (T, bool) { + value, ok := o.valuesByPath[path] + if !ok || value == nil { + var empty T + return empty, false + } + + dynValue, ok := value.(Dynamic) + if !ok { + valueAs, ok := value.(T) + return valueAs, ok + } + + valueAs, ok := dynValue.value.(T) + return valueAs, ok +} + +// JSON represents a ClickHouse JSON type that can hold multiple possible types +type JSON struct { + valuesByPath map[string]any +} + +// NewJSON creates a new empty JSON value +func NewJSON() *JSON { + return &JSON{ + valuesByPath: make(map[string]any), + } +} + +func (o *JSON) ValuesByPath() map[string]any { + return o.valuesByPath +} + +func (o *JSON) SetValueAtPath(path string, value any) { + o.valuesByPath[path] = value +} + +func (o *JSON) ValueAtPath(path string) (any, bool) { + value, ok := o.valuesByPath[path] + return value, ok +} + +// NestedMap converts the flattened JSON data into a nested structure +func (o *JSON) NestedMap() map[string]any { + result := make(map[string]any) + + sortedPaths := make([]string, 0, len(o.valuesByPath)) + for path := range o.valuesByPath { + sortedPaths = append(sortedPaths, path) + } + slices.Sort(sortedPaths) + + for _, path := range sortedPaths { + value := o.valuesByPath[path] + if vt, ok := value.(Variant); ok && vt.Nil() { + continue + } + + parts := strings.Split(path, ".") + current := result + + for i := 0; i < len(parts)-1; i++ { + part := parts[i] + + if _, exists := current[part]; !exists { + current[part] = make(map[string]any) + } + + if next, ok := current[part].(map[string]any); ok { + current = next + } + } + current[parts[len(parts)-1]] = value + } + + return result +} + +// MarshalJSON implements the json.Marshaler interface +func (o *JSON) MarshalJSON() ([]byte, error) { + return json.Marshal(o.NestedMap()) +} + +// Scan implements the sql.Scanner interface +func (o *JSON) Scan(value interface{}) error { + switch vv := value.(type) { + case JSON: + o.valuesByPath = vv.valuesByPath + case *JSON: + o.valuesByPath = vv.valuesByPath + case map[string]any: + o.valuesByPath = vv + default: + return fmt.Errorf("JSON Scan value must be clickhouse.JSON or map[string]any") + } + + return nil +} + +// Value implements the driver.Valuer interface +func (o *JSON) Value() (driver.Value, error) { + return o, nil +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/variant.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/variant.go new file mode 100644 index 00000000..ac873969 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/chcol/variant.go @@ -0,0 +1,146 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
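A small usage sketch of the path-based chcol.JSON value added above, imported straight from lib/chcol (expected output shown in comments):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/ClickHouse/clickhouse-go/v2/lib/chcol"
)

func main() {
    obj := chcol.NewJSON()
    obj.SetValueAtPath("user.name", "alice")
    obj.SetValueAtPath("user.age", int64(42))

    // typed access by dotted path
    name, ok := chcol.ExtractJSONPathAs[string](obj, "user.name")
    fmt.Println(name, ok) // alice true

    // MarshalJSON nests the flat paths via NestedMap
    b, _ := json.Marshal(obj)
    fmt.Println(string(b)) // {"user":{"age":42,"name":"alice"}}
}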
+ +package chcol + +import ( + "database/sql/driver" + "encoding/json" +) + +// Variant represents a ClickHouse Variant type that can hold multiple possible types +type Variant struct { + value any + chType string +} + +// NewVariant creates a new Variant with the given value +func NewVariant(v any) Variant { + return Variant{ + value: v, + chType: "", + } +} + +// NewVariantWithType creates a new Variant with the given value and ClickHouse type +func NewVariantWithType(v any, chType string) Variant { + return Variant{ + value: v, + chType: chType, + } +} + +// WithType creates a new Variant with the current value and given ClickHouse type +func (v Variant) WithType(chType string) Variant { + return Variant{ + value: v.value, + chType: chType, + } +} + +// Type returns the ClickHouse type as a string. +func (v Variant) Type() string { + return v.chType +} + +// HasType returns true if the value has a type ClickHouse included. +func (v Variant) HasType() bool { + return v.chType == "" +} + +// Nil returns true if the underlying value is nil. +func (v Variant) Nil() bool { + return v.value == nil +} + +// Any returns the underlying value as any. +func (v Variant) Any() any { + return v.value +} + +// Scan implements the sql.Scanner interface +func (v *Variant) Scan(value interface{}) error { + switch vv := value.(type) { + case Variant: + v.value = vv.value + v.chType = vv.chType + case *Variant: + v.value = vv.value + v.chType = vv.chType + default: + v.value = value + } + + return nil +} + +// Value implements the driver.Valuer interface +func (v Variant) Value() (driver.Value, error) { + return v, nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v Variant) MarshalJSON() ([]byte, error) { + if v.Nil() { + return []byte("null"), nil + } + + return json.Marshal(v.value) +} + +// UnmarshalJSON implements the json.Unmarshaler interface +func (v *Variant) UnmarshalJSON(data []byte) error { + if string(data) == "null" { + v.value = nil + return nil + } + + if err := json.Unmarshal(data, &v.value); err != nil { + return err + } + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface +func (v Variant) MarshalText() ([]byte, error) { + if v.Nil() { + return []byte(""), nil + } + + switch vv := v.value.(type) { + case string: + return []byte(vv), nil + case []byte: + return vv, nil + case json.RawMessage: + return vv, nil + } + + return json.Marshal(v.value) +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface +func (v *Variant) UnmarshalText(text []byte) error { + if len(text) == 0 { + v.value = nil + return nil + } + + v.value = string(text) + return nil +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go index e2db1825..2a0c17d4 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go @@ -192,6 +192,11 @@ func appendNullableRowPlain[T any](col *Array, arr []*T) error { func (col *Array) append(elem reflect.Value, level int) error { if level < col.depth { switch elem.Kind() { + // allows to traverse pointers to slices and slices cast to `any` + case reflect.Interface, reflect.Ptr: + if !elem.IsNil() { + return col.append(elem.Elem(), level) + } // reflect.Value.Len() & reflect.Value.Index() is called in `append` method which is only valid for // Slice, Array and String that make sense here. 
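And the chcol.Variant wrapper defined above, which carries a value plus (optionally) the ClickHouse type it was decoded as — again only a usage sketch:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/ClickHouse/clickhouse-go/v2/lib/chcol"
)

func main() {
    v := chcol.NewVariantWithType(int64(7), "Int64")
    fmt.Println(v.Any(), v.Type(), v.Nil()) // 7 Int64 false

    b, _ := json.Marshal(v) // MarshalJSON delegates to the wrapped value
    fmt.Println(string(b))  // 7

    empty := chcol.NewVariant(nil)
    fmt.Println(empty.Nil()) // true
}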
case reflect.Slice, reflect.Array, reflect.String: diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go index e09c96ae..49e179cb 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go @@ -90,7 +90,7 @@ func (col *BigInt) Append(v any) (nulls []uint8, err error) { nulls = make([]uint8, len(v)) for i := range v { switch { - case v != nil: + case v[i] != nil: col.append(v[i]) default: nulls[i] = 1 diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go index d13781af..259a3cd8 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go @@ -25,6 +25,7 @@ import ( "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/chcol" "github.com/google/uuid" "github.com/paulmach/orb" "github.com/shopspring/decimal" @@ -136,7 +137,9 @@ func (t Type) Column(name string, tz *time.Location) (Interface, error) { case "Point": return &Point{name: name}, nil case "String": - return &String{name: name}, nil + return &String{name: name, col: colStrProvider(name)}, nil + case "SharedVariant": + return &SharedVariant{name: name}, nil case "Object('json')": return &JSONObject{name: name, root: true, tz: tz}, nil } @@ -146,6 +149,12 @@ func (t Type) Column(name string, tz *time.Location) (Interface, error) { return (&Map{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "Tuple("): return (&Tuple{name: name}).parse(t, tz) + case strings.HasPrefix(string(t), "Variant("): + return (&Variant{name: name}).parse(t, tz) + case strings.HasPrefix(string(t), "Dynamic"): + return (&Dynamic{name: name}).parse(t, tz) + case strings.HasPrefix(string(t), "JSON"): + return (&JSON{name: name}).parse(t, tz) case strings.HasPrefix(string(t), "Decimal("): return (&Decimal{name: name}).parse(t) case strings.HasPrefix(strType, "Nested("): @@ -255,6 +264,9 @@ var ( scanTypePolygon = reflect.TypeOf(orb.Polygon{}) scanTypeDecimal = reflect.TypeOf(decimal.Decimal{}) scanTypeMultiPolygon = reflect.TypeOf(orb.MultiPolygon{}) + scanTypeVariant = reflect.TypeOf(chcol.Variant{}) + scanTypeDynamic = reflect.TypeOf(chcol.Dynamic{}) + scanTypeJSON = reflect.TypeOf(chcol.JSON{}) ) func (col *Float32) Name() string { @@ -656,7 +668,9 @@ func (col *Int8) Append(v any) (nulls []uint8, err error) { nulls = make([]uint8, len(v)) for i := range v { val := int8(0) - if *v[i] { + if v[i] == nil { + nulls[i] = 1 + } else if *v[i] { val = 1 } col.col.Append(val) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen_option.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen_option.go new file mode 100644 index 00000000..6a883527 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen_option.go @@ -0,0 +1,46 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package column + +import "github.com/ClickHouse/ch-go/proto" + +// ColStrProvider defines provider of proto.ColStr +type ColStrProvider func(name string) proto.ColStr + +// colStrProvider provide proto.ColStr for Column() when type is String +var colStrProvider ColStrProvider = defaultColStrProvider + +// defaultColStrProvider defines sample provider for proto.ColStr +func defaultColStrProvider(string) proto.ColStr { + return proto.ColStr{} +} + +// issue: https://github.com/ClickHouse/clickhouse-go/issues/1164 +// WithAllocBufferColStrProvider allow pre alloc buffer cap for proto.ColStr +// +// It is more suitable for scenarios where a lot of data is written in batches +func WithAllocBufferColStrProvider(cap int) { + colStrProvider = func(string) proto.ColStr { + return proto.ColStr{Buf: make([]byte, 0, cap)} + } +} + +// WithColStrProvider more flexible than WithAllocBufferColStrProvider, such as use sync.Pool +func WithColStrProvider(provider ColStrProvider) { + colStrProvider = provider +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go index bc4f77db..7453afba 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go @@ -21,9 +21,10 @@ import ( "database/sql" "database/sql/driver" "fmt" - "github.com/ClickHouse/ch-go/proto" "reflect" "time" + + "github.com/ClickHouse/ch-go/proto" ) var ( @@ -101,9 +102,6 @@ func (col *Date) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []time.Time: for _, t := range v { - if err := dateOverflow(minDate, maxDate, t, defaultDateFormatNoZone); err != nil { - return nil, err - } col.col.Append(t) } case []*time.Time: @@ -111,9 +109,6 @@ func (col *Date) Append(v any) (nulls []uint8, err error) { for i, v := range v { switch { case v != nil: - if err := dateOverflow(minDate, maxDate, *v, defaultDateFormatNoZone); err != nil { - return nil, err - } col.col.Append(*v) default: nulls[i] = 1 @@ -181,16 +176,10 @@ func (col *Date) Append(v any) (nulls []uint8, err error) { func (col *Date) AppendRow(v any) error { switch v := v.(type) { case time.Time: - if err := dateOverflow(minDate, maxDate, v, defaultDateFormatNoZone); err != nil { - return err - } col.col.Append(v) case *time.Time: switch { case v != nil: - if err := dateOverflow(minDate, maxDate, *v, defaultDateFormatNoZone); err != nil { - return err - } col.col.Append(*v) default: col.col.Append(time.Time{}) @@ -257,19 +246,11 @@ func parseDate(value string, minDate time.Time, maxDate time.Time, location *tim if location == nil { location = time.Local } - - defer func() { - if err == nil { - err = dateOverflow(minDate, maxDate, tv, defaultDateFormatNoZone) - } - }() if tv, err = time.Parse(defaultDateFormatWithZone, value); err == nil { return tv, nil } if tv, err = time.Parse(defaultDateFormatNoZone, value); err == nil { - return time.Date( - tv.Year(), tv.Month(), tv.Day(), tv.Hour(), tv.Minute(), tv.Second(), tv.Nanosecond(), location, - ), nil + return 
getTimeWithDifferentLocation(tv, location), nil } return time.Time{}, err } @@ -289,10 +270,10 @@ func (col *Date) Encode(buffer *proto.Buffer) { func (col *Date) row(i int) time.Time { t := col.col.Row(i) - if col.location != nil { + if col.location != nil && col.location != time.UTC { // proto.Date is normalized as time.Time with UTC timezone. // We make sure Date return from ClickHouse matches server timezone or user defined location. - t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), col.location) + t = getTimeWithDifferentLocation(t, col.location) } return t } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go index e23429de..51504293 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go @@ -21,14 +21,15 @@ import ( "database/sql" "database/sql/driver" "fmt" - "github.com/ClickHouse/ch-go/proto" "reflect" "time" + + "github.com/ClickHouse/ch-go/proto" ) var ( - minDate32, _ = time.Parse("2006-01-02 15:04:05", "1925-01-01 00:00:00") - maxDate32, _ = time.Parse("2006-01-02 15:04:05", "2283-11-11 00:00:00") + minDate32, _ = time.Parse("2006-01-02 15:04:05", "1900-01-01 00:00:00") + maxDate32, _ = time.Parse("2006-01-02 15:04:05", "2299-12-31 00:00:00") ) type Date32 struct { @@ -91,9 +92,6 @@ func (col *Date32) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []time.Time: for _, t := range v { - if err := dateOverflow(minDate32, maxDate32, t, "2006-01-02"); err != nil { - return nil, err - } col.col.Append(t) } case []*time.Time: @@ -101,9 +99,6 @@ func (col *Date32) Append(v any) (nulls []uint8, err error) { for i, v := range v { switch { case v != nil: - if err := dateOverflow(minDate32, maxDate32, *v, "2006-01-02"); err != nil { - return nil, err - } col.col.Append(*v) default: nulls[i] = 1 @@ -171,16 +166,10 @@ func (col *Date32) Append(v any) (nulls []uint8, err error) { func (col *Date32) AppendRow(v any) error { switch v := v.(type) { case time.Time: - if err := dateOverflow(minDate32, maxDate32, v, "2006-01-02"); err != nil { - return err - } col.col.Append(v) case *time.Time: switch { case v != nil: - if err := dateOverflow(minDate32, maxDate32, *v, "2006-01-02"); err != nil { - return err - } col.col.Append(*v) default: col.col.Append(time.Time{}) @@ -258,10 +247,10 @@ func (col *Date32) Encode(buffer *proto.Buffer) { func (col *Date32) row(i int) time.Time { t := col.col.Row(i) - if col.location != nil { + if col.location != nil && col.location != time.UTC { // proto.Date is normalized as time.Time with UTC timezone. // We make sure Date return from ClickHouse matches server timezone or user defined location. 
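The Date/Date32 rows above now swap only the location on the UTC-normalized value. A standard-library sketch of that conversion; getTimeWithDifferentLocation itself is a driver helper not shown in this hunk, assumed here to behave like the time.Date call it replaces:

package main

import (
    "fmt"
    "time"
)

// keep the wall-clock fields, re-interpret them in loc (assumed behaviour of
// the driver's getTimeWithDifferentLocation helper)
func withLocation(t time.Time, loc *time.Location) time.Time {
    return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), loc)
}

func main() {
    utc := time.Date(2024, 5, 1, 0, 0, 0, 0, time.UTC)
    loc := time.FixedZone("CEST", 2*60*60) // stand-in for a server/user location
    fmt.Println(withLocation(utc, loc))    // 2024-05-01 00:00:00 +0200 CEST
}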
- t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), col.location) + t = getTimeWithDifferentLocation(t, col.location) } return t } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go index d5dfffad..04f24f86 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go @@ -21,11 +21,12 @@ import ( "database/sql" "database/sql/driver" "fmt" - "github.com/ClickHouse/ch-go/proto" "reflect" "strings" "time" + "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/timezone" ) @@ -121,7 +122,7 @@ func (col *DateTime) Append(v any) (nulls []uint8, err error) { nulls = make([]uint8, len(v)) for i := range v { switch { - case v != nil: + case v[i] != nil: col.col.Append(time.Unix(*v[i], 0)) default: col.col.Append(time.Time{}) @@ -131,9 +132,6 @@ func (col *DateTime) Append(v any) (nulls []uint8, err error) { case []time.Time: nulls = make([]uint8, len(v)) for i := range v { - if err := dateOverflow(minDateTime, maxDateTime, v[i], defaultDateTimeFormatNoZone); err != nil { - return nil, err - } col.col.Append(v[i]) } @@ -142,9 +140,6 @@ func (col *DateTime) Append(v any) (nulls []uint8, err error) { for i := range v { switch { case v[i] != nil: - if err := dateOverflow(minDateTime, maxDateTime, *v[i], defaultDateTimeFormatNoZone); err != nil { - return nil, err - } col.col.Append(*v[i]) default: nulls[i] = 1 @@ -223,16 +218,10 @@ func (col *DateTime) AppendRow(v any) error { col.col.Append(time.Time{}) } case time.Time: - if err := dateOverflow(minDateTime, maxDateTime, v, defaultDateTimeFormatNoZone); err != nil { - return err - } col.col.Append(v) case *time.Time: switch { case v != nil: - if err := dateOverflow(minDateTime, maxDateTime, *v, defaultDateTimeFormatNoZone); err != nil { - return err - } col.col.Append(*v) default: col.col.Append(time.Time{}) @@ -309,19 +298,11 @@ func (col *DateTime) row(i int) time.Time { } func (col *DateTime) parseDateTime(value string) (tv time.Time, err error) { - defer func() { - if err == nil { - err = dateOverflow(minDateTime, maxDateTime, tv, defaultDateFormatNoZone) - } - }() - if tv, err = time.Parse(defaultDateTimeFormatWithZone, value); err == nil { return tv, nil } if tv, err = time.Parse(defaultDateTimeFormatNoZone, value); err == nil { - return time.Date( - tv.Year(), tv.Month(), tv.Day(), tv.Hour(), tv.Minute(), tv.Second(), tv.Nanosecond(), time.Local, - ), nil + return getTimeWithDifferentLocation(tv, time.Local), nil } return time.Time{}, err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go index f5a5a948..668bc2ce 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go @@ -21,13 +21,14 @@ import ( "database/sql" "database/sql/driver" "fmt" - "github.com/ClickHouse/ch-go/proto" "math" "reflect" "strconv" "strings" "time" + "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/timezone" ) @@ -95,6 +96,10 @@ func (col *DateTime64) ScanType() reflect.Type { return scanTypeTime } +func (col *DateTime64) Precision() (int64, bool) { + return int64(col.col.Precision), col.col.PrecisionSet +} + func (col *DateTime64) Rows() int { return col.col.Rows() } 
@@ -142,7 +147,7 @@ func (col *DateTime64) Append(v any) (nulls []uint8, err error) { nulls = make([]uint8, len(v)) for i := range v { switch { - case v != nil: + case v[i] != nil: col.col.Append(time.UnixMilli(*v[i])) default: col.col.Append(time.UnixMilli(0)) @@ -152,9 +157,6 @@ func (col *DateTime64) Append(v any) (nulls []uint8, err error) { case []time.Time: nulls = make([]uint8, len(v)) for i := range v { - if err := dateOverflow(minDateTime64, maxDateTime64, v[i], "2006-01-02 15:04:05"); err != nil { - return nil, err - } col.col.Append(v[i]) } case []*time.Time: @@ -162,9 +164,6 @@ func (col *DateTime64) Append(v any) (nulls []uint8, err error) { for i := range v { switch { case v[i] != nil: - if err := dateOverflow(minDateTime64, maxDateTime64, *v[i], "2006-01-02 15:04:05"); err != nil { - return nil, err - } col.col.Append(*v[i]) default: col.col.Append(time.Time{}) @@ -227,16 +226,10 @@ func (col *DateTime64) AppendRow(v any) error { col.col.Append(time.Time{}) } case time.Time: - if err := dateOverflow(minDateTime64, maxDateTime64, v, "2006-01-02 15:04:05"); err != nil { - return err - } col.col.Append(v) case *time.Time: switch { case v != nil: - if err := dateOverflow(minDateTime64, maxDateTime64, *v, "2006-01-02 15:04:05"); err != nil { - return err - } col.col.Append(*v) default: col.col.Append(time.Time{}) @@ -318,9 +311,7 @@ func (col *DateTime64) parseDateTime(value string) (tv time.Time, err error) { return tv, nil } if tv, err = time.Parse(defaultDateTime64FormatNoZone, value); err == nil { - return time.Date( - tv.Year(), tv.Month(), tv.Day(), tv.Hour(), tv.Minute(), tv.Second(), tv.Nanosecond(), time.Local, - ), nil + return getTimeWithDifferentLocation(tv, time.Local), nil } return time.Time{}, err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go index 74b7d75f..4c7b18db 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go @@ -170,6 +170,32 @@ func (col *Decimal) Append(v any) (nulls []uint8, err error) { col.append(&value) } } + case []string: + nulls = make([]uint8, len(v)) + for i := range v { + d, err := decimal.NewFromString(v[i]) + if err != nil { + return nil, fmt.Errorf("could not convert \"%v\" to decimal: %w", v[i], err) + } + col.append(&d) + } + case []*string: + nulls = make([]uint8, len(v)) + for i := range v { + if v[i] == nil { + nulls[i] = 1 + value := decimal.New(0, 0) + col.append(&value) + + continue + } + + d, err := decimal.NewFromString(*v[i]) + if err != nil { + return nil, fmt.Errorf("could not convert \"%v\" to decimal: %w", *v[i], err) + } + col.append(&d) + } default: if valuer, ok := v.(driver.Valuer); ok { val, err := valuer.Value() @@ -201,6 +227,20 @@ func (col *Decimal) AppendRow(v any) error { if v != nil { value = *v } + case string: + d, err := decimal.NewFromString(v) + if err != nil { + return fmt.Errorf("could not convert \"%v\" to decimal: %w", v, err) + } + value = d + case *string: + if v != nil { + d, err := decimal.NewFromString(*v) + if err != nil { + return fmt.Errorf("could not convert \"%v\" to decimal: %w", *v, err) + } + value = d + } case nil: default: if valuer, ok := v.(driver.Valuer); ok { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/dynamic.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/dynamic.go new file mode 100644 index 00000000..65ddd098 --- /dev/null +++ 
b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/dynamic.go @@ -0,0 +1,417 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package column + +import ( + "database/sql/driver" + "fmt" + "reflect" + "slices" + "strconv" + "strings" + "time" + + "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/chcol" +) + +const SupportedDynamicSerializationVersion = 1 +const DefaultMaxDynamicTypes = 32 + +type Dynamic struct { + chType Type + tz *time.Location + + name string + + maxTypes uint8 + totalTypes uint8 + typeNames []string + typeNamesIndex map[string]int + + variant Variant +} + +func (c *Dynamic) parse(t Type, tz *time.Location) (_ *Dynamic, err error) { + c.chType = t + c.tz = tz + tStr := string(t) + + // SharedVariant is special, and does not count against totalTypes + c.typeNamesIndex = make(map[string]int) + c.variant.columnTypeIndex = make(map[string]uint8) + sv, _ := Type("SharedVariant").Column("", tz) + c.addColumn(sv) + + c.maxTypes = DefaultMaxDynamicTypes + c.totalTypes = 0 // Reset to 0 after adding SharedVariant + + if tStr == "Dynamic" { + return c, nil + } + + if !strings.HasPrefix(tStr, "Dynamic(") || !strings.HasSuffix(tStr, ")") { + return nil, &UnsupportedColumnTypeError{t: t} + } + + typeParamsStr := strings.TrimPrefix(tStr, "Dynamic(") + typeParamsStr = strings.TrimSuffix(typeParamsStr, ")") + + if strings.HasPrefix(typeParamsStr, "max_types=") { + v := strings.TrimPrefix(typeParamsStr, "max_types=") + if maxTypes, err := strconv.Atoi(v); err == nil { + c.maxTypes = uint8(maxTypes) + } + } + + return c, nil +} + +func (c *Dynamic) addColumn(col Interface) { + typeName := string(col.Type()) + c.typeNames = append(c.typeNames, typeName) + c.typeNamesIndex[typeName] = len(c.typeNames) - 1 + c.totalTypes++ + c.variant.addColumn(col) +} + +func (c *Dynamic) Name() string { + return c.name +} + +func (c *Dynamic) Type() Type { + return c.chType +} + +func (c *Dynamic) Rows() int { + return c.variant.Rows() +} + +func (c *Dynamic) Row(i int, ptr bool) any { + typeIndex := c.variant.discriminators[i] + offsetIndex := c.variant.offsets[i] + var value any + var chType string + if typeIndex != NullVariantDiscriminator { + value = c.variant.columns[typeIndex].Row(offsetIndex, ptr) + chType = string(c.variant.columns[typeIndex].Type()) + } + + dyn := chcol.NewDynamicWithType(value, chType) + if ptr { + return &dyn + } + + return dyn +} + +func (c *Dynamic) ScanRow(dest any, row int) error { + typeIndex := c.variant.discriminators[row] + offsetIndex := c.variant.offsets[row] + var value any + var chType string + if typeIndex != NullVariantDiscriminator { + value = c.variant.columns[typeIndex].Row(offsetIndex, false) + chType = string(c.variant.columns[typeIndex].Type()) + } + + switch v := dest.(type) { 
+ case *chcol.Dynamic: + dyn := chcol.NewDynamicWithType(value, chType) + *v = dyn + case **chcol.Dynamic: + dyn := chcol.NewDynamicWithType(value, chType) + **v = dyn + default: + if typeIndex == NullVariantDiscriminator { + return nil + } + + if err := c.variant.columns[typeIndex].ScanRow(dest, offsetIndex); err != nil { + return err + } + } + + return nil +} + +func (c *Dynamic) Append(v any) (nulls []uint8, err error) { + switch vv := v.(type) { + case []chcol.Dynamic: + for i, dyn := range vv { + err := c.AppendRow(dyn) + if err != nil { + return nil, fmt.Errorf("failed to AppendRow at index %d: %w", i, err) + } + } + + return nil, nil + case []*chcol.Dynamic: + for i, dyn := range vv { + err := c.AppendRow(dyn) + if err != nil { + return nil, fmt.Errorf("failed to AppendRow at index %d: %w", i, err) + } + } + + return nil, nil + default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(c.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + + return c.Append(val) + } + + return nil, &ColumnConverterError{ + Op: "Append", + To: string(c.chType), + From: fmt.Sprintf("%T", v), + } + } +} + +func (c *Dynamic) AppendRow(v any) error { + var requestedType string + switch vv := v.(type) { + case nil: + c.variant.appendNullRow() + return nil + case chcol.Dynamic: + requestedType = vv.Type() + v = vv.Any() + if vv.Nil() { + c.variant.appendNullRow() + return nil + } + case *chcol.Dynamic: + requestedType = vv.Type() + v = vv.Any() + if vv.Nil() { + c.variant.appendNullRow() + return nil + } + } + + if requestedType != "" { + var col Interface + colIndex, ok := c.typeNamesIndex[requestedType] + if ok { + col = c.variant.columns[colIndex] + } else { + newCol, err := Type(requestedType).Column("", c.tz) + if err != nil { + return fmt.Errorf("value \"%v\" cannot be stored in dynamic column %s with requested type %s: unable to append type: %w", v, c.chType, requestedType, err) + } + + c.addColumn(newCol) + colIndex = int(c.totalTypes) + col = newCol + } + + if err := col.AppendRow(v); err != nil { + return fmt.Errorf("value \"%v\" cannot be stored in dynamic column %s with requested type %s: %w", v, c.chType, requestedType, err) + } + + c.variant.appendDiscriminatorRow(uint8(colIndex)) + return nil + } + + // If preferred type wasn't provided, try each column + for i, col := range c.variant.columns { + if c.typeNames[i] == "SharedVariant" { + // Do not try to fit into SharedVariant + continue + } + + if err := col.AppendRow(v); err == nil { + c.variant.appendDiscriminatorRow(uint8(i)) + return nil + } + } + + // If no existing columns match, try matching a ClickHouse type from common Go types + inferredTypeName := inferClickHouseTypeFromGoType(v) + if inferredTypeName != "" { + return c.AppendRow(chcol.NewDynamicWithType(v, inferredTypeName)) + } + + return fmt.Errorf("value \"%v\" cannot be stored in dynamic column: no compatible types. hint: use clickhouse.DynamicWithType to wrap the value", v) +} + +func (c *Dynamic) sortColumnsForEncoding() { + previousTypeNames := make([]string, 0, len(c.typeNames)) + previousTypeNames = append(previousTypeNames, c.typeNames...) 
+ slices.Sort(c.typeNames) + + for i, typeName := range c.typeNames { + c.typeNamesIndex[typeName] = i + c.variant.columnTypeIndex[typeName] = uint8(i) + } + + sortedDiscriminatorMap := make([]uint8, len(c.variant.columns)) + sortedColumns := make([]Interface, len(c.variant.columns)) + for i, typeName := range previousTypeNames { + correctIndex := c.typeNamesIndex[typeName] + + sortedDiscriminatorMap[i] = uint8(correctIndex) + sortedColumns[correctIndex] = c.variant.columns[i] + } + c.variant.columns = sortedColumns + + for i := range c.variant.discriminators { + if c.variant.discriminators[i] == NullVariantDiscriminator { + continue + } + + c.variant.discriminators[i] = sortedDiscriminatorMap[c.variant.discriminators[i]] + } +} + +func (c *Dynamic) encodeHeader(buffer *proto.Buffer) { + c.sortColumnsForEncoding() + + buffer.PutUInt64(SupportedDynamicSerializationVersion) + buffer.PutUVarInt(uint64(c.maxTypes)) + buffer.PutUVarInt(uint64(c.totalTypes)) + + for _, typeName := range c.typeNames { + if typeName == "SharedVariant" { + // SharedVariant is implicitly present in Dynamic, do not append to type names + continue + } + + buffer.PutString(typeName) + } + + c.variant.encodeHeader(buffer) +} + +func (c *Dynamic) encodeData(buffer *proto.Buffer) { + c.variant.encodeData(buffer) +} + +func (c *Dynamic) WriteStatePrefix(buffer *proto.Buffer) error { + c.encodeHeader(buffer) + + return nil +} + +func (c *Dynamic) Encode(buffer *proto.Buffer) { + c.encodeData(buffer) +} + +func (c *Dynamic) ScanType() reflect.Type { + return scanTypeDynamic +} + +func (c *Dynamic) Reset() { + c.variant.Reset() +} + +func (c *Dynamic) decodeHeader(reader *proto.Reader) error { + dynamicSerializationVersion, err := reader.UInt64() + if err != nil { + return fmt.Errorf("failed to read dynamic serialization version: %w", err) + } + + if dynamicSerializationVersion != SupportedDynamicSerializationVersion { + return fmt.Errorf("unsupported dynamic serialization version: %d", dynamicSerializationVersion) + } + + maxTypes, err := reader.UVarInt() + if err != nil { + return fmt.Errorf("failed to read max types for dynamic column: %w", err) + } + c.maxTypes = uint8(maxTypes) + + totalTypes, err := reader.UVarInt() + if err != nil { + return fmt.Errorf("failed to read total types for dynamic column: %w", err) + } + + sortedTypeNames := make([]string, 0, totalTypes+1) + for i := uint64(0); i < totalTypes; i++ { + typeName, err := reader.Str() + if err != nil { + return fmt.Errorf("failed to read type name at index %d for dynamic column: %w", i, err) + } + + sortedTypeNames = append(sortedTypeNames, typeName) + } + + sortedTypeNames = append(sortedTypeNames, "SharedVariant") + slices.Sort(sortedTypeNames) // Re-sort after adding SharedVariant + + c.typeNames = make([]string, 0, len(sortedTypeNames)) + c.typeNamesIndex = make(map[string]int, len(sortedTypeNames)) + c.variant.columns = make([]Interface, 0, len(sortedTypeNames)) + c.variant.columnTypeIndex = make(map[string]uint8, len(sortedTypeNames)) + + for _, typeName := range sortedTypeNames { + col, err := Type(typeName).Column("", c.tz) + if err != nil { + return fmt.Errorf("failed to add dynamic column with type %s: %w", typeName, err) + } + + c.addColumn(col) + } + + c.totalTypes = uint8(totalTypes) // Reset to server's totalTypes + + err = c.variant.decodeHeader(reader) + if err != nil { + return fmt.Errorf("failed to decode variant header: %w", err) + } + + return nil +} + +func (c *Dynamic) decodeData(reader *proto.Reader, rows int) error { + err := 
c.variant.decodeData(reader, rows) + if err != nil { + return fmt.Errorf("failed to decode variant data: %w", err) + } + + return nil +} + +func (c *Dynamic) ReadStatePrefix(reader *proto.Reader) error { + err := c.decodeHeader(reader) + if err != nil { + return fmt.Errorf("failed to decode dynamic header: %w", err) + } + + return nil +} + +func (c *Dynamic) Decode(reader *proto.Reader, rows int) error { + err := c.decodeData(reader, rows) + if err != nil { + return fmt.Errorf("failed to decode dynamic data: %w", err) + } + + return nil +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/dynamic_gen.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/dynamic_gen.go new file mode 100644 index 00000000..425b1489 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/dynamic_gen.go @@ -0,0 +1,269 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by make codegen DO NOT EDIT. +// source: lib/column/codegen/dynamic.tpl + +package column + +import ( + "database/sql" + "encoding/json" + "github.com/ClickHouse/ch-go/proto" + "github.com/google/uuid" + "github.com/paulmach/orb" + "time" +) + +// inferClickHouseTypeFromGoType takes a Go interface{} and converts it to a ClickHouse type. +// Returns empty string if type was not matched. +// This is best effort and does not work for all types. +// Optimally, users should provide a type using DynamicWithType. 
+func inferClickHouseTypeFromGoType(v any) string { + switch v.(type) { + case float32: + return "Float32" + case *float32: + return "Float32" + case float64: + return "Float64" + case *float64: + return "Float64" + case int8: + return "Int8" + case *int8: + return "Int8" + case int16: + return "Int16" + case *int16: + return "Int16" + case int32: + return "Int32" + case *int32: + return "Int32" + case int64: + return "Int64" + case *int64: + return "Int64" + case uint8: + return "UInt8" + case *uint8: + return "UInt8" + case uint16: + return "UInt16" + case *uint16: + return "UInt16" + case uint32: + return "UInt32" + case *uint32: + return "UInt32" + case uint64: + return "UInt64" + case *uint64: + return "UInt64" + case string: + return "String" + case *string: + return "String" + case json.RawMessage: + return "String" + case *json.RawMessage: + return "String" + case sql.NullString: + return "String" + case *sql.NullString: + return "String" + case bool: + return "Bool" + case *bool: + return "Bool" + case sql.NullBool: + return "Bool" + case *sql.NullBool: + return "Bool" + case time.Time: + return "DateTime64(3)" + case *time.Time: + return "DateTime64(3)" + case sql.NullTime: + return "DateTime64(3)" + case *sql.NullTime: + return "DateTime64(3)" + case uuid.UUID: + return "UUID" + case *uuid.UUID: + return "UUID" + case proto.IPv6: + return "IPv6" + case *proto.IPv6: + return "IPv6" + case orb.MultiPolygon: + return "MultiPolygon" + case *orb.MultiPolygon: + return "MultiPolygon" + case orb.Point: + return "Point" + case *orb.Point: + return "Point" + case orb.Polygon: + return "Polygon" + case *orb.Polygon: + return "Polygon" + case orb.Ring: + return "Ring" + case *orb.Ring: + return "Ring" + case []float32: + return "Array(Float32)" + case []*float32: + return "Array(Float32)" + case []float64: + return "Array(Float64)" + case []*float64: + return "Array(Float64)" + case []int8: + return "Array(Int8)" + case []*int8: + return "Array(Int8)" + case []int16: + return "Array(Int16)" + case []*int16: + return "Array(Int16)" + case []int32: + return "Array(Int32)" + case []*int32: + return "Array(Int32)" + case []int64: + return "Array(Int64)" + case []*int64: + return "Array(Int64)" + case []*uint8: + return "Array(UInt8)" + case []uint16: + return "Array(UInt16)" + case []*uint16: + return "Array(UInt16)" + case []uint32: + return "Array(UInt32)" + case []*uint32: + return "Array(UInt32)" + case []uint64: + return "Array(UInt64)" + case []*uint64: + return "Array(UInt64)" + case []string: + return "Array(String)" + case []*string: + return "Array(String)" + case []json.RawMessage: + return "Array(String)" + case []*json.RawMessage: + return "Array(String)" + case []sql.NullString: + return "Array(String)" + case []*sql.NullString: + return "Array(String)" + case []bool: + return "Array(Bool)" + case []*bool: + return "Array(Bool)" + case []sql.NullBool: + return "Array(Bool)" + case []*sql.NullBool: + return "Array(Bool)" + case []time.Time: + return "Array(DateTime64(3))" + case []*time.Time: + return "Array(DateTime64(3))" + case []sql.NullTime: + return "Array(DateTime64(3))" + case []*sql.NullTime: + return "Array(DateTime64(3))" + case []uuid.UUID: + return "Array(UUID)" + case []*uuid.UUID: + return "Array(UUID)" + case []proto.IPv6: + return "Array(IPv6)" + case []*proto.IPv6: + return "Array(IPv6)" + case []orb.MultiPolygon: + return "Array(MultiPolygon)" + case []*orb.MultiPolygon: + return "Array(MultiPolygon)" + case []orb.Point: + return "Array(Point)" + case 
[]*orb.Point: + return "Array(Point)" + case []orb.Polygon: + return "Array(Polygon)" + case []*orb.Polygon: + return "Array(Polygon)" + case []orb.Ring: + return "Array(Ring)" + case []*orb.Ring: + return "Array(Ring)" + case map[string]float32: + return "Map(String, Float32)" + case map[string]float64: + return "Map(String, Float64)" + case map[string]int8: + return "Map(String, Int8)" + case map[string]int16: + return "Map(String, Int16)" + case map[string]int32: + return "Map(String, Int32)" + case map[string]int64: + return "Map(String, Int64)" + case map[string]uint8: + return "Map(String, UInt8)" + case map[string]uint16: + return "Map(String, UInt16)" + case map[string]uint32: + return "Map(String, UInt32)" + case map[string]uint64: + return "Map(String, UInt64)" + case map[string]string: + return "Map(String, String)" + case map[string]json.RawMessage: + return "Map(String, String)" + case map[string]sql.NullString: + return "Map(String, String)" + case map[string]bool: + return "Map(String, Bool)" + case map[string]sql.NullBool: + return "Map(String, Bool)" + case map[string]time.Time: + return "Map(String, DateTime64(3))" + case map[string]sql.NullTime: + return "Map(String, DateTime64(3))" + case map[string]uuid.UUID: + return "Map(String, UUID)" + case map[string]proto.IPv6: + return "Map(String, IPv6)" + case map[string]orb.MultiPolygon: + return "Map(String, MultiPolygon)" + case map[string]orb.Point: + return "Map(String, Point)" + case map[string]orb.Polygon: + return "Map(String, Polygon)" + case map[string]orb.Ring: + return "Map(String, Ring)" + default: + return "" + } +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum.go index 935dd2d4..45aa07a1 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum.go @@ -18,89 +18,171 @@ package column import ( + "bytes" "errors" - "github.com/ClickHouse/ch-go/proto" "math" "strconv" - "strings" + + "github.com/ClickHouse/ch-go/proto" ) func Enum(chType Type, name string) (Interface, error) { - var ( - payload string - columnType = string(chType) - ) - if len(columnType) < 8 { - return nil, &Error{ - ColumnType: string(chType), - Err: errors.New("invalid Enum"), - } - } - switch { - case strings.HasPrefix(columnType, "Enum8"): - payload = columnType[6:] - case strings.HasPrefix(columnType, "Enum16"): - payload = columnType[7:] - default: + enumType, values, indexes, valid := extractEnumNamedValues(chType) + if !valid { return nil, &Error{ ColumnType: string(chType), Err: errors.New("invalid Enum"), } } - var ( - idents []string - indexes []int64 - ) - for _, block := range strings.Split(payload[:len(payload)-1], ",") { - parts := strings.Split(block, "=") - if len(parts) != 2 { - return nil, &Error{ - ColumnType: string(chType), - Err: errors.New("invalid Enum"), - } - } - var ( - ident = strings.TrimSpace(parts[0]) - index, err = strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 16) - ) - if err != nil || len(ident) < 2 { - return nil, &Error{ - ColumnType: string(chType), - Err: errors.New("invalid Enum"), - } - } - ident = ident[1 : len(ident)-1] - idents, indexes = append(idents, ident), append(indexes, index) - } - if strings.HasPrefix(columnType, "Enum8") { + + if enumType == enum8Type { enum := Enum8{ - iv: make(map[string]proto.Enum8, len(idents)), - vi: make(map[proto.Enum8]string, len(idents)), + iv: make(map[string]proto.Enum8, 
 len(values)), + vi: make(map[proto.Enum8]string, len(values)), chType: chType, name: name, } - for i := range idents { - if indexes[i] > math.MaxUint8 { - return nil, &Error{ - ColumnType: string(chType), - Err: errors.New("invalid Enum"), - } - } + for i := range values { v := int8(indexes[i]) - enum.iv[idents[i]] = proto.Enum8(v) - enum.vi[proto.Enum8(v)] = idents[i] + enum.iv[values[i]] = proto.Enum8(v) + enum.vi[proto.Enum8(v)] = values[i] + + enum.enumValuesBitset[uint8(v)>>6] |= 1 << (v & 63) } return &enum, nil } enum := Enum16{ - iv: make(map[string]proto.Enum16, len(idents)), - vi: make(map[proto.Enum16]string, len(idents)), + iv: make(map[string]proto.Enum16, len(values)), + vi: make(map[proto.Enum16]string, len(values)), chType: chType, name: name, + // to be updated below, when ranging over all index/enum values + minEnum: math.MaxInt16, + maxEnum: math.MinInt16, } - for i := range idents { - enum.iv[idents[i]] = proto.Enum16(indexes[i]) - enum.vi[proto.Enum16(indexes[i])] = idents[i] + + for i := range values { + k := int16(indexes[i]) + enum.iv[values[i]] = proto.Enum16(k) + enum.vi[proto.Enum16(k)] = values[i] + if k < enum.minEnum { + enum.minEnum = k + } + if k > enum.maxEnum { + enum.maxEnum = k + } } + enum.continuous = (enum.maxEnum-enum.minEnum)+1 == int16(len(enum.vi)) return &enum, nil } + +const ( + enum8Type = "Enum8" + enum16Type = "Enum16" +) + +func extractEnumNamedValues(chType Type) (typ string, values []string, indexes []int, valid bool) { + src := []byte(chType) + + var bracketOpen, stringOpen bool + + var foundValueOffset int + var foundValueLen int + var skippedValueTokens []int + var indexFound bool + var valueFound bool + var valueIndex = 0 + + for c := 0; c < len(src); c++ { + token := src[c] + + switch { + // open bracket found, capture the type + case token == '(' && !stringOpen: + typ = string(src[:c]) + + // Ignore everything captured as non-enum type + if typ != enum8Type && typ != enum16Type { + return + } + + bracketOpen = true + break + // when inside a bracket, we can start capture value inside single quotes + case bracketOpen && token == '\'' && !stringOpen: + foundValueOffset = c + 1 + stringOpen = true + break + // close the string and capture the value + case token == '\'' && stringOpen: + stringOpen = false + foundValueLen = c - foundValueOffset + valueFound = true + break + // escape character, skip the next character + case token == '\\' && stringOpen: + skippedValueTokens = append(skippedValueTokens, c-foundValueOffset) + c++ + break + // capture optional index. `=` token is followed with an integer index + case token == '=' && !stringOpen: + if !valueFound { + return + } + indexStart := c + 1 + // find the end of the index, it's either a comma or a closing bracket + for _, token := range src[indexStart:] { + if token == ',' || token == ')' { + break + } + c++ + } + + idx, err := strconv.Atoi(string(bytes.TrimSpace(src[indexStart : c+1]))) + if err != nil { + return + } + valueIndex = idx + indexFound = true + break + // capture the value and index when a comma or closing bracket is found + case (token == ',' || token == ')') && !stringOpen: + if !valueFound { + return + } + // if no index was found for current value, increment the value index + // e.g.
Enum8('a','b') is equivalent to Enum8('a'=1,'b'=2) + // or Enum8('a'=3,'b') is equivalent to Enum8('a'=3,'b'=4) + // so if no index is provided, we increment the value index + if !indexFound { + valueIndex++ + } + + // if the index is out of range, return + if (typ == enum8Type && valueIndex > math.MaxUint8) || + (typ == enum16Type && valueIndex > math.MaxUint16) { + return + } + + foundName := src[foundValueOffset : foundValueOffset+foundValueLen] + for _, skipped := range skippedValueTokens { + foundName = append(foundName[:skipped], foundName[skipped+1:]...) + } + + indexes = append(indexes, valueIndex) + values = append(values, string(foundName)) + indexFound = false + valueFound = false + break + } + } + + // Enum type must have at least one value + if len(indexes) == 0 { + return + } + + valid = true + return +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go index c394e7ff..d3a15b80 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go @@ -31,6 +31,10 @@ type Enum16 struct { chType Type col proto.ColEnum16 name string + + continuous bool + minEnum int16 + maxEnum int16 } func (col *Enum16) Reset() { @@ -179,9 +183,17 @@ func (col *Enum16) Append(v any) (nulls []uint8, err error) { func (col *Enum16) AppendRow(elem any) error { switch elem := elem.(type) { case int16: - return col.AppendRow(int(elem)) + if col.continuous && elem >= col.minEnum && elem <= col.maxEnum { + col.col.Append(proto.Enum16(elem)) + } else { + return col.AppendRow(int(elem)) + } case *int16: - return col.AppendRow(int(*elem)) + if col.continuous && *elem >= col.minEnum && *elem <= col.maxEnum { + col.col.Append(proto.Enum16(*elem)) + } else { + return col.AppendRow(int(*elem)) + } case int: v := proto.Enum16(elem) _, ok := col.vi[v] diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go index 4aee561a..2f291365 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go @@ -31,6 +31,10 @@ type Enum8 struct { chType Type name string col proto.ColEnum8 + + // Encoding of the enums that have been specified by the user. + // Using this when appending rows, to validate the enum is valid.
+ enumValuesBitset [4]uint64 } func (col *Enum8) Reset() { @@ -183,27 +187,25 @@ func (col *Enum8) AppendRow(elem any) error { case *int8: return col.AppendRow(int(*elem)) case int: - v := proto.Enum8(elem) - _, ok := col.vi[v] - if !ok { + // Check if the enum value is defined + if col.enumValuesBitset[uint8(elem)>>6]&(1<<(elem&63)) == 0 { return &Error{ Err: fmt.Errorf("unknown element %v", elem), ColumnType: string(col.chType), } } - col.col.Append(v) + col.col.Append(proto.Enum8(elem)) case *int: switch { case elem != nil: - v := proto.Enum8(*elem) - _, ok := col.vi[v] - if !ok { + // Check if the enum value is defined + if col.enumValuesBitset[uint8(*elem)>>6]&(1<<(*elem&63)) == 0 { return &Error{ Err: fmt.Errorf("unknown element %v", *elem), ColumnType: string(col.chType), } } - col.col.Append(v) + col.col.Append(proto.Enum8(*elem)) default: col.col.Append(0) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go index 8ddb0d1c..50966e1f 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go @@ -22,9 +22,10 @@ import ( "database/sql/driver" "encoding" "fmt" - "github.com/ClickHouse/ch-go/proto" "reflect" + "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/binary" ) @@ -77,7 +78,27 @@ func (col *FixedString) ScanRow(dest any, row int) error { **d = col.row(row) case encoding.BinaryUnmarshaler: return d.UnmarshalBinary(col.rowBytes(row)) + case *[]byte: + *d = col.rowBytes(row) default: + // handle for *[n]byte + if t := reflect.TypeOf(dest); t.Kind() == reflect.Pointer && + t.Elem().Kind() == reflect.Array && + t.Elem().Elem() == reflect.TypeOf(byte(0)) { + size := t.Elem().Len() + if size != col.col.Size { + return &ColumnConverterError{ + Op: "ScanRow", + To: fmt.Sprintf("%T", dest), + From: "FixedString", + Hint: fmt.Sprintf("invalid size %d, expect %d", size, col.col.Size), + } + } + rv := reflect.ValueOf(dest).Elem() + reflect.Copy(rv, reflect.ValueOf(col.row(row))) + return nil + } + if scan, ok := dest.(sql.Scanner); ok { return scan.Scan(col.row(row)) } @@ -90,32 +111,61 @@ func (col *FixedString) ScanRow(dest any, row int) error { return nil } +// safeAppendRow appends the value to the underlying column with a length check. +// This re-implements the logic from ch-go but without the panic. +// It also fills unused space with zeros. +func (col *FixedString) safeAppendRow(v []byte) error { + if col.col.Size == 0 { + // If unset, use first value's length for the string size + col.col.Size = len(v) + } + + if len(v) > col.col.Size { + return fmt.Errorf("input value with length %d exceeds FixedString(%d) capacity", len(v), col.col.Size) + } + + col.col.Buf = append(col.col.Buf, v...) 
+ + // Fill the unused space of the fixed string with zeros + padding := col.col.Size - len(v) + for i := 0; i < padding; i++ { + col.col.Buf = append(col.col.Buf, 0) + } + + return nil +} + func (col *FixedString) Append(v any) (nulls []uint8, err error) { switch v := v.(type) { case []string: nulls = make([]uint8, len(v)) for _, v := range v { + var err error if v == "" { - col.col.Append(make([]byte, col.col.Size)) + err = col.safeAppendRow(nil) } else { - col.col.Append(binary.Str2Bytes(v, col.col.Size)) + err = col.safeAppendRow(binary.Str2Bytes(v, col.col.Size)) + } + + if err != nil { + return nil, err } } case []*string: nulls = make([]uint8, len(v)) for i, v := range v { + var err error if v == nil { nulls[i] = 1 + err = col.safeAppendRow(nil) + } else if *v == "" { + err = col.safeAppendRow(nil) + } else { + err = col.safeAppendRow(binary.Str2Bytes(*v, col.col.Size)) } - switch { - case v == nil: - col.col.Append(make([]byte, col.col.Size)) - default: - if *v == "" { - col.col.Append(make([]byte, col.col.Size)) - } else { - col.col.Append(binary.Str2Bytes(*v, col.col.Size)) - } + + if err != nil { + return nil, err } } case encoding.BinaryMarshaler: @@ -123,9 +173,51 @@ func (col *FixedString) Append(v any) (nulls []uint8, err error) { if err != nil { return nil, err } - col.col.Append(data) + err = col.safeAppendRow(data) + if err != nil { + return nil, err + } + nulls = make([]uint8, len(data)/col.col.Size) + case [][]byte: + nulls = make([]uint8, len(v)) + for i, v := range v { + if v == nil { + nulls[i] = 1 + } + n := len(v) + var err error + if n == 0 { + err = col.safeAppendRow(nil) + } else if n >= col.col.Size { + err = col.safeAppendRow(v[0:col.col.Size]) + } else { + err = col.safeAppendRow(v) + } + + if err != nil { + return nil, err + } + } default: + // handle for [][n]byte + if t := reflect.TypeOf(v); t.Kind() == reflect.Slice && + t.Elem().Kind() == reflect.Array && + t.Elem().Elem() == reflect.TypeOf(byte(0)) { + rv := reflect.ValueOf(v) + nulls = make([]uint8, rv.Len()) + for i := 0; i < rv.Len(); i++ { + e := rv.Index(i) + data := make([]byte, e.Len()) + reflect.Copy(reflect.ValueOf(data), e) + err := col.safeAppendRow(data) + if err != nil { + return nil, err + } + } + return + } + if s, ok := v.(driver.Valuer); ok { val, err := s.Value() if err != nil { @@ -147,25 +239,64 @@ func (col *FixedString) Append(v any) (nulls []uint8, err error) { return } -func (col *FixedString) AppendRow(v any) (err error) { - data := make([]byte, col.col.Size) +func (col *FixedString) AppendRow(v any) error { switch v := v.(type) { + case []byte: + err := col.safeAppendRow(v) + if err != nil { + return err + } case string: - if v != "" { - data = binary.Str2Bytes(v, col.col.Size) + err := col.safeAppendRow(binary.Str2Bytes(v, col.col.Size)) + if err != nil { + return err } case *string: + var data []byte if v != nil { - if *v != "" { - data = binary.Str2Bytes(*v, col.col.Size) - } + data = binary.Str2Bytes(*v, col.col.Size) + } + + err := col.safeAppendRow(data) + if err != nil { + return err } case nil: + err := col.safeAppendRow(nil) + if err != nil { + return err + } case encoding.BinaryMarshaler: - if data, err = v.MarshalBinary(); err != nil { + data, err := v.MarshalBinary() + if err != nil { + return err + } + + err = col.safeAppendRow(data) + if err != nil { return err } default: + if t := reflect.TypeOf(v); t.Kind() == reflect.Array && t.Elem() == reflect.TypeOf(byte(0)) { + if t.Len() != col.col.Size { + return &ColumnConverterError{ + Op: "AppendRow", + To: 
"FixedString", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("invalid size %d, expect %d", t.Len(), col.col.Size), + } + } + + data := make([]byte, col.col.Size) + reflect.Copy(reflect.ValueOf(data), reflect.ValueOf(v)) + err := col.safeAppendRow(data) + if err != nil { + return err + } + + return nil + } + if s, ok := v.(driver.Valuer); ok { val, err := s.Value() if err != nil { @@ -189,7 +320,7 @@ func (col *FixedString) AppendRow(v any) (err error) { From: fmt.Sprintf("%T", v), } } - col.col.Append(data) + return nil } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go index 2839a41c..1263b897 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go @@ -86,9 +86,15 @@ func (col *MultiPolygon) Append(v any) (nulls []uint8, err error) { } return col.set.Append(values) case []*orb.MultiPolygon: + nulls = make([]uint8, len(v)) values := make([][]orb.Polygon, 0, len(v)) - for _, v := range v { - values = append(values, *v) + for i, v := range v { + if v == nil { + nulls[i] = 1 + values = append(values, orb.MultiPolygon{}) + } else { + values = append(values, *v) + } } return col.set.Append(values) default: diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go index c93a715a..4a4fe16b 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go @@ -89,11 +89,16 @@ func (col *Point) Append(v any) (nulls []uint8, err error) { } case []*orb.Point: nulls = make([]uint8, len(v)) - for _, v := range v { - col.col.Append(proto.Point{ - X: v.Lon(), - Y: v.Lat(), - }) + for i, v := range v { + if v == nil { + nulls[i] = 1 + col.col.Append(proto.Point{}) + } else { + col.col.Append(proto.Point{ + X: v.Lon(), + Y: v.Lat(), + }) + } } default: if valuer, ok := v.(driver.Valuer); ok { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go index 54226081..6e78b1a9 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go @@ -86,9 +86,15 @@ func (col *Polygon) Append(v any) (nulls []uint8, err error) { } return col.set.Append(values) case []*orb.Polygon: + nulls = make([]uint8, len(v)) values := make([][]orb.Ring, 0, len(v)) - for _, v := range v { - values = append(values, *v) + for i, v := range v { + if v == nil { + nulls[i] = 1 + values = append(values, orb.Polygon{}) + } else { + values = append(values, *v) + } } return col.set.Append(values) default: diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go index 0f190a8e..a64de47f 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go @@ -86,9 +86,15 @@ func (col *Ring) Append(v any) (nulls []uint8, err error) { } return col.set.Append(values) case []*orb.Ring: + nulls = make([]uint8, len(v)) values := make([][]orb.Point, 0, len(v)) - for _, v := range v { - values = append(values, *v) + for i, v := range v { + if v == 
nil { + nulls[i] = 1 + values = append(values, orb.Ring{}) + } else { + values = append(values, *v) + } } return col.set.Append(values) default: diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go index a15f6d3e..3d4c2528 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go @@ -145,7 +145,7 @@ func (col *IPv4) Append(v any) (nulls []uint8, err error) { ips := make([]netip.Addr, len(v), len(v)) for i := range v { switch { - case v != nil: + case v[i] != nil: ip, err := strToIPV4(*v[i]) if err != nil { return nulls, err @@ -164,7 +164,7 @@ func (col *IPv4) Append(v any) (nulls []uint8, err error) { nulls = make([]uint8, len(v)) for i := range v { switch { - case v != nil: + case v[i] != nil: col.col.Append(proto.ToIPv4(*v[i])) default: nulls[i] = 1 @@ -196,7 +196,7 @@ func (col *IPv4) Append(v any) (nulls []uint8, err error) { nulls = make([]uint8, len(v)) for i := range v { switch { - case v != nil: + case v[i] != nil: col.col.Append(proto.IPv4(*v[i])) default: nulls[i] = 1 diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go index 0978a0b0..682ff853 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go @@ -19,925 +19,769 @@ package column import ( "fmt" - "github.com/ClickHouse/ch-go/proto" + "github.com/ClickHouse/clickhouse-go/v2/lib/chcol" + "math" "reflect" + "strconv" "strings" "time" + + "github.com/ClickHouse/ch-go/proto" ) -// inverse mapping - go types to clickhouse types -var kindMappings = map[reflect.Kind]string{ - reflect.String: "String", - reflect.Int: "Int64", - reflect.Int8: "Int8", - reflect.Int16: "Int16", - reflect.Int32: "Int32", - reflect.Int64: "Int64", - reflect.Uint: "UInt64", - reflect.Uint8: "UInt8", - reflect.Uint16: "UInt16", - reflect.Uint32: "UInt32", - reflect.Uint64: "UInt64", - reflect.Float32: "Float32", - reflect.Float64: "Float64", - reflect.Bool: "Bool", -} - -// complex types for which a mapping exists - currently we map to String but could enhance in the future for other types -var typeMappings = map[string]struct{}{ - // currently JSON doesn't support DateTime, Decimal or IP so mapped to String - "time.Time": {}, - "decimal.Decimal": {}, - "net.IP": {}, - "uuid.UUID": {}, -} - -type JSON interface { - Interface - appendEmptyValue() error -} - -type JSONParent interface { - upsertValue(name string, ct string) (*JSONValue, error) - upsertList(name string) (*JSONList, error) - upsertObject(name string) (*JSONObject, error) - insertEmptyColumn(name string) error - columnNames() []string - rows() int -} - -func parseType(name string, vType reflect.Type, values any, isArray bool, jCol JSONParent, numEmpty int) error { - _, ok := typeMappings[vType.String()] - if !ok { - return &UnsupportedColumnTypeError{ - t: Type(vType.String()), - } - } - ct := "String" - if isArray { - ct = fmt.Sprintf("Array(%s)", ct) - } - col, err := jCol.upsertValue(name, ct) - if err != nil { - return err - } - col.origType = vType +const JSONObjectSerializationVersion uint64 = 0 +const JSONStringSerializationVersion uint64 = 1 +const JSONUnsetSerializationVersion uint64 = math.MaxUint64 +const DefaultMaxDynamicPaths = 1024 - //pre pad with empty - e.g. 
for new values in maps - for i := 0; i < numEmpty; i++ { - if isArray { - // empty array for nil of the right type - err = col.AppendRow([]string{}) - } else { - // empty value of the type - err = col.AppendRow(fmt.Sprint(reflect.New(vType).Elem().Interface())) - } - if err != nil { - return err - } - } - if isArray { - iValues := reflect.ValueOf(values) - sValues := make([]string, iValues.Len(), iValues.Len()) - for i := 0; i < iValues.Len(); i++ { - sValues[i] = fmt.Sprint(iValues.Index(i).Interface()) - } - return col.AppendRow(sValues) - } - return col.AppendRow(fmt.Sprint(values)) +type JSON struct { + chType Type + tz *time.Location + name string + rows int + + serializationVersion uint64 + + jsonStrings String + + typedPaths []string + typedPathsIndex map[string]int + typedColumns []Interface + + skipPaths []string + skipPathsIndex map[string]int + + dynamicPaths []string + dynamicPathsIndex map[string]int + dynamicColumns []*Dynamic + + maxDynamicPaths int + maxDynamicTypes int + totalDynamicPaths int } -func parsePrimitive(name string, kind reflect.Kind, values any, isArray bool, jCol JSONParent, numEmpty int) error { - ct, ok := kindMappings[kind] - if !ok { - return &UnsupportedColumnTypeError{ - t: Type(fmt.Sprintf("%s - %s", kind, reflect.TypeOf(values).String())), - } - } - var err error - if isArray { - ct = fmt.Sprintf("Array(%s)", ct) - // if we have a []any we will need to cast to the target column type - this will be based on the first - // values types. Inconsistent slices will fail. - values, err = convertSlice(values) - if err != nil { - return err - } +func (c *JSON) parse(t Type, tz *time.Location) (_ *JSON, err error) { + c.chType = t + c.tz = tz + tStr := string(t) + + c.serializationVersion = JSONUnsetSerializationVersion + c.typedPathsIndex = make(map[string]int) + c.skipPathsIndex = make(map[string]int) + c.dynamicPathsIndex = make(map[string]int) + c.maxDynamicPaths = DefaultMaxDynamicPaths + c.maxDynamicTypes = DefaultMaxDynamicTypes + + if tStr == "JSON" { + return c, nil } - col, err := jCol.upsertValue(name, ct) - if err != nil { - return err + + if !strings.HasPrefix(tStr, "JSON(") || !strings.HasSuffix(tStr, ")") { + return nil, &UnsupportedColumnTypeError{t: t} } - //pre pad with empty - e.g. 
for new values in maps - for i := 0; i < numEmpty; i++ { - if isArray { - // empty array for nil of the right type - err = col.AppendRow(reflect.MakeSlice(reflect.TypeOf(values), 0, 0).Interface()) - } else { - err = col.AppendRow(nil) + typePartsStr := strings.TrimPrefix(tStr, "JSON(") + typePartsStr = strings.TrimSuffix(typePartsStr, ")") + + typeParts := splitWithDelimiters(typePartsStr) + for _, typePart := range typeParts { + typePart = strings.TrimSpace(typePart) + + if strings.HasPrefix(typePart, "max_dynamic_paths=") { + v := strings.TrimPrefix(typePart, "max_dynamic_paths=") + if maxPaths, err := strconv.Atoi(v); err == nil { + c.maxDynamicPaths = maxPaths + } + + continue } - if err != nil { - return err + + if strings.HasPrefix(typePart, "max_dynamic_types=") { + v := strings.TrimPrefix(typePart, "max_dynamic_types=") + if maxTypes, err := strconv.Atoi(v); err == nil { + c.maxDynamicTypes = maxTypes + } + + continue } - } - return col.AppendRow(values) -} + if strings.HasPrefix(typePart, "SKIP REGEXP") { + pattern := strings.TrimPrefix(typePart, "SKIP REGEXP") + pattern = strings.Trim(pattern, " '") + c.skipPaths = append(c.skipPaths, pattern) + c.skipPathsIndex[pattern] = len(c.skipPaths) - 1 -// converts a []any of primitives to a typed slice -// maybe this can be done with reflection but likely slower. investigate. -// this uses the first value to determine the type - subsequent values must currently be of the same type - we might cast later -// but wider driver doesn't support e.g. int to int64 -func convertSlice(values any) (any, error) { - rValues := reflect.ValueOf(values) - if rValues.Len() == 0 || rValues.Index(0).Kind() != reflect.Interface { - return values, nil - } - var fType reflect.Type - for i := 0; i < rValues.Len(); i++ { - elem := rValues.Index(i).Elem() - if elem.IsValid() { - fType = elem.Type() - break + continue } - } - if fType == nil { - return []any{}, nil - } - typedSlice := reflect.MakeSlice(reflect.SliceOf(fType), 0, rValues.Len()) - for i := 0; i < rValues.Len(); i++ { - value := rValues.Index(i) - if value.IsNil() { - typedSlice = reflect.Append(typedSlice, reflect.Zero(fType)) + + if strings.HasPrefix(typePart, "SKIP") { + path := strings.TrimPrefix(typePart, "SKIP") + path = strings.Trim(path, " `") + c.skipPaths = append(c.skipPaths, path) + c.skipPathsIndex[path] = len(c.skipPaths) - 1 + continue } - if rValues.Index(i).Elem().Type() != fType { - return nil, &Error{ - ColumnType: fmt.Sprint(fType), - Err: fmt.Errorf("inconsistent slices are not supported - expected %s got %s", fType, rValues.Index(i).Elem().Type()), - } + + typedPathParts := strings.SplitN(typePart, " ", 2) + if len(typedPathParts) != 2 { + continue } - typedSlice = reflect.Append(typedSlice, rValues.Index(i).Elem()) - } - return typedSlice.Interface(), nil -} -func (jCol *JSONList) createNewOffsets(num int) { - for i := 0; i < num; i++ { - //single depth so can take 1st - if jCol.offsets[0].values.col.Rows() == 0 { - // first entry in the column - jCol.offsets[0].values.col.Append(0) - } else { - // entry for this object to see offset from last - offsets are cumulative - jCol.offsets[0].values.col.Append(jCol.offsets[0].values.col.Row(jCol.offsets[0].values.col.Rows() - 1)) + typedPath := strings.Trim(typedPathParts[0], "`") + typeName := strings.TrimSpace(typedPathParts[1]) + + c.typedPaths = append(c.typedPaths, typedPath) + c.typedPathsIndex[typedPath] = len(c.typedPaths) - 1 + + col, err := Type(typeName).Column("", tz) + if err != nil { + return nil, fmt.Errorf("failed to 
init column of type \"%s\" at path \"%s\": %w", typeName, typedPath, err) } + + c.typedColumns = append(c.typedColumns, col) } + + return c, nil } -func getStructFieldName(field reflect.StructField) (string, bool) { - name := field.Name - tag := field.Tag.Get("json") - // not a standard but we allow - to omit fields - if tag == "-" { - return name, true - } - if tag != "" { - return tag, false - } - // support ch tag as well as this is used elsewhere - tag = field.Tag.Get("ch") - if tag == "-" { - return name, true - } - if tag != "" { - return tag, false - } - return name, false +func (c *JSON) hasTypedPath(path string) bool { + _, ok := c.typedPathsIndex[path] + return ok } -// ensures numeric keys and ` are escaped properly -func getMapFieldName(name string) string { - if !escapeColRegex.MatchString(name) { - return fmt.Sprintf("`%s`", colEscape.Replace(name)) - } - return colEscape.Replace(name) +func (c *JSON) hasDynamicPath(path string) bool { + _, ok := c.dynamicPathsIndex[path] + return ok } -func parseSlice(name string, values any, jCol JSONParent, preFill int) error { - fType := reflect.TypeOf(values).Elem() - sKind := fType.Kind() - rValues := reflect.ValueOf(values) +func (c *JSON) hasSkipPath(path string) bool { + _, ok := c.skipPathsIndex[path] + return ok +} - if sKind == reflect.Interface { - //use the first element to determine if it is a complex or primitive map - after this we need consistent dimensions - if rValues.Len() == 0 { - return nil - } - var value reflect.Value - for i := 0; i < rValues.Len(); i++ { - value = rValues.Index(i).Elem() - if value.IsValid() { - break - } - } - if !value.IsValid() { - return nil +// pathHasNestedValues returns true if the provided path has child paths in typed or dynamic paths +func (c *JSON) pathHasNestedValues(path string) bool { + for _, typedPath := range c.typedPaths { + if strings.HasPrefix(typedPath, path+".") { + return true } - fType = value.Type() - sKind = value.Kind() } - if _, ok := typeMappings[fType.String()]; ok { - return parseType(name, fType, values, true, jCol, preFill) - } else if sKind == reflect.Struct || sKind == reflect.Map || sKind == reflect.Slice { - if rValues.Len() == 0 { - return nil - } - col, err := jCol.upsertList(name) - if err != nil { - return err - } - col.createNewOffsets(preFill + 1) - for i := 0; i < rValues.Len(); i++ { - // increment offset - col.offsets[0].values.col[col.offsets[0].values.col.Rows()-1] += 1 - value := rValues.Index(i) - sKind = value.Kind() - if sKind == reflect.Interface { - sKind = value.Elem().Kind() - } - switch sKind { - case reflect.Struct: - col.isNested = true - if err = iterateStruct(value, col, 0); err != nil { - return err - } - case reflect.Map: - col.isNested = true - if err = iterateMap(value, col, 0); err != nil { - return err - } - case reflect.Slice: - if err = parseSlice("", value.Interface(), col, 0); err != nil { - return err - } - default: - // only happens if slice has a primitive mixed with complex types in a []any - return &Error{ - ColumnType: fmt.Sprint(sKind), - Err: fmt.Errorf("slices must be same dimension in column %s", col.Name()), - } - } + for _, dynamicPath := range c.dynamicPaths { + if strings.HasPrefix(dynamicPath, path+".") { + return true } - return nil } - return parsePrimitive(name, sKind, values, true, jCol, preFill) -} -func parseStruct(name string, structVal reflect.Value, jCol JSONParent, preFill int) error { - col, err := jCol.upsertObject(name) - if err != nil { - return err - } - return iterateStruct(structVal, col, preFill) 
+ return false } -func iterateStruct(structVal reflect.Value, col JSONParent, preFill int) error { - // structs generally have consistent field counts but we ignore nil values that are any as we can't infer from - // these until they occur - so we might need to either backfill when to do occur or insert empty based on previous - if structVal.Kind() == reflect.Interface { - // can happen if passed from []any - structVal = structVal.Elem() +// valueAtPath returns the row value at the specified path, typed or dynamic +func (c *JSON) valueAtPath(path string, row int, ptr bool) any { + if colIndex, ok := c.typedPathsIndex[path]; ok { + return c.typedColumns[colIndex].Row(row, ptr) } - currentColumns := col.columnNames() - columnLookup := make(map[string]struct{}) - numRows := col.rows() - for _, name := range currentColumns { - columnLookup[name] = struct{}{} + if colIndex, ok := c.dynamicPathsIndex[path]; ok { + return c.dynamicColumns[colIndex].Row(row, ptr) } - addedColumns := make([]string, structVal.NumField(), structVal.NumField()) - newColumn := false - for i := 0; i < structVal.NumField(); i++ { - fName, omit := getStructFieldName(structVal.Type().Field(i)) - if omit { - continue - } - field := structVal.Field(i) - if !field.CanInterface() { - // can't interface - likely not exported so ignore the field - continue - } - kind := field.Kind() - value := field.Interface() - fType := field.Type() - //resolve underlying kind - if kind == reflect.Interface { - if value == nil { - // ignore nil fields - continue - } - kind = reflect.TypeOf(value).Kind() - field = reflect.ValueOf(value) - fType = field.Type() - } - if _, ok := columnLookup[fName]; !ok && len(currentColumns) > 0 { - // new column - need to handle missing - preFill = numRows - newColumn = true - } - if _, ok := typeMappings[fType.String()]; ok { - if err := parseType(fName, fType, value, false, col, preFill); err != nil { - return err - } - } else { - switch kind { - case reflect.Slice: - if reflect.ValueOf(value).Len() == 0 { - continue - } - if err := parseSlice(fName, value, col, preFill); err != nil { - return err - } - case reflect.Struct: - if err := parseStruct(fName, field, col, preFill); err != nil { - return err - } - case reflect.Map: - if err := parseMap(fName, field, col, preFill); err != nil { - return err - } - default: - if err := parsePrimitive(fName, kind, value, false, col, preFill); err != nil { - return err - } - } - } - addedColumns[i] = fName - if newColumn { - // reset as otherwise prefill overflow to other fields. 
But don't reset if this prefill has come from - // a higher level - preFill = 0 - } - } - // handle missing - missingColumns := difference(currentColumns, addedColumns) - for _, name := range missingColumns { - if err := col.insertEmptyColumn(name); err != nil { - return err - } - } return nil } -func parseMap(name string, mapVal reflect.Value, jCol JSONParent, preFill int) error { - if mapVal.Type().Key().Kind() != reflect.String { - return &Error{ - ColumnType: fmt.Sprint(mapVal.Type().Key().Kind()), - Err: fmt.Errorf("map keys must be string for column %s", name), - } +// scanTypedPathToValue scans the provided typed path into a `reflect.Value` +func (c *JSON) scanTypedPathToValue(path string, row int, value reflect.Value) error { + colIndex, ok := c.typedPathsIndex[path] + if !ok { + return fmt.Errorf("typed path \"%s\" does not exist in JSON column", path) } - col, err := jCol.upsertObject(name) + + col := c.typedColumns[colIndex] + err := col.ScanRow(value.Addr().Interface(), row) if err != nil { - return err - } - return iterateMap(mapVal, col, preFill) -} - -func iterateMap(mapVal reflect.Value, col JSONParent, preFill int) error { - // maps can have inconsistent numbers of elements - we must ensure they are consistent in the encoding - // two inconsistent options - 1. new - map has new columns 2. massing - map has missing columns - // for (1) we need to update previous, for (2) we need to ensure we add a null entry - if mapVal.Kind() == reflect.Interface { - // can happen if passed from []any - mapVal = mapVal.Elem() - } - - currentColumns := col.columnNames() - //gives us a fast lookup for large maps - columnLookup := make(map[string]struct{}) - numRows := col.rows() - // true if we need nil values - for _, name := range currentColumns { - columnLookup[name] = struct{}{} - } - addedColumns := make([]string, len(mapVal.MapKeys()), len(mapVal.MapKeys())) - newColumn := false - for i, key := range mapVal.MapKeys() { - if newColumn { - // reset as otherwise prefill overflow to other fields. 
But don't reset if this prefill has come from - // a higher level - preFill = 0 - } - - name := getMapFieldName(key.Interface().(string)) - if _, ok := columnLookup[name]; !ok && len(currentColumns) > 0 { - // new column - need to handle - preFill = numRows - newColumn = true - } - field := mapVal.MapIndex(key) - kind := field.Kind() - fType := field.Type() - - if kind == reflect.Interface { - if field.Interface() == nil { - // ignore nil fields - continue - } - kind = reflect.TypeOf(field.Interface()).Kind() - field = reflect.ValueOf(field.Interface()) - fType = field.Type() - } - if _, ok := typeMappings[fType.String()]; ok { - if err := parseType(name, fType, field.Interface(), false, col, preFill); err != nil { - return err - } - } else { - switch kind { - case reflect.Struct: - if err := parseStruct(name, field, col, preFill); err != nil { - return err - } - case reflect.Slice: - if err := parseSlice(name, field.Interface(), col, preFill); err != nil { - return err - } - case reflect.Map: - if err := parseMap(name, field, col, preFill); err != nil { - return err - } - default: - if err := parsePrimitive(name, kind, field.Interface(), false, col, preFill); err != nil { - return err - } - } - } - addedColumns[i] = name - } - // handle missing - missingColumns := difference(currentColumns, addedColumns) - for _, name := range missingColumns { - if err := col.insertEmptyColumn(name); err != nil { - return err - } + return fmt.Errorf("failed to scan %s column into typed path \"%s\": %w", col.Type(), path, err) } + return nil } -func appendStructOrMap(jCol *JSONObject, data any) error { - vData := reflect.ValueOf(data) - kind := vData.Kind() - if kind == reflect.Struct { - return iterateStruct(vData, jCol, 0) - } - if kind == reflect.Map { - if reflect.TypeOf(data).Key().Kind() != reflect.String { - return &Error{ - ColumnType: fmt.Sprint(reflect.TypeOf(data).Key().Kind()), - Err: fmt.Errorf("map keys must be string for column %s", jCol.Name()), - } - } - if jCol.columns == nil && vData.Len() == 0 { - // if map is empty, we need to create an empty Tuple to make sure subcolumns protocol is happy - // _dummy is a ClickHouse internal name for empty Tuple subcolumn - // it has the same effect as `INSERT INTO single_json_type_table VALUES ('{}');` - jCol.upsertValue("_dummy", "Int8") - return jCol.insertEmptyColumn("_dummy") - } - return iterateMap(vData, jCol, 0) +// scanDynamicPathToValue scans the provided typed path into a `reflect.Value` +func (c *JSON) scanDynamicPathToValue(path string, row int, value reflect.Value) error { + colIndex, ok := c.dynamicPathsIndex[path] + if !ok { + return fmt.Errorf("dynamic path \"%s\" does not exist in JSON column", path) } - return &UnsupportedColumnTypeError{ - t: Type(fmt.Sprint(kind)), + + col := c.dynamicColumns[colIndex] + err := col.ScanRow(value.Addr().Interface(), row) + if err != nil { + return fmt.Errorf("failed to scan %s column into dynamic path \"%s\": %w", col.Type(), path, err) } -} -type JSONValue struct { - Interface - // represents the type e.g. uuid - these may have been mapped to a Column type support by JSON e.g. 
String - origType reflect.Type + return nil } -func (jCol *JSONValue) Reset() { - jCol.Interface.Reset() -} +func (c *JSON) rowAsJSON(row int) *chcol.JSON { + obj := chcol.NewJSON() -func (jCol *JSONValue) appendEmptyValue() error { - switch jCol.Interface.(type) { - case *Array: - if jCol.Rows() > 0 { - return jCol.AppendRow(reflect.MakeSlice(reflect.TypeOf(jCol.Row(0, false)), 0, 0).Interface()) - } - return &Error{ - ColumnType: "unknown", - Err: fmt.Errorf("can't add empty value to column %s - no entries to infer type", jCol.Name()), - } - default: - // can't just append nil here as we need a custom nil value for the type - if jCol.origType != nil { - return jCol.AppendRow(fmt.Sprint(reflect.New(jCol.origType).Elem().Interface())) - } - return jCol.AppendRow(nil) + for i, path := range c.typedPaths { + col := c.typedColumns[i] + obj.SetValueAtPath(path, col.Row(row, false)) } -} -func (jCol *JSONValue) Type() Type { - return Type(fmt.Sprintf("%s %s", jCol.Name(), jCol.Interface.Type())) + for i, path := range c.dynamicPaths { + col := c.dynamicColumns[i] + obj.SetValueAtPath(path, col.Row(row, false)) + } + + return obj } -type JSONList struct { - Array - name string - isNested bool // indicates if this a list of objects i.e. a Nested +func (c *JSON) Name() string { + return c.name } -func (jCol *JSONList) Name() string { - return jCol.name +func (c *JSON) Type() Type { + return c.chType } -func (jCol *JSONList) columnNames() []string { - return jCol.Array.values.(*JSONObject).columnNames() +func (c *JSON) Rows() int { + return c.rows } -func (jCol *JSONList) rows() int { - return jCol.values.(*JSONObject).Rows() +func (c *JSON) Row(row int, ptr bool) any { + switch c.serializationVersion { + case JSONObjectSerializationVersion: + return c.rowAsJSON(row) + case JSONStringSerializationVersion: + return c.jsonStrings.Row(row, ptr) + default: + return nil + } } -func createJSONList(name string, tz *time.Location) (jCol *JSONList) { - // lists are represented as Nested which are in turn encoded as Array(Tuple()). 
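// --- Illustrative usage sketch (reviewer annotation, not part of the vendored patch) ---
// A minimal sketch of how the two JSON serialization versions surface to callers of
// clickhouse-go v2. The table name `events`, the column name `payload`, and the server
// address are assumptions; the driver calls, the *clickhouse.JSON destination, and the
// "output_format_native_write_json_as_string" setting come from the vendored code above.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
)

func main() {
	ctx := context.Background()
	conn, err := clickhouse.Open(&clickhouse.Options{Addr: []string{"127.0.0.1:9000"}})
	if err != nil {
		log.Fatal(err)
	}

	// Object serialization (the default): each row scans into *clickhouse.JSON,
	// which rowAsJSON assembles from the typed and dynamic sub-columns.
	rows, err := conn.Query(ctx, "SELECT payload FROM events")
	if err != nil {
		log.Fatal(err)
	}
	for rows.Next() {
		var payload clickhouse.JSON
		if err := rows.Scan(&payload); err != nil {
			log.Fatal(err)
		}
		for path, value := range payload.ValuesByPath() {
			fmt.Println(path, "=", value)
		}
	}
	rows.Close()

	// String serialization: with this setting the server ships the column as JSON
	// text, so the same column scans into a plain string instead.
	strCtx := clickhouse.Context(ctx, clickhouse.WithSettings(clickhouse.Settings{
		"output_format_native_write_json_as_string": 1,
	}))
	rows, err = conn.Query(strCtx, "SELECT payload FROM events")
	if err != nil {
		log.Fatal(err)
	}
	for rows.Next() {
		var payload string
		if err := rows.Scan(&payload); err != nil {
			log.Fatal(err)
		}
		fmt.Println(payload)
	}
	rows.Close()
}
// --- end of sketch ---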
We thus pass a Array(JSONObject()) - // as this encodes like a tuple - lCol := &JSONList{ - name: name, +func (c *JSON) ScanRow(dest any, row int) error { + switch c.serializationVersion { + case JSONObjectSerializationVersion: + return c.scanRowObject(dest, row) + case JSONStringSerializationVersion: + return c.scanRowString(dest, row) + default: + return fmt.Errorf("unsupported JSON serialization version for scan: %d", c.serializationVersion) } - lCol.values = &JSONObject{tz: tz} - // depth should always be one as nested arrays aren't possible - lCol.depth = 1 - lCol.scanType = scanTypeSlice - offsetScanTypes := []reflect.Type{lCol.scanType} - lCol.offsets = []*offset{{ - scanType: offsetScanTypes[0], - }} - return lCol } -func (jCol *JSONList) appendEmptyValue() error { - // only need to bump the offsets - jCol.createNewOffsets(1) - return nil +func (c *JSON) scanRowObject(dest any, row int) error { + switch v := dest.(type) { + case *chcol.JSON: + obj := c.rowAsJSON(row) + *v = *obj + return nil + case **chcol.JSON: + obj := c.rowAsJSON(row) + **v = *obj + return nil + case chcol.JSONDeserializer: + obj := c.rowAsJSON(row) + err := v.DeserializeClickHouseJSON(obj) + if err != nil { + return fmt.Errorf("failed to deserialize using DeserializeClickHouseJSON: %w", err) + } + + return nil + } + + switch val := reflect.ValueOf(dest); val.Kind() { + case reflect.Pointer: + if val.Elem().Kind() == reflect.Struct { + return c.scanIntoStruct(dest, row) + } else if val.Elem().Kind() == reflect.Map { + return c.scanIntoMap(dest, row) + } + } + + return fmt.Errorf("destination must be a pointer to struct or map, or %s. hint: enable \"output_format_native_write_json_as_string\" setting for string decoding", scanTypeJSON.String()) } -func (jCol *JSONList) insertEmptyColumn(name string) error { - return jCol.values.(*JSONObject).insertEmptyColumn(name) +func (c *JSON) scanRowString(dest any, row int) error { + return c.jsonStrings.ScanRow(dest, row) } -func (jCol *JSONList) upsertValue(name string, ct string) (*JSONValue, error) { - // check if column exists and reuse if same type, error if same name and different type - jObj := jCol.values.(*JSONObject) - cols := jObj.columns - for i := range cols { - sCol := cols[i] - if sCol.Name() == name { - vCol, ok := cols[i].(*JSONValue) - if !ok { - sType := cols[i].Type() - return nil, &Error{ - ColumnType: fmt.Sprint(sType), - Err: fmt.Errorf("type mismatch in column %s - expected value, got %s", name, sType), - } - } - tType := vCol.Interface.Type() - if tType != Type(ct) { - return nil, &Error{ - ColumnType: ct, - Err: fmt.Errorf("type mismatch in column %s - expected %s, got %s", name, tType, ct), - } +func (c *JSON) Append(v any) (nulls []uint8, err error) { + switch c.serializationVersion { + case JSONObjectSerializationVersion: + return c.appendObject(v) + case JSONStringSerializationVersion: + return c.appendString(v) + default: + // Unset serialization preference, try string first unless its specifically JSON + switch v.(type) { + case []chcol.JSON: + c.serializationVersion = JSONObjectSerializationVersion + return c.appendObject(v) + case []*chcol.JSON: + c.serializationVersion = JSONObjectSerializationVersion + return c.appendObject(v) + case []chcol.JSONSerializer: + c.serializationVersion = JSONObjectSerializationVersion + return c.appendObject(v) + } + + var err error + if _, err = c.appendString(v); err == nil { + c.serializationVersion = JSONStringSerializationVersion + return nil, nil + } else if _, err = c.appendObject(v); err == nil { + 
c.serializationVersion = JSONObjectSerializationVersion + return nil, nil + } + + return nil, fmt.Errorf("unsupported type \"%s\" for JSON column, must use slice of string, []byte, struct, map, or *%s: %w", reflect.TypeOf(v).String(), scanTypeJSON.String(), err) + } +} + +func (c *JSON) appendObject(v any) (nulls []uint8, err error) { + switch vv := v.(type) { + case []chcol.JSON: + for i, obj := range vv { + err := c.AppendRow(obj) + if err != nil { + return nil, fmt.Errorf("failed to AppendRow at index %d: %w", i, err) } - return vCol, nil } - } - col, err := Type(ct).Column(name, jObj.tz) - if err != nil { - return nil, err - } - vCol := &JSONValue{ - Interface: col, - } - jCol.values.(*JSONObject).columns = append(cols, vCol) // nolint:gocritic - return vCol, nil -} - -func (jCol *JSONList) upsertList(name string) (*JSONList, error) { - // check if column exists and reuse if same type, error if same name and different type - jObj := jCol.values.(*JSONObject) - cols := jCol.values.(*JSONObject).columns - for i := range cols { - sCol := cols[i] - if sCol.Name() == name { - sCol, ok := cols[i].(*JSONList) - if !ok { - return nil, &Error{ - ColumnType: fmt.Sprint(cols[i].Type()), - Err: fmt.Errorf("type mismatch in column %s - expected list, got %s", name, cols[i].Type()), - } + + return nil, nil + case []*chcol.JSON: + for i, obj := range vv { + err := c.AppendRow(obj) + if err != nil { + return nil, fmt.Errorf("failed to AppendRow at index %d: %w", i, err) } - return sCol, nil } - } - lCol := createJSONList(name, jObj.tz) - jCol.values.(*JSONObject).columns = append(cols, lCol) // nolint:gocritic - return lCol, nil -} - -func (jCol *JSONList) upsertObject(name string) (*JSONObject, error) { - // check if column exists and reuse if same type, error if same name and different type - jObj := jCol.values.(*JSONObject) - cols := jObj.columns - for i := range cols { - sCol := cols[i] - if sCol.Name() == name { - sCol, ok := cols[i].(*JSONObject) - if !ok { - sType := cols[i].Type() - return nil, &Error{ - ColumnType: fmt.Sprint(sType), - Err: fmt.Errorf("type mismatch in column %s, expected object got %s", name, sType), - } + return nil, nil + case []chcol.JSONSerializer: + for i, obj := range vv { + err := c.AppendRow(obj) + if err != nil { + return nil, fmt.Errorf("failed to AppendRow at index %d: %w", i, err) } - return sCol, nil } + + return nil, nil } - // lists are represented as Nested which are in turn encoded as Array(Tuple()). 
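// --- Illustrative usage sketch (reviewer annotation, not part of the vendored patch) ---
// A sketch of batch inserts into the new JSON column, assuming a hypothetical table
// `events(payload JSON)` and a local server. The accepted row shapes (struct, map,
// *clickhouse.JSON, JSON string) mirror the switch in AppendRow above; the first
// appended value decides whether the column uses object or string serialization.
package main

import (
	"context"
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
)

type event struct {
	Name  string `json:"name"`
	Count int64  `json:"count"`
}

func main() {
	ctx := context.Background()
	conn, err := clickhouse.Open(&clickhouse.Options{Addr: []string{"127.0.0.1:9000"}})
	if err != nil {
		log.Fatal(err)
	}

	batch, err := conn.PrepareBatch(ctx, "INSERT INTO events (payload)")
	if err != nil {
		log.Fatal(err)
	}

	// Structs and string-keyed maps are converted via structToJSON / mapToJSON
	// and select object serialization.
	if err := batch.Append(event{Name: "click", Count: 1}); err != nil {
		log.Fatal(err)
	}
	if err := batch.Append(map[string]any{"name": "view", "count": int64(2)}); err != nil {
		log.Fatal(err)
	}

	// A pre-built JSON value: paths not seen before become new dynamic paths,
	// and appendRowObject back-fills nil into them for the earlier rows.
	obj := clickhouse.NewJSON()
	obj.SetValueAtPath("name", "purchase")
	obj.SetValueAtPath("meta.currency", "EUR")
	if err := batch.Append(obj); err != nil {
		log.Fatal(err)
	}

	// Plain JSON strings or []byte rows are accepted as well, but they flip the
	// column to string serialization, so they should not be mixed with the
	// object forms in the same batch.
	if err := batch.Send(); err != nil {
		log.Fatal(err)
	}
}
// --- end of sketch ---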
We thus pass a Array(JSONObject()) - // as this encodes like a tuple - oCol := &JSONObject{ - name: name, - tz: jObj.tz, - } - jCol.values.(*JSONObject).columns = append(cols, oCol) // nolint:gocritic - return oCol, nil -} -func (jCol *JSONList) Type() Type { - cols := jCol.values.(*JSONObject).columns - subTypes := make([]string, len(cols)) - for i, v := range cols { - subTypes[i] = string(v.Type()) + value := reflect.Indirect(reflect.ValueOf(v)) + if value.Kind() != reflect.Slice { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(c.chType), + From: fmt.Sprintf("%T", v), + Hint: "value must be a slice", + } } - // can be a list of lists or a nested - if jCol.isNested { - return Type(fmt.Sprintf("%s Nested(%s)", jCol.name, strings.Join(subTypes, ", "))) + for i := 0; i < value.Len(); i++ { + if err := c.AppendRow(value.Index(i)); err != nil { + return nil, err + } } - return Type(fmt.Sprintf("%s Array(%s)", jCol.name, strings.Join(subTypes, ", "))) -} -type JSONObject struct { - columns []JSON - name string - root bool - encoding uint8 - tz *time.Location + return nil, nil } -func (jCol *JSONObject) Reset() { - for i := range jCol.columns { - jCol.columns[i].Reset() +func (c *JSON) appendString(v any) (nulls []uint8, err error) { + nulls, err = c.jsonStrings.Append(v) + if err != nil { + return nil, err } -} -func (jCol *JSONObject) Name() string { - return jCol.name + c.rows = c.jsonStrings.Rows() + return nulls, nil } -func (jCol *JSONObject) columnNames() []string { - columns := make([]string, len(jCol.columns), len(jCol.columns)) - for i := range jCol.columns { - columns[i] = jCol.columns[i].Name() - } - return columns -} +func (c *JSON) AppendRow(v any) error { + switch c.serializationVersion { + case JSONObjectSerializationVersion: + return c.appendRowObject(v) + case JSONStringSerializationVersion: + return c.appendRowString(v) + default: + // Unset serialization preference, try string first unless its specifically JSON + switch v.(type) { + case chcol.JSON: + c.serializationVersion = JSONObjectSerializationVersion + return c.appendRowObject(v) + case *chcol.JSON: + c.serializationVersion = JSONObjectSerializationVersion + return c.appendRowObject(v) + case chcol.JSONSerializer: + c.serializationVersion = JSONObjectSerializationVersion + return c.appendRowObject(v) + } + + var err error + if err = c.appendRowString(v); err == nil { + c.serializationVersion = JSONStringSerializationVersion + return nil + } else if err = c.appendRowObject(v); err == nil { + c.serializationVersion = JSONObjectSerializationVersion + return nil + } -func (jCol *JSONObject) rows() int { - return jCol.Rows() + return fmt.Errorf("unsupported type \"%s\" for JSON column, must use string, []byte, *struct, map, or *clickhouse.JSON: %w", reflect.TypeOf(v).String(), err) + } } -func (jCol *JSONObject) appendEmptyValue() error { - for i := range jCol.columns { - if err := jCol.columns[i].appendEmptyValue(); err != nil { - return err +func (c *JSON) appendRowObject(v any) error { + var obj *chcol.JSON + switch vv := v.(type) { + case chcol.JSON: + obj = &vv + case *chcol.JSON: + obj = vv + case chcol.JSONSerializer: + var err error + obj, err = vv.SerializeClickHouseJSON() + if err != nil { + return fmt.Errorf("failed to serialize using SerializeClickHouseJSON: %w", err) } } - return nil -} -func (jCol *JSONObject) insertEmptyColumn(name string) error { - for i := range jCol.columns { - if jCol.columns[i].Name() == name { - if err := jCol.columns[i].appendEmptyValue(); err != nil { - return err + 
if obj == nil && v != nil { + var err error + switch val := reflect.ValueOf(v); val.Kind() { + case reflect.Pointer: + if val.Elem().Kind() == reflect.Struct { + obj, err = structToJSON(v) + } else if val.Elem().Kind() == reflect.Map { + obj, err = mapToJSON(v) } - return nil + case reflect.Struct: + obj, err = structToJSON(v) + case reflect.Map: + obj, err = mapToJSON(v) } - } - return &Error{ - ColumnType: "unknown", - Err: fmt.Errorf("column %s is missing - empty value cannot be appended", name), - } -} -func (jCol *JSONObject) upsertValue(name string, ct string) (*JSONValue, error) { - for i := range jCol.columns { - sCol := jCol.columns[i] - if sCol.Name() == name { - vCol, ok := jCol.columns[i].(*JSONValue) - if !ok { - sType := jCol.columns[i].Type() - return nil, &Error{ - ColumnType: fmt.Sprint(sType), - Err: fmt.Errorf("type mismatch in column %s, expected value got %s", name, sType), - } - } - if vCol.Interface.Type() != Type(ct) { - return nil, &Error{ - ColumnType: ct, - Err: fmt.Errorf("type mismatch in column %s, expected %s got %s", name, vCol.Interface.Type(), ct), - } - } - return vCol, nil + if err != nil { + return fmt.Errorf("failed to convert value to JSON: %w", err) } } - col, err := Type(ct).Column(name, jCol.tz) - if err != nil { - return nil, err + + if obj == nil { + obj = chcol.NewJSON() } - vCol := &JSONValue{ - Interface: col, + valuesByPath := obj.ValuesByPath() + + // Match typed paths first + for i, typedPath := range c.typedPaths { + // Even if value is nil, we must append a value for this row. + // nil is a valid value for most column types, with most implementations putting a zero value. + // If the column doesn't support appending nil, then the user must provide a zero value. + value, _ := valuesByPath[typedPath] + + col := c.typedColumns[i] + err := col.AppendRow(value) + if err != nil { + return fmt.Errorf("failed to append type %s to json column at typed path %s: %w", col.Type(), typedPath, err) + } } - jCol.columns = append(jCol.columns, vCol) - return vCol, nil -} -func (jCol *JSONObject) upsertList(name string) (*JSONList, error) { - for i := range jCol.columns { - sCol := jCol.columns[i] - if sCol.Name() == name { - sCol, ok := jCol.columns[i].(*JSONList) - if !ok { - sType := jCol.columns[i].Type() - return nil, &Error{ - ColumnType: fmt.Sprint(sType), - Err: fmt.Errorf("type mismatch in column %s, expected list got %s", name, sType), - } - } - return sCol, nil + // Verify all dynamic paths have an equal number of rows by appending nil for all unspecified dynamic paths + for _, dynamicPath := range c.dynamicPaths { + if _, ok := valuesByPath[dynamicPath]; !ok { + valuesByPath[dynamicPath] = nil } } - lCol := createJSONList(name, jCol.tz) - jCol.columns = append(jCol.columns, lCol) - return lCol, nil -} -func (jCol *JSONObject) upsertObject(name string) (*JSONObject, error) { - // check if it exists - for i := range jCol.columns { - sCol := jCol.columns[i] - if sCol.Name() == name { - sCol, ok := jCol.columns[i].(*JSONObject) - if !ok { - sType := jCol.columns[i].Type() - return nil, &Error{ - ColumnType: fmt.Sprint(sType), - Err: fmt.Errorf("type mismatch in column %s, expected object got %s", name, sType), + // Match or add dynamic paths + for objPath, value := range valuesByPath { + if c.hasTypedPath(objPath) || c.hasSkipPath(objPath) { + continue + } + + if dynamicPathIndex, ok := c.dynamicPathsIndex[objPath]; ok { + err := c.dynamicColumns[dynamicPathIndex].AppendRow(value) + if err != nil { + return fmt.Errorf("failed to append to json 
column at dynamic path \"%s\": %w", objPath, err) + } + } else { + // Path doesn't exist, add new dynamic path + column + parsedColDynamic, _ := Type("Dynamic").Column("", c.tz) + colDynamic := parsedColDynamic.(*Dynamic) + + // New path must back-fill nils for each row + for i := 0; i < c.rows; i++ { + err := colDynamic.AppendRow(nil) + if err != nil { + return fmt.Errorf("failed to back-fill json column at new dynamic path \"%s\" index %d: %w", objPath, i, err) } } - return sCol, nil + + err := colDynamic.AppendRow(value) + if err != nil { + return fmt.Errorf("failed to append to json column at new dynamic path \"%s\": %w", objPath, err) + } + + c.dynamicPaths = append(c.dynamicPaths, objPath) + c.dynamicPathsIndex[objPath] = len(c.dynamicPaths) - 1 + c.dynamicColumns = append(c.dynamicColumns, colDynamic) + c.totalDynamicPaths++ } } - // not present so create - oCol := &JSONObject{ - name: name, - tz: jCol.tz, + + c.rows++ + return nil +} + +func (c *JSON) appendRowString(v any) error { + err := c.jsonStrings.AppendRow(v) + if err != nil { + return err } - jCol.columns = append(jCol.columns, oCol) - return oCol, nil + + c.rows++ + return nil } -func (jCol *JSONObject) Type() Type { - if jCol.root { - return "Object('json')" +func (c *JSON) encodeObjectHeader(buffer *proto.Buffer) { + buffer.PutUVarInt(uint64(c.maxDynamicPaths)) + buffer.PutUVarInt(uint64(c.totalDynamicPaths)) + + for _, dynamicPath := range c.dynamicPaths { + buffer.PutString(dynamicPath) + } + + for _, col := range c.dynamicColumns { + col.encodeHeader(buffer) } - return jCol.FullType() } -func (jCol *JSONObject) FullType() Type { - subTypes := make([]string, len(jCol.columns)) - for i, v := range jCol.columns { - subTypes[i] = string(v.Type()) +func (c *JSON) encodeObjectData(buffer *proto.Buffer) { + for _, col := range c.typedColumns { + col.Encode(buffer) + } + + for _, col := range c.dynamicColumns { + col.encodeData(buffer) } - if jCol.root { - return Type(fmt.Sprintf("Tuple(%s)", strings.Join(subTypes, ", "))) + + // SharedData per row, empty for now. + for i := 0; i < c.rows; i++ { + buffer.PutUInt64(0) } - return Type(fmt.Sprintf("%s Tuple(%s)", jCol.name, strings.Join(subTypes, ", "))) } -func (jCol *JSONObject) ScanType() reflect.Type { - return scanTypeMap +func (c *JSON) encodeStringData(buffer *proto.Buffer) { + c.jsonStrings.Encode(buffer) } -func (jCol *JSONObject) Rows() int { - if len(jCol.columns) != 0 { - return jCol.columns[0].Rows() +func (c *JSON) WriteStatePrefix(buffer *proto.Buffer) error { + switch c.serializationVersion { + case JSONObjectSerializationVersion: + buffer.PutUInt64(JSONObjectSerializationVersion) + c.encodeObjectHeader(buffer) + + return nil + case JSONStringSerializationVersion: + buffer.PutUInt64(JSONStringSerializationVersion) + + return nil + default: + // If the column is an array, it can be empty but still require a prefix. + // Use string encoding since it's smaller + buffer.PutUInt64(JSONStringSerializationVersion) + + return nil } - return 0 } -// ClickHouse returns JSON as a tuple i.e. 
these will never be invoked - -func (jCol *JSONObject) Row(i int, ptr bool) any { - panic("Not implemented") +func (c *JSON) Encode(buffer *proto.Buffer) { + switch c.serializationVersion { + case JSONObjectSerializationVersion: + c.encodeObjectData(buffer) + return + case JSONStringSerializationVersion: + c.encodeStringData(buffer) + return + } } -func (jCol *JSONObject) ScanRow(dest any, row int) error { - panic("Not implemented") +func (c *JSON) ScanType() reflect.Type { + return scanTypeJSON } -func (jCol *JSONObject) Append(v any) (nulls []uint8, err error) { - jSlice := reflect.ValueOf(v) - if jSlice.Kind() != reflect.Slice { - return nil, &ColumnConverterError{ - Op: "Append", - To: string(jCol.Type()), - From: fmt.Sprintf("slice of structs/map or strings required - received %T", v), +func (c *JSON) Reset() { + c.rows = 0 + + switch c.serializationVersion { + case JSONObjectSerializationVersion: + for _, col := range c.typedColumns { + col.Reset() } - } - for i := 0; i < jSlice.Len(); i++ { - if err := jCol.AppendRow(jSlice.Index(i).Interface()); err != nil { - return nil, err + + for _, col := range c.dynamicColumns { + col.Reset() } + + return + case JSONStringSerializationVersion: + c.jsonStrings.Reset() + return } - return nil, nil } -func (jCol *JSONObject) AppendRow(v any) error { - if reflect.ValueOf(v).Kind() == reflect.Struct || reflect.ValueOf(v).Kind() == reflect.Map { - if jCol.columns != nil && jCol.encoding == 1 { - return &Error{ - ColumnType: fmt.Sprint(jCol.Type()), - Err: fmt.Errorf("encoding of JSON columns cannot be mixed in a batch - %s cannot be added as previously String", reflect.ValueOf(v).Kind()), - } +func (c *JSON) decodeObjectHeader(reader *proto.Reader) error { + maxDynamicPaths, err := reader.UVarInt() + if err != nil { + return fmt.Errorf("failed to read max dynamic paths for json column: %w", err) + } + c.maxDynamicPaths = int(maxDynamicPaths) + + totalDynamicPaths, err := reader.UVarInt() + if err != nil { + return fmt.Errorf("failed to read total dynamic paths for json column: %w", err) + } + c.totalDynamicPaths = int(totalDynamicPaths) + + c.dynamicPaths = make([]string, 0, totalDynamicPaths) + for i := 0; i < int(totalDynamicPaths); i++ { + dynamicPath, err := reader.Str() + if err != nil { + return fmt.Errorf("failed to read dynamic path name bytes at index %d for json column: %w", i, err) } - err := appendStructOrMap(jCol, v) - return err + + c.dynamicPaths = append(c.dynamicPaths, dynamicPath) + c.dynamicPathsIndex[dynamicPath] = len(c.dynamicPaths) - 1 } - switch v := v.(type) { - case string: - if jCol.columns != nil && jCol.encoding == 0 { - return &Error{ - ColumnType: fmt.Sprint(jCol.Type()), - Err: fmt.Errorf("encoding of JSON columns cannot be mixed in a batch - %s cannot be added as previously Struct/Map", reflect.ValueOf(v).Kind()), - } + + c.dynamicColumns = make([]*Dynamic, 0, totalDynamicPaths) + for _, dynamicPath := range c.dynamicPaths { + parsedColDynamic, _ := Type("Dynamic").Column("", c.tz) + colDynamic := parsedColDynamic.(*Dynamic) + + err := colDynamic.decodeHeader(reader) + if err != nil { + return fmt.Errorf("failed to decode dynamic header at path %s for json column: %w", dynamicPath, err) } - jCol.encoding = 1 - if jCol.columns == nil { - jCol.columns = append(jCol.columns, &JSONValue{Interface: &String{}}) + + c.dynamicColumns = append(c.dynamicColumns, colDynamic) + } + + return nil +} + +func (c *JSON) decodeObjectData(reader *proto.Reader, rows int) error { + for i, col := range c.typedColumns { + typedPath := 
c.typedPaths[i] + + err := col.Decode(reader, rows) + if err != nil { + return fmt.Errorf("failed to decode %s typed path %s for json column: %w", col.Type(), typedPath, err) } - jCol.columns[0].AppendRow(v) - default: - return &ColumnConverterError{ - Op: "AppendRow", - To: "String", - From: fmt.Sprintf("json row must be struct, map or string - received %T", v), + } + + for i, col := range c.dynamicColumns { + dynamicPath := c.dynamicPaths[i] + + err := col.decodeData(reader, rows) + if err != nil { + return fmt.Errorf("failed to decode dynamic path %s for json column: %w", dynamicPath, err) } } + + // SharedData per row, ignored for now. May cause stream offset issues if present + _, err := reader.ReadRaw(8 * rows) // one UInt64 per row + if err != nil { + return fmt.Errorf("failed to read shared data for json column: %w", err) + } + return nil } -func (jCol *JSONObject) Decode(reader *proto.Reader, rows int) error { - panic("Not implemented") +func (c *JSON) decodeStringData(reader *proto.Reader, rows int) error { + return c.jsonStrings.Decode(reader, rows) } -func (jCol *JSONObject) Encode(buffer *proto.Buffer) { - if jCol.root && jCol.encoding == 0 { - buffer.PutString(string(jCol.FullType())) +func (c *JSON) ReadStatePrefix(reader *proto.Reader) error { + jsonSerializationVersion, err := reader.UInt64() + if err != nil { + return fmt.Errorf("failed to read json serialization version: %w", err) } - for _, c := range jCol.columns { - c.Encode(buffer) + + c.serializationVersion = jsonSerializationVersion + + switch jsonSerializationVersion { + case JSONObjectSerializationVersion: + err := c.decodeObjectHeader(reader) + if err != nil { + return fmt.Errorf("failed to decode json object header: %w", err) + } + + return nil + case JSONStringSerializationVersion: + return nil + default: + return fmt.Errorf("unsupported JSON serialization version for prefix decode: %d", jsonSerializationVersion) } } -func (jCol *JSONObject) ReadStatePrefix(reader *proto.Reader) error { - _, err := reader.UInt8() - return err -} +func (c *JSON) Decode(reader *proto.Reader, rows int) error { + c.rows = rows -func (jCol *JSONObject) WriteStatePrefix(buffer *proto.Buffer) error { - buffer.PutUInt8(jCol.encoding) - return nil -} + switch c.serializationVersion { + case JSONObjectSerializationVersion: + err := c.decodeObjectData(reader, rows) + if err != nil { + return fmt.Errorf("failed to decode json object data: %w", err) + } -var ( - _ Interface = (*JSONObject)(nil) - _ CustomSerialization = (*JSONObject)(nil) -) + return nil + case JSONStringSerializationVersion: + err := c.decodeStringData(reader, rows) + if err != nil { + return fmt.Errorf("failed to decode json string data: %w", err) + } + + return nil + default: + return fmt.Errorf("unsupported JSON serialization version for decode: %d", c.serializationVersion) + } +} + +// splitWithDelimiters splits the string while considering backticks and parentheses +func splitWithDelimiters(s string) []string { + var parts []string + var currentPart strings.Builder + var brackets int + inBackticks := false + + for i := 0; i < len(s); i++ { + switch s[i] { + case '`': + inBackticks = !inBackticks + currentPart.WriteByte(s[i]) + case '(': + brackets++ + currentPart.WriteByte(s[i]) + case ')': + brackets-- + currentPart.WriteByte(s[i]) + case ',': + if !inBackticks && brackets == 0 { + parts = append(parts, currentPart.String()) + currentPart.Reset() + } else { + currentPart.WriteByte(s[i]) + } + default: + currentPart.WriteByte(s[i]) + } + } + + if currentPart.Len() > 
0 { + parts = append(parts, currentPart.String()) + } + + return parts +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json_reflect.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json_reflect.go new file mode 100644 index 00000000..3b48d14b --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json_reflect.go @@ -0,0 +1,450 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package column + +import ( + "fmt" + "reflect" + "strings" + + "github.com/ClickHouse/clickhouse-go/v2/lib/chcol" +) + +// Decoding (Scanning) + +// scanIntoStruct will iterate the provided struct and scan JSON data into the matching fields +func (c *JSON) scanIntoStruct(dest any, row int) error { + val := reflect.ValueOf(dest) + if val.Kind() != reflect.Pointer { + return fmt.Errorf("destination must be a pointer") + } + val = val.Elem() + + if val.Kind() != reflect.Struct { + return fmt.Errorf("destination must be a pointer to struct") + } + + return c.fillStruct(val, "", row) +} + +// scanIntoMap converts JSON data into a map +func (c *JSON) scanIntoMap(dest any, row int) error { + val := reflect.ValueOf(dest) + if val.Kind() != reflect.Pointer { + return fmt.Errorf("destination must be a pointer") + } + val = val.Elem() + + if val.Kind() != reflect.Map { + return fmt.Errorf("destination must be a pointer to map") + } + + if val.Type().Key().Kind() != reflect.String { + return fmt.Errorf("map key must be string") + } + + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + return c.fillMap(val, "", row) +} + +// fillStruct will iterate the provided struct and scan JSON data into the matching fields recursively +func (c *JSON) fillStruct(val reflect.Value, prefix string, row int) error { + typ := val.Type() + + for i := 0; i < val.NumField(); i++ { + field := val.Field(i) + fieldType := typ.Field(i) + + if !field.CanSet() { + continue + } + + name := fieldType.Tag.Get("json") + if name == "" || name[0] == ',' { + name = fieldType.Name + } else { + name = strings.Split(name, ",")[0] + } + + if name == "-" { + continue + } + + path := name + if prefix != "" { + path = prefix + "." 
+ name + } + + if c.hasTypedPath(path) { + err := c.scanTypedPathToValue(path, row, field) + if err != nil { + return fmt.Errorf("fillStruct failed to scan typed path: %w", err) + } + + continue + } else if c.hasDynamicPath(path) { + err := c.scanDynamicPathToValue(path, row, field) + if err != nil { + return fmt.Errorf("fillStruct failed to scan dynamic path: %w", err) + } + + continue + } + + hasNestedFields := c.pathHasNestedValues(path) + if !hasNestedFields { + continue + } + + switch field.Kind() { + case reflect.Pointer: + if field.Type().Elem().Kind() == reflect.Struct { + if field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + + if err := c.fillStruct(field.Elem(), path, row); err != nil { + return fmt.Errorf("error filling nested struct pointer: %w", err) + } + } + case reflect.Struct: + if err := c.fillStruct(field, path, row); err != nil { + return fmt.Errorf("error filling nested struct: %w", err) + } + case reflect.Map: + if err := c.fillMap(field, path, row); err != nil { + return fmt.Errorf("error filling nested map: %w", err) + } + } + } + + return nil +} + +// fillMap will iterate the provided map and scan JSON data in recursively +func (c *JSON) fillMap(val reflect.Value, prefix string, row int) error { + if val.IsNil() { + val.Set(reflect.MakeMap(val.Type())) + } + + var paths []string + for _, path := range c.typedPaths { + if strings.HasPrefix(path, prefix) { + paths = append(paths, path) + } + } + for _, path := range c.dynamicPaths { + if strings.HasPrefix(path, prefix) { + paths = append(paths, path) + } + } + + children := make(map[string][]string) + prefixLen := len(prefix) + if prefixLen > 0 { + prefixLen++ // splitter + } + + for _, path := range paths { + if prefixLen >= len(path) { + continue + } + + suffix := path[prefixLen:] + nextDot := strings.Index(suffix, ".") + var current string + if nextDot == -1 { + current = suffix + } else { + current = suffix[:nextDot] + } + children[current] = append(children[current], path) + } + + for key, childPaths := range children { + noChildNodes := true + for _, path := range childPaths { + if strings.Contains(path[prefixLen:], ".") { + noChildNodes = false + break + } + } + + if noChildNodes { + fullPath := prefix + if prefix != "" { + fullPath += "." + } + fullPath += key + + mapValueType := val.Type().Elem() + newVal := reflect.New(mapValueType).Elem() + + var err error + if _, isTyped := c.typedPathsIndex[fullPath]; isTyped { + err = c.scanTypedPathToValue(fullPath, row, newVal) + } else { + if mapValueType.Kind() == reflect.Interface { + value := c.valueAtPath(fullPath, row, false) + if dyn, ok := value.(chcol.Dynamic); ok { + value = dyn.Any() + } + + if value != nil { + newVal.Set(reflect.ValueOf(value)) + } + } else { + err = c.scanDynamicPathToValue(fullPath, row, newVal) + } + } + if err != nil { + return fmt.Errorf("failed to scan value at path \"%s\": %w", fullPath, err) + } + + val.SetMapIndex(reflect.ValueOf(key), newVal) + } else { + newPrefix := prefix + if newPrefix != "" { + newPrefix += "." 
+ } + newPrefix += key + + mapValueType := val.Type().Elem() + var newMap reflect.Value + + if mapValueType.Kind() == reflect.Interface { + newMap = reflect.MakeMap(reflect.TypeOf(map[string]interface{}{})) + } else if mapValueType.Kind() == reflect.Map { + newMap = reflect.MakeMap(mapValueType) + } else { + return fmt.Errorf("invalid map value type for nested path \"%s\"", newPrefix) + } + + err := c.fillMap(newMap, newPrefix, row) + if err != nil { + return fmt.Errorf("failed filling nested map at path \"%s\": %w", newPrefix, err) + } + + val.SetMapIndex(reflect.ValueOf(key), newMap) + } + } + + return nil +} + +// Encoding (Append, AppendRow) + +// structToJSON converts a struct to JSON data +func structToJSON(v any) (*chcol.JSON, error) { + json := chcol.NewJSON() + val := reflect.ValueOf(v) + + if val.Kind() == reflect.Pointer { + val = val.Elem() + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct, got %v", val.Kind()) + } + + err := iterateStruct(val, "", json) + if err != nil { + return nil, err + } + + return json, nil +} + +// mapToJSON converts a map to JSON data +func mapToJSON(v any) (*chcol.JSON, error) { + json := chcol.NewJSON() + val := reflect.ValueOf(v) + + if val.Kind() == reflect.Pointer { + val = val.Elem() + } + + if val.Kind() != reflect.Map { + return nil, fmt.Errorf("expected map, got %v", val.Kind()) + } + + if val.Type().Key().Kind() != reflect.String { + return nil, fmt.Errorf("map key must be string, got %v", val.Type().Key().Kind()) + } + + err := iterateMap(val, "", json) + if err != nil { + return nil, err + } + + return json, nil +} + +// iterateStruct recursively iterates through a struct and adds its fields to the JSON data +func iterateStruct(val reflect.Value, prefix string, json *chcol.JSON) error { + typ := val.Type() + + for i := 0; i < val.NumField(); i++ { + field := val.Field(i) + fieldType := typ.Field(i) + + if !field.CanInterface() { + continue + } + + name := fieldType.Tag.Get("json") + if name == "" || name[0] == ',' { + name = fieldType.Name + } else { + // handle `json:"name,omitempty"` + name = strings.Split(name, ",")[0] + } + + if name == "-" { + continue + } + + path := name + if prefix != "" { + path = prefix + "." + name + } + + forcedType := fieldType.Tag.Get("chType") + err := handleValue(field, path, json, forcedType) + if err != nil { + return err + } + } + + return nil +} + +// iterateStructSkipTypes is a set of struct types that will not be iterated. +// Instead, the value will be assigned directly for use within Dynamic row appending. 
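// --- Illustrative tag-handling sketch (reviewer annotation, not part of the vendored patch) ---
// iterateStruct above honours `json` tags (rename; a ",omitempty" suffix is tolerated but
// only the part before the comma is used as the path; "-" skips the field) plus a `chType`
// tag that wraps the value in chcol.NewDynamicWithType so it is stored under an explicit
// ClickHouse type. The struct below is a hypothetical illustration of how the tags combine;
// it would be handed to batch.Append on a JSON column as in the earlier sketch.
package main

import "fmt"

type OrderEvent struct {
	// stored under the path "order_id"
	OrderID string `json:"order_id"`
	// only "amount" is used as the path; omitempty itself has no effect here
	Amount float64 `json:"amount,omitempty"`
	// forced to Array(String) instead of letting the Dynamic column infer a type
	Tags []string `json:"tags" chType:"Array(String)"`
	// "-" omits the field entirely
	Internal string `json:"-"`
	// untagged exported fields fall back to the Go field name as the path
	Region string
}

func main() {
	e := OrderEvent{OrderID: "o-1", Amount: 9.99, Tags: []string{"new"}, Region: "eu"}
	fmt.Printf("%+v\n", e)
}
// --- end of sketch ---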
+var iterateStructSkipTypes = map[reflect.Type]struct{}{ + scanTypeIP: {}, + scanTypeUUID: {}, + scanTypeTime: {}, + scanTypeTime: {}, + scanTypeRing: {}, + scanTypePoint: {}, + scanTypeBigInt: {}, + scanTypePolygon: {}, + scanTypeDecimal: {}, + scanTypeMultiPolygon: {}, + scanTypeVariant: {}, + scanTypeDynamic: {}, + scanTypeJSON: {}, +} + +// handleValue processes a single value and adds it to the JSON data +func handleValue(val reflect.Value, path string, json *chcol.JSON, forcedType string) error { + if val.Kind() == reflect.Interface { + val = val.Elem() + } + + if !val.IsValid() { + json.SetValueAtPath(path, nil) + return nil + } + + switch val.Kind() { + case reflect.Pointer: + if val.IsNil() { + json.SetValueAtPath(path, nil) + return nil + } + return handleValue(val.Elem(), path, json, forcedType) + + case reflect.Struct: + if _, ok := iterateStructSkipTypes[val.Type()]; ok { + json.SetValueAtPath(path, val.Interface()) + return nil + } + + return iterateStruct(val, path, json) + + case reflect.Map: + if forcedType == "" && val.Type().Elem().Kind() == reflect.Interface { + // Only iterate maps if they are map[string]interface{} + return iterateMap(val, path, json) + } else if forcedType == "" { + json.SetValueAtPath(path, val.Interface()) + return nil + } else { + json.SetValueAtPath(path, chcol.NewDynamicWithType(val.Interface(), forcedType)) + return nil + } + case reflect.Slice, reflect.Array: + if forcedType == "" { + json.SetValueAtPath(path, val.Interface()) + } else { + json.SetValueAtPath(path, chcol.NewDynamicWithType(val.Interface(), forcedType)) + } + return nil + default: + if forcedType == "" { + json.SetValueAtPath(path, val.Interface()) + } else { + json.SetValueAtPath(path, chcol.NewDynamicWithType(val.Interface(), forcedType)) + } + return nil + } +} + +const MaxMapPathDepth = 32 + +// iterateMap recursively iterates through a map and adds its values to the JSON data +func iterateMap(val reflect.Value, prefix string, json *chcol.JSON) error { + depth := len(strings.Split(prefix, ".")) + if depth > MaxMapPathDepth { + return fmt.Errorf("maximum nesting depth exceeded") + } + + for _, key := range val.MapKeys() { + if key.Kind() != reflect.String { + return fmt.Errorf("map key must be string, got %v", key.Kind()) + } + + path := key.String() + if prefix != "" { + path = prefix + "." 
+ path + } + + mapValue := val.MapIndex(key) + + if mapValue.Kind() == reflect.Interface { + mapValue = mapValue.Elem() + } + + if mapValue.Kind() == reflect.Map { + if err := iterateMap(mapValue, path, json); err != nil { + return err + } + } else { + if err := handleValue(mapValue, path, json, ""); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/lowcardinality.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/lowcardinality.go index 52933c67..393d4dad 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/lowcardinality.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/lowcardinality.go @@ -220,6 +220,9 @@ func (col *LowCardinality) Encode(buffer *proto.Buffer) { }() ixLen := uint64(len(col.append.index)) switch { + case col.keys().Rows() > 0: + // We already have keys, so this column is probably in a block directly decoded from the server, and we should + // not reset them case ixLen < math.MaxUint8: col.key = keyUInt8 for _, v := range col.append.keys { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go index 85ccbe9e..090aa629 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go @@ -66,7 +66,17 @@ func (col *Map) Name() string { func (col *Map) parse(t Type, tz *time.Location) (_ Interface, err error) { col.chType = t - if types := strings.SplitN(t.params(), ",", 2); len(types) == 2 { + types := make([]string, 2, 2) + typeParams := t.params() + idx := strings.Index(typeParams, ",") + if strings.HasPrefix(typeParams, "Enum") { + idx = strings.Index(typeParams, "),") + 1 + } + if idx > 0 { + types[0] = typeParams[:idx] + types[1] = typeParams[idx+1:] + } + if types[0] != "" && types[1] != "" { if col.keys, err = Type(strings.TrimSpace(types[0])).Column(col.name, tz); err != nil { return nil, err } @@ -159,6 +169,15 @@ func (col *Map) Append(v any) (nulls []uint8, err error) { } func (col *Map) AppendRow(v any) error { + if v == nil { + return &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("try using %s", col.scanType), + } + } + value := reflect.Indirect(reflect.ValueOf(v)) if value.Type() == col.scanType { var ( @@ -309,9 +328,19 @@ func (col *Map) row(n int) reflect.Value { from = int(prev) ) for next := 0; next < size; next++ { + mapValue := col.values.Row(from+next, false) + var mapReflectValue reflect.Value + if mapValue == nil { + // Convert interface{} nil to typed nil (such as nil *string) to preserve map element + // https://github.com/ClickHouse/clickhouse-go/issues/1515 + mapReflectValue = reflect.New(value.Type().Elem()).Elem() + } else { + mapReflectValue = reflect.ValueOf(mapValue) + } + value.SetMapIndex( reflect.ValueOf(col.keys.Row(from+next, false)), - reflect.ValueOf(col.values.Row(from+next, false)), + mapReflectValue, ) } return value diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nested.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nested.go index d961bd02..77b09309 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nested.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/nested.go @@ -21,6 +21,8 @@ import ( "fmt" "strings" "time" + + "github.com/ClickHouse/ch-go/proto" ) type Nested struct { @@ -86,4 +88,22 @@ func 
nestedColumns(raw string) (columns []namedCol) { return } +func (col *Nested) ReadStatePrefix(reader *proto.Reader) error { + if serialize, ok := col.Interface.(CustomSerialization); ok { + if err := serialize.ReadStatePrefix(reader); err != nil { + return err + } + } + return nil +} + +func (col *Nested) WriteStatePrefix(buffer *proto.Buffer) error { + if serialize, ok := col.Interface.(CustomSerialization); ok { + if err := serialize.WriteStatePrefix(buffer); err != nil { + return err + } + } + return nil +} + var _ Interface = (*Nested)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/object_json.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/object_json.go new file mode 100644 index 00000000..2806fea5 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/object_json.go @@ -0,0 +1,953 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package column + +import ( + "fmt" + "reflect" + "strings" + "time" + + "github.com/ClickHouse/ch-go/proto" +) + +// This JSON type implementation was done for an experimental Object('JSON') type: +// https://clickhouse.com/docs/en/sql-reference/data-types/object-data-type +// It's already deprecated in ClickHouse and will be removed in the future. +// Since ClickHouse 24.8, the Object('JSON') type is no longer alias for JSON type. +// The new JSON type has been introduced: https://clickhouse.com/docs/en/sql-reference/data-types/newjson +// However, the new JSON type is not supported by the driver yet. +// +// This implementation is kept for backward compatibility and will be removed in the future. 
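// --- Illustrative note on the legacy Object('JSON') encoding (reviewer annotation, not part of the vendored patch) ---
// The deprecated implementation kept below flattens Go values into named sub-columns:
// primitive kinds are typed via kindMappings, while time.Time, decimal.Decimal, net.IP
// and uuid.UUID are mapped to String (typeMappings), and the root column reports itself
// as a Tuple of those sub-columns. As a hypothetical example, a struct like:
package main

import "time"

type legacyRow struct {
	Name      string    `json:"name"`
	Age       int64     `json:"age"`
	CreatedAt time.Time `json:"created_at"`
}

// would be encoded by appendStructOrMap as three sub-columns, roughly
// Tuple(name String, age Int64, created_at String), since time.Time is one of the
// complex types mapped to String.

func main() { _ = legacyRow{Name: "a", Age: 1, CreatedAt: time.Now()} }
// --- end of sketch ---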
TODO: remove this + +// inverse mapping - go types to clickhouse types +var kindMappings = map[reflect.Kind]string{ + reflect.String: "String", + reflect.Int: "Int64", + reflect.Int8: "Int8", + reflect.Int16: "Int16", + reflect.Int32: "Int32", + reflect.Int64: "Int64", + reflect.Uint: "UInt64", + reflect.Uint8: "UInt8", + reflect.Uint16: "UInt16", + reflect.Uint32: "UInt32", + reflect.Uint64: "UInt64", + reflect.Float32: "Float32", + reflect.Float64: "Float64", + reflect.Bool: "Bool", +} + +// complex types for which a mapping exists - currently we map to String but could enhance in the future for other types +var typeMappings = map[string]struct{}{ + // currently JSON doesn't support DateTime, Decimal or IP so mapped to String + "time.Time": {}, + "decimal.Decimal": {}, + "net.IP": {}, + "uuid.UUID": {}, +} + +type ObjectJSON interface { + Interface + appendEmptyValue() error +} + +type JSONParent interface { + upsertValue(name string, ct string) (*JSONValue, error) + upsertList(name string) (*JSONList, error) + upsertObject(name string) (*JSONObject, error) + insertEmptyColumn(name string) error + columnNames() []string + rows() int +} + +func parseType(name string, vType reflect.Type, values any, isArray bool, jCol JSONParent, numEmpty int) error { + _, ok := typeMappings[vType.String()] + if !ok { + return &UnsupportedColumnTypeError{ + t: Type(vType.String()), + } + } + ct := "String" + if isArray { + ct = fmt.Sprintf("Array(%s)", ct) + } + col, err := jCol.upsertValue(name, ct) + if err != nil { + return err + } + col.origType = vType + + //pre pad with empty - e.g. for new values in maps + for i := 0; i < numEmpty; i++ { + if isArray { + // empty array for nil of the right type + err = col.AppendRow([]string{}) + } else { + // empty value of the type + err = col.AppendRow(fmt.Sprint(reflect.New(vType).Elem().Interface())) + } + if err != nil { + return err + } + } + if isArray { + iValues := reflect.ValueOf(values) + sValues := make([]string, iValues.Len(), iValues.Len()) + for i := 0; i < iValues.Len(); i++ { + sValues[i] = fmt.Sprint(iValues.Index(i).Interface()) + } + return col.AppendRow(sValues) + } + return col.AppendRow(fmt.Sprint(values)) +} + +func parsePrimitive(name string, kind reflect.Kind, values any, isArray bool, jCol JSONParent, numEmpty int) error { + ct, ok := kindMappings[kind] + if !ok { + return &UnsupportedColumnTypeError{ + t: Type(fmt.Sprintf("%s - %s", kind, reflect.TypeOf(values).String())), + } + } + var err error + if isArray { + ct = fmt.Sprintf("Array(%s)", ct) + // if we have a []any we will need to cast to the target column type - this will be based on the first + // values types. Inconsistent slices will fail. + values, err = convertSlice(values) + if err != nil { + return err + } + } + col, err := jCol.upsertValue(name, ct) + if err != nil { + return err + } + + //pre pad with empty - e.g. for new values in maps + for i := 0; i < numEmpty; i++ { + if isArray { + // empty array for nil of the right type + err = col.AppendRow(reflect.MakeSlice(reflect.TypeOf(values), 0, 0).Interface()) + } else { + err = col.AppendRow(nil) + } + if err != nil { + return err + } + } + + return col.AppendRow(values) +} + +// converts a []any of primitives to a typed slice +// maybe this can be done with reflection but likely slower. investigate. +// this uses the first value to determine the type - subsequent values must currently be of the same type - we might cast later +// but wider driver doesn't support e.g. 
int to int64 +func convertSlice(values any) (any, error) { + rValues := reflect.ValueOf(values) + if rValues.Len() == 0 || rValues.Index(0).Kind() != reflect.Interface { + return values, nil + } + var fType reflect.Type + for i := 0; i < rValues.Len(); i++ { + elem := rValues.Index(i).Elem() + if elem.IsValid() { + fType = elem.Type() + break + } + } + if fType == nil { + return []any{}, nil + } + typedSlice := reflect.MakeSlice(reflect.SliceOf(fType), 0, rValues.Len()) + for i := 0; i < rValues.Len(); i++ { + value := rValues.Index(i) + if value.IsNil() { + typedSlice = reflect.Append(typedSlice, reflect.Zero(fType)) + continue + } + if rValues.Index(i).Elem().Type() != fType { + return nil, &Error{ + ColumnType: fmt.Sprint(fType), + Err: fmt.Errorf("inconsistent slices are not supported - expected %s got %s", fType, rValues.Index(i).Elem().Type()), + } + } + typedSlice = reflect.Append(typedSlice, rValues.Index(i).Elem()) + } + return typedSlice.Interface(), nil +} + +func (jCol *JSONList) createNewOffsets(num int) { + for i := 0; i < num; i++ { + //single depth so can take 1st + if jCol.offsets[0].values.col.Rows() == 0 { + // first entry in the column + jCol.offsets[0].values.col.Append(0) + } else { + // entry for this object to see offset from last - offsets are cumulative + jCol.offsets[0].values.col.Append(jCol.offsets[0].values.col.Row(jCol.offsets[0].values.col.Rows() - 1)) + } + } +} + +func getStructFieldName(field reflect.StructField) (string, bool) { + name := field.Name + tag := field.Tag.Get("json") + // not a standard but we allow - to omit fields + if tag == "-" { + return name, true + } + if tag != "" { + return tag, false + } + // support ch tag as well as this is used elsewhere + tag = field.Tag.Get("ch") + if tag == "-" { + return name, true + } + if tag != "" { + return tag, false + } + return name, false +} + +// ensures numeric keys and ` are escaped properly +func getMapFieldName(name string) string { + if !escapeColRegex.MatchString(name) { + return fmt.Sprintf("`%s`", colEscape.Replace(name)) + } + return colEscape.Replace(name) +} + +func parseSlice(name string, values any, jCol JSONParent, preFill int) error { + fType := reflect.TypeOf(values).Elem() + sKind := fType.Kind() + rValues := reflect.ValueOf(values) + + if sKind == reflect.Interface { + //use the first element to determine if it is a complex or primitive map - after this we need consistent dimensions + if rValues.Len() == 0 { + return nil + } + var value reflect.Value + for i := 0; i < rValues.Len(); i++ { + value = rValues.Index(i).Elem() + if value.IsValid() { + break + } + } + if !value.IsValid() { + return nil + } + fType = value.Type() + sKind = value.Kind() + } + + if _, ok := typeMappings[fType.String()]; ok { + return parseType(name, fType, values, true, jCol, preFill) + } else if sKind == reflect.Struct || sKind == reflect.Map || sKind == reflect.Slice { + if rValues.Len() == 0 { + return nil + } + col, err := jCol.upsertList(name) + if err != nil { + return err + } + col.createNewOffsets(preFill + 1) + for i := 0; i < rValues.Len(); i++ { + // increment offset + col.offsets[0].values.col[col.offsets[0].values.col.Rows()-1] += 1 + value := rValues.Index(i) + sKind = value.Kind() + if sKind == reflect.Interface { + sKind = value.Elem().Kind() + } + switch sKind { + case reflect.Struct: + col.isNested = true + if err = oldIterateStruct(value, col, 0); err != nil { + return err + } + case reflect.Map: + col.isNested = true + if err = oldIterateMap(value, col, 0); err != nil { + return err + } 
+ case reflect.Slice: + if err = parseSlice("", value.Interface(), col, 0); err != nil { + return err + } + default: + // only happens if slice has a primitive mixed with complex types in a []any + return &Error{ + ColumnType: fmt.Sprint(sKind), + Err: fmt.Errorf("slices must be same dimension in column %s", col.Name()), + } + } + } + return nil + } + return parsePrimitive(name, sKind, values, true, jCol, preFill) +} + +func parseStruct(name string, structVal reflect.Value, jCol JSONParent, preFill int) error { + col, err := jCol.upsertObject(name) + if err != nil { + return err + } + return oldIterateStruct(structVal, col, preFill) +} + +func oldIterateStruct(structVal reflect.Value, col JSONParent, preFill int) error { + // structs generally have consistent field counts but we ignore nil values that are any as we can't infer from + // these until they occur - so we might need to either backfill when to do occur or insert empty based on previous + if structVal.Kind() == reflect.Interface { + // can happen if passed from []any + structVal = structVal.Elem() + } + + currentColumns := col.columnNames() + columnLookup := make(map[string]struct{}) + numRows := col.rows() + for _, name := range currentColumns { + columnLookup[name] = struct{}{} + } + addedColumns := make([]string, structVal.NumField(), structVal.NumField()) + newColumn := false + + for i := 0; i < structVal.NumField(); i++ { + fName, omit := getStructFieldName(structVal.Type().Field(i)) + if omit { + continue + } + field := structVal.Field(i) + if !field.CanInterface() { + // can't interface - likely not exported so ignore the field + continue + } + kind := field.Kind() + value := field.Interface() + fType := field.Type() + //resolve underlying kind + if kind == reflect.Interface { + if value == nil { + // ignore nil fields + continue + } + kind = reflect.TypeOf(value).Kind() + field = reflect.ValueOf(value) + fType = field.Type() + } + if _, ok := columnLookup[fName]; !ok && len(currentColumns) > 0 { + // new column - need to handle missing + preFill = numRows + newColumn = true + } + if _, ok := typeMappings[fType.String()]; ok { + if err := parseType(fName, fType, value, false, col, preFill); err != nil { + return err + } + } else { + switch kind { + case reflect.Slice: + if reflect.ValueOf(value).Len() == 0 { + continue + } + if err := parseSlice(fName, value, col, preFill); err != nil { + return err + } + case reflect.Struct: + if err := parseStruct(fName, field, col, preFill); err != nil { + return err + } + case reflect.Map: + if err := parseMap(fName, field, col, preFill); err != nil { + return err + } + default: + if err := parsePrimitive(fName, kind, value, false, col, preFill); err != nil { + return err + } + } + } + addedColumns[i] = fName + if newColumn { + // reset as otherwise prefill overflow to other fields. 
But don't reset if this prefill has come from + // a higher level + preFill = 0 + } + } + // handle missing + missingColumns := difference(currentColumns, addedColumns) + for _, name := range missingColumns { + if err := col.insertEmptyColumn(name); err != nil { + return err + } + } + return nil +} + +func parseMap(name string, mapVal reflect.Value, jCol JSONParent, preFill int) error { + if mapVal.Type().Key().Kind() != reflect.String { + return &Error{ + ColumnType: fmt.Sprint(mapVal.Type().Key().Kind()), + Err: fmt.Errorf("map keys must be string for column %s", name), + } + } + col, err := jCol.upsertObject(name) + if err != nil { + return err + } + return oldIterateMap(mapVal, col, preFill) +} + +func oldIterateMap(mapVal reflect.Value, col JSONParent, preFill int) error { + // maps can have inconsistent numbers of elements - we must ensure they are consistent in the encoding + // two inconsistent options - 1. new - map has new columns 2. massing - map has missing columns + // for (1) we need to update previous, for (2) we need to ensure we add a null entry + if mapVal.Kind() == reflect.Interface { + // can happen if passed from []any + mapVal = mapVal.Elem() + } + + currentColumns := col.columnNames() + //gives us a fast lookup for large maps + columnLookup := make(map[string]struct{}) + numRows := col.rows() + // true if we need nil values + for _, name := range currentColumns { + columnLookup[name] = struct{}{} + } + addedColumns := make([]string, len(mapVal.MapKeys()), len(mapVal.MapKeys())) + newColumn := false + for i, key := range mapVal.MapKeys() { + if newColumn { + // reset as otherwise prefill overflow to other fields. But don't reset if this prefill has come from + // a higher level + preFill = 0 + } + + name := getMapFieldName(key.Interface().(string)) + if _, ok := columnLookup[name]; !ok && len(currentColumns) > 0 { + // new column - need to handle + preFill = numRows + newColumn = true + } + field := mapVal.MapIndex(key) + kind := field.Kind() + fType := field.Type() + + if kind == reflect.Interface { + if field.Interface() == nil { + // ignore nil fields + continue + } + kind = reflect.TypeOf(field.Interface()).Kind() + field = reflect.ValueOf(field.Interface()) + fType = field.Type() + } + if _, ok := typeMappings[fType.String()]; ok { + if err := parseType(name, fType, field.Interface(), false, col, preFill); err != nil { + return err + } + } else { + switch kind { + case reflect.Struct: + if err := parseStruct(name, field, col, preFill); err != nil { + return err + } + case reflect.Slice: + if err := parseSlice(name, field.Interface(), col, preFill); err != nil { + return err + } + case reflect.Map: + if err := parseMap(name, field, col, preFill); err != nil { + return err + } + default: + if err := parsePrimitive(name, kind, field.Interface(), false, col, preFill); err != nil { + return err + } + } + } + addedColumns[i] = name + } + // handle missing + missingColumns := difference(currentColumns, addedColumns) + for _, name := range missingColumns { + if err := col.insertEmptyColumn(name); err != nil { + return err + } + } + return nil +} + +func appendStructOrMap(jCol *JSONObject, data any) error { + vData := reflect.ValueOf(data) + kind := vData.Kind() + if kind == reflect.Struct { + return oldIterateStruct(vData, jCol, 0) + } + if kind == reflect.Map { + if reflect.TypeOf(data).Key().Kind() != reflect.String { + return &Error{ + ColumnType: fmt.Sprint(reflect.TypeOf(data).Key().Kind()), + Err: fmt.Errorf("map keys must be string for column %s", jCol.Name()), + 
} + } + if jCol.columns == nil && vData.Len() == 0 { + // if map is empty, we need to create an empty Tuple to make sure subcolumns protocol is happy + // _dummy is a ClickHouse internal name for empty Tuple subcolumn + // it has the same effect as `INSERT INTO single_json_type_table VALUES ('{}');` + jCol.upsertValue("_dummy", "Int8") + return jCol.insertEmptyColumn("_dummy") + } + return oldIterateMap(vData, jCol, 0) + } + return &UnsupportedColumnTypeError{ + t: Type(fmt.Sprint(kind)), + } +} + +type JSONValue struct { + Interface + // represents the type e.g. uuid - these may have been mapped to a Column type support by JSON e.g. String + origType reflect.Type +} + +func (jCol *JSONValue) Reset() { + jCol.Interface.Reset() +} + +func (jCol *JSONValue) appendEmptyValue() error { + switch jCol.Interface.(type) { + case *Array: + if jCol.Rows() > 0 { + return jCol.AppendRow(reflect.MakeSlice(reflect.TypeOf(jCol.Row(0, false)), 0, 0).Interface()) + } + return &Error{ + ColumnType: "unknown", + Err: fmt.Errorf("can't add empty value to column %s - no entries to infer type", jCol.Name()), + } + default: + // can't just append nil here as we need a custom nil value for the type + if jCol.origType != nil { + return jCol.AppendRow(fmt.Sprint(reflect.New(jCol.origType).Elem().Interface())) + } + return jCol.AppendRow(nil) + } +} + +func (jCol *JSONValue) Type() Type { + return Type(fmt.Sprintf("%s %s", jCol.Name(), jCol.Interface.Type())) +} + +type JSONList struct { + Array + name string + isNested bool // indicates if this a list of objects i.e. a Nested +} + +func (jCol *JSONList) Name() string { + return jCol.name +} + +func (jCol *JSONList) columnNames() []string { + return jCol.Array.values.(*JSONObject).columnNames() +} + +func (jCol *JSONList) rows() int { + return jCol.values.(*JSONObject).Rows() +} + +func createJSONList(name string, tz *time.Location) (jCol *JSONList) { + // lists are represented as Nested which are in turn encoded as Array(Tuple()). 
We thus pass a Array(JSONObject()) + // as this encodes like a tuple + lCol := &JSONList{ + name: name, + } + lCol.values = &JSONObject{tz: tz} + // depth should always be one as nested arrays aren't possible + lCol.depth = 1 + lCol.scanType = scanTypeSlice + offsetScanTypes := []reflect.Type{lCol.scanType} + lCol.offsets = []*offset{{ + scanType: offsetScanTypes[0], + }} + return lCol +} + +func (jCol *JSONList) appendEmptyValue() error { + // only need to bump the offsets + jCol.createNewOffsets(1) + return nil +} + +func (jCol *JSONList) insertEmptyColumn(name string) error { + return jCol.values.(*JSONObject).insertEmptyColumn(name) +} + +func (jCol *JSONList) upsertValue(name string, ct string) (*JSONValue, error) { + // check if column exists and reuse if same type, error if same name and different type + jObj := jCol.values.(*JSONObject) + cols := jObj.columns + for i := range cols { + sCol := cols[i] + if sCol.Name() == name { + vCol, ok := cols[i].(*JSONValue) + if !ok { + sType := cols[i].Type() + return nil, &Error{ + ColumnType: fmt.Sprint(sType), + Err: fmt.Errorf("type mismatch in column %s - expected value, got %s", name, sType), + } + } + tType := vCol.Interface.Type() + if tType != Type(ct) { + return nil, &Error{ + ColumnType: ct, + Err: fmt.Errorf("type mismatch in column %s - expected %s, got %s", name, tType, ct), + } + } + return vCol, nil + } + } + col, err := Type(ct).Column(name, jObj.tz) + if err != nil { + return nil, err + } + vCol := &JSONValue{ + Interface: col, + } + jCol.values.(*JSONObject).columns = append(cols, vCol) // nolint:gocritic + return vCol, nil +} + +func (jCol *JSONList) upsertList(name string) (*JSONList, error) { + // check if column exists and reuse if same type, error if same name and different type + jObj := jCol.values.(*JSONObject) + cols := jCol.values.(*JSONObject).columns + for i := range cols { + sCol := cols[i] + if sCol.Name() == name { + sCol, ok := cols[i].(*JSONList) + if !ok { + return nil, &Error{ + ColumnType: fmt.Sprint(cols[i].Type()), + Err: fmt.Errorf("type mismatch in column %s - expected list, got %s", name, cols[i].Type()), + } + } + return sCol, nil + } + } + lCol := createJSONList(name, jObj.tz) + jCol.values.(*JSONObject).columns = append(cols, lCol) // nolint:gocritic + return lCol, nil + +} + +func (jCol *JSONList) upsertObject(name string) (*JSONObject, error) { + // check if column exists and reuse if same type, error if same name and different type + jObj := jCol.values.(*JSONObject) + cols := jObj.columns + for i := range cols { + sCol := cols[i] + if sCol.Name() == name { + sCol, ok := cols[i].(*JSONObject) + if !ok { + sType := cols[i].Type() + return nil, &Error{ + ColumnType: fmt.Sprint(sType), + Err: fmt.Errorf("type mismatch in column %s, expected object got %s", name, sType), + } + } + return sCol, nil + } + } + // lists are represented as Nested which are in turn encoded as Array(Tuple()). 
We thus pass a Array(JSONObject()) + // as this encodes like a tuple + oCol := &JSONObject{ + name: name, + tz: jObj.tz, + } + jCol.values.(*JSONObject).columns = append(cols, oCol) // nolint:gocritic + return oCol, nil +} + +func (jCol *JSONList) Type() Type { + cols := jCol.values.(*JSONObject).columns + subTypes := make([]string, len(cols)) + for i, v := range cols { + subTypes[i] = string(v.Type()) + } + // can be a list of lists or a nested + if jCol.isNested { + return Type(fmt.Sprintf("%s Nested(%s)", jCol.name, strings.Join(subTypes, ", "))) + } + return Type(fmt.Sprintf("%s Array(%s)", jCol.name, strings.Join(subTypes, ", "))) +} + +type JSONObject struct { + columns []ObjectJSON + name string + root bool + encoding uint8 + tz *time.Location +} + +func (jCol *JSONObject) Reset() { + for i := range jCol.columns { + jCol.columns[i].Reset() + } +} + +func (jCol *JSONObject) Name() string { + return jCol.name +} + +func (jCol *JSONObject) columnNames() []string { + columns := make([]string, len(jCol.columns), len(jCol.columns)) + for i := range jCol.columns { + columns[i] = jCol.columns[i].Name() + } + return columns +} + +func (jCol *JSONObject) rows() int { + return jCol.Rows() +} + +func (jCol *JSONObject) appendEmptyValue() error { + for i := range jCol.columns { + if err := jCol.columns[i].appendEmptyValue(); err != nil { + return err + } + } + return nil +} + +func (jCol *JSONObject) insertEmptyColumn(name string) error { + for i := range jCol.columns { + if jCol.columns[i].Name() == name { + if err := jCol.columns[i].appendEmptyValue(); err != nil { + return err + } + return nil + } + } + return &Error{ + ColumnType: "unknown", + Err: fmt.Errorf("column %s is missing - empty value cannot be appended", name), + } +} + +func (jCol *JSONObject) upsertValue(name string, ct string) (*JSONValue, error) { + for i := range jCol.columns { + sCol := jCol.columns[i] + if sCol.Name() == name { + vCol, ok := jCol.columns[i].(*JSONValue) + if !ok { + sType := jCol.columns[i].Type() + return nil, &Error{ + ColumnType: fmt.Sprint(sType), + Err: fmt.Errorf("type mismatch in column %s, expected value got %s", name, sType), + } + } + if vCol.Interface.Type() != Type(ct) { + return nil, &Error{ + ColumnType: ct, + Err: fmt.Errorf("type mismatch in column %s, expected %s got %s", name, vCol.Interface.Type(), ct), + } + } + return vCol, nil + } + } + col, err := Type(ct).Column(name, jCol.tz) + if err != nil { + return nil, err + } + vCol := &JSONValue{ + Interface: col, + } + jCol.columns = append(jCol.columns, vCol) + return vCol, nil +} + +func (jCol *JSONObject) upsertList(name string) (*JSONList, error) { + for i := range jCol.columns { + sCol := jCol.columns[i] + if sCol.Name() == name { + sCol, ok := jCol.columns[i].(*JSONList) + if !ok { + sType := jCol.columns[i].Type() + return nil, &Error{ + ColumnType: fmt.Sprint(sType), + Err: fmt.Errorf("type mismatch in column %s, expected list got %s", name, sType), + } + } + return sCol, nil + } + } + lCol := createJSONList(name, jCol.tz) + jCol.columns = append(jCol.columns, lCol) + return lCol, nil +} + +func (jCol *JSONObject) upsertObject(name string) (*JSONObject, error) { + // check if it exists + for i := range jCol.columns { + sCol := jCol.columns[i] + if sCol.Name() == name { + sCol, ok := jCol.columns[i].(*JSONObject) + if !ok { + sType := jCol.columns[i].Type() + return nil, &Error{ + ColumnType: fmt.Sprint(sType), + Err: fmt.Errorf("type mismatch in column %s, expected object got %s", name, sType), + } + } + return sCol, nil + } + } + // 
not present so create + oCol := &JSONObject{ + name: name, + tz: jCol.tz, + } + jCol.columns = append(jCol.columns, oCol) + return oCol, nil +} + +func (jCol *JSONObject) Type() Type { + if jCol.root { + return "Object('json')" + } + return jCol.FullType() +} + +func (jCol *JSONObject) FullType() Type { + subTypes := make([]string, len(jCol.columns)) + for i, v := range jCol.columns { + subTypes[i] = string(v.Type()) + } + if jCol.root { + return Type(fmt.Sprintf("Tuple(%s)", strings.Join(subTypes, ", "))) + } + return Type(fmt.Sprintf("%s Tuple(%s)", jCol.name, strings.Join(subTypes, ", "))) +} + +func (jCol *JSONObject) ScanType() reflect.Type { + return scanTypeMap +} + +func (jCol *JSONObject) Rows() int { + if len(jCol.columns) != 0 { + return jCol.columns[0].Rows() + } + return 0 +} + +// ClickHouse returns JSON as a tuple i.e. these will never be invoked + +func (jCol *JSONObject) Row(i int, ptr bool) any { + panic("Not implemented") +} + +func (jCol *JSONObject) ScanRow(dest any, row int) error { + panic("Not implemented") +} + +func (jCol *JSONObject) Append(v any) (nulls []uint8, err error) { + jSlice := reflect.ValueOf(v) + if jSlice.Kind() != reflect.Slice { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(jCol.Type()), + From: fmt.Sprintf("slice of structs/map or strings required - received %T", v), + } + } + for i := 0; i < jSlice.Len(); i++ { + if err := jCol.AppendRow(jSlice.Index(i).Interface()); err != nil { + return nil, err + } + } + return nil, nil +} + +func (jCol *JSONObject) AppendRow(v any) error { + if reflect.ValueOf(v).Kind() == reflect.Struct || reflect.ValueOf(v).Kind() == reflect.Map { + if jCol.columns != nil && jCol.encoding == 1 { + return &Error{ + ColumnType: fmt.Sprint(jCol.Type()), + Err: fmt.Errorf("encoding of JSON columns cannot be mixed in a batch - %s cannot be added as previously String", reflect.ValueOf(v).Kind()), + } + } + err := appendStructOrMap(jCol, v) + return err + } + switch v := v.(type) { + case string: + if jCol.columns != nil && jCol.encoding == 0 { + return &Error{ + ColumnType: fmt.Sprint(jCol.Type()), + Err: fmt.Errorf("encoding of JSON columns cannot be mixed in a batch - %s cannot be added as previously Struct/Map", reflect.ValueOf(v).Kind()), + } + } + jCol.encoding = 1 + if jCol.columns == nil { + jCol.columns = append(jCol.columns, &JSONValue{Interface: &String{}}) + } + jCol.columns[0].AppendRow(v) + default: + return &ColumnConverterError{ + Op: "AppendRow", + To: "String", + From: fmt.Sprintf("json row must be struct, map or string - received %T", v), + } + } + return nil +} + +func (jCol *JSONObject) Decode(reader *proto.Reader, rows int) error { + panic("Not implemented") +} + +func (jCol *JSONObject) Encode(buffer *proto.Buffer) { + if jCol.root && jCol.encoding == 0 { + buffer.PutString(string(jCol.FullType())) + } + for _, c := range jCol.columns { + c.Encode(buffer) + } +} + +func (jCol *JSONObject) ReadStatePrefix(reader *proto.Reader) error { + _, err := reader.UInt8() + return err +} + +func (jCol *JSONObject) WriteStatePrefix(buffer *proto.Buffer) error { + buffer.PutUInt8(jCol.encoding) + return nil +} + +var ( + _ Interface = (*JSONObject)(nil) + _ CustomSerialization = (*JSONObject)(nil) +) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/sharedvariant.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/sharedvariant.go new file mode 100644 index 00000000..b356f1ac --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/sharedvariant.go @@ 
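As a usage note for the json.go column code above: a minimal sketch of inserting a Go struct through this path, assuming a table with a single Object('json') column and the experimental object type enabled on the server. The table name, struct shape and connection handling are assumptions for the sketch, not part of the vendored code.

package main

import (
	"context"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// event is a hypothetical row shape; field names map to JSON sub-columns via
// the json tags read by getStructFieldName above.
type event struct {
	Name string   `json:"name"`
	Tags []string `json:"tags"`
	Meta struct {
		UserID int64 `json:"user_id"`
	} `json:"meta"`
}

// insertEvents assumes a table such as: CREATE TABLE events (body Object('json')).
func insertEvents(ctx context.Context, conn driver.Conn) error {
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO events")
	if err != nil {
		return err
	}
	e := event{Name: "page_view", Tags: []string{"web", "eu"}}
	e.Meta.UserID = 42
	// a struct (or a map with string keys) is routed to appendStructOrMap
	if err := batch.Append(e); err != nil {
		return err
	}
	return batch.Send()
}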
-0,0 +1,72 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package column + +import ( + "github.com/ClickHouse/ch-go/proto" + "reflect" +) + +type SharedVariant struct { + name string + stringData String +} + +func (c *SharedVariant) Name() string { + return c.name +} + +func (c *SharedVariant) Type() Type { + return "SharedVariant" +} + +func (c *SharedVariant) Rows() int { + return c.stringData.Rows() +} + +func (c *SharedVariant) Row(i int, ptr bool) any { + return c.stringData.Row(i, ptr) +} + +func (c *SharedVariant) ScanRow(dest any, row int) error { + return c.stringData.ScanRow(dest, row) +} + +func (c *SharedVariant) Append(v any) (nulls []uint8, err error) { + return c.stringData.Append(v) +} + +func (c *SharedVariant) AppendRow(v any) error { + return c.stringData.AppendRow(v) +} + +func (c *SharedVariant) Decode(reader *proto.Reader, rows int) error { + return c.stringData.Decode(reader, rows) +} + +func (c *SharedVariant) Encode(buffer *proto.Buffer) { + c.stringData.Encode(buffer) +} + +func (c *SharedVariant) ScanType() reflect.Type { + return c.stringData.ScanType() +} + +func (c *SharedVariant) Reset() { + c.stringData.Reset() +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go index 5ce480b0..79b86e70 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go @@ -21,6 +21,7 @@ import ( "database/sql" "database/sql/driver" "encoding" + "encoding/json" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -71,6 +72,11 @@ func (col *String) ScanRow(dest any, row int) error { **d = val case *sql.NullString: return d.Scan(val) + case *json.RawMessage: + *d = json.RawMessage(val) + case **json.RawMessage: + *d = new(json.RawMessage) + **d = json.RawMessage(val) case encoding.BinaryUnmarshaler: return d.UnmarshalBinary(binary.Str2Bytes(val, len(val))) default: @@ -111,6 +117,10 @@ func (col *String) AppendRow(v any) error { default: col.col.Append("") } + case json.RawMessage: + col.col.AppendBytes(v) + case *json.RawMessage: + col.col.AppendBytes(*v) case []byte: col.col.AppendBytes(v) case nil: @@ -171,6 +181,16 @@ func (col *String) Append(v any) (nulls []uint8, err error) { } col.AppendRow(v[i]) } + case []json.RawMessage: + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(string(v[i])) + } + case []*json.RawMessage: + nulls = make([]uint8, len(v)) + for i := range v { + col.col.Append(string(*v[i])) + } case [][]byte: nulls = make([]uint8, len(v)) for i := range v { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date_helpers.go 
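A brief sketch of what the String column additions above enable: json.RawMessage values can now be appended and scanned directly. The table name, column name and connection handling below are assumptions for illustration only.

package main

import (
	"context"
	"encoding/json"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// rawMessageRoundTrip assumes a table with a single String column named payload.
func rawMessageRoundTrip(ctx context.Context, conn driver.Conn) error {
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO logs")
	if err != nil {
		return err
	}
	// json.RawMessage is appended as raw bytes into the String column
	if err := batch.Append(json.RawMessage(`{"level":"info","msg":"hello"}`)); err != nil {
		return err
	}
	if err := batch.Send(); err != nil {
		return err
	}

	rows, err := conn.Query(ctx, "SELECT payload FROM logs")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var payload json.RawMessage
		// ScanRow now accepts *json.RawMessage as a destination
		if err := rows.Scan(&payload); err != nil {
			return err
		}
		_ = payload
	}
	return rows.Err()
}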
b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/time_helper.go similarity index 60% rename from vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date_helpers.go rename to vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/time_helper.go index 2e6691ec..f71b8f92 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date_helpers.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/time_helper.go @@ -17,31 +17,13 @@ package column -import ( - "fmt" - "time" -) +import "time" -const secInDay = 24 * 60 * 60 +// getTimeWithDifferentLocation returns the same time but with different location, e.g. +// "2024-08-15 13:22:34 -03:00" will become "2024-08-15 13:22:34 +04:00". +func getTimeWithDifferentLocation(t time.Time, loc *time.Location) time.Time { + year, month, day := t.Date() + hour, minute, sec := t.Clock() -func dateOverflow(min, max, v time.Time, format string) error { - if v.Before(min) || v.After(max) { - return &DateOverflowError{ - Min: min, - Max: max, - Value: v, - Format: format, - } - } - return nil -} - -type DateOverflowError struct { - Min, Max time.Time - Value time.Time - Format string -} - -func (e *DateOverflowError) Error() string { - return fmt.Sprintf("clickhouse: dateTime overflow. must be between %s and %s", e.Min.Format(e.Format), e.Max.Format(e.Format)) + return time.Date(year, month, day, hour, minute, sec, t.Nanosecond(), loc) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go index 95e00db0..ea5bfff0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go @@ -200,13 +200,6 @@ func setJSONFieldValue(field reflect.Value, value reflect.Value) error { } } - // check if our target is a string - if field.Kind() == reflect.String { - if v := reflect.ValueOf(fmt.Sprint(value.Interface())); v.Type().AssignableTo(field.Type()) { - field.Set(v) - return nil - } - } if value.CanConvert(field.Type()) { field.Set(value.Convert(field.Type())) return nil @@ -447,7 +440,7 @@ func (col *Tuple) scan(targetType reflect.Type, row int) (reflect.Value, error) //tuples can be scanned into slices - specifically default for unnamed tuples rSlice, err := col.scanSlice(targetType, row) if err != nil { - return reflect.Value{}, nil + return reflect.Value{}, err } return rSlice, nil case reflect.Interface: @@ -518,6 +511,68 @@ func (col *Tuple) AppendRow(v any) error { value = value.Elem() } switch value.Kind() { + case reflect.Struct: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if !col.isNamed { + return &Error{ + ColumnType: string(col.chType), + Err: fmt.Errorf("converting from %T is not supported for unnamed tuples - use a slice", v), + } + } + + valueType := value.Type() + fieldNames := make(map[string]struct{}, value.NumField()) + for i := 0; i < value.NumField(); i++ { + if !value.Field(i).CanInterface() { + // can't interface - likely not exported so ignore the field + continue + } + name, omit := getStructFieldName(valueType.Field(i)) + if omit { + continue + } + fieldNames[name] = struct{}{} + } + + if len(fieldNames) != len(col.columns) { + return &Error{ + ColumnType: string(col.chType), + Err: 
fmt.Errorf("invalid size. expected %d got %d", len(col.columns), len(fieldNames)), + } + } + + for i := 0; i < value.NumField(); i++ { + if !value.Field(i).CanInterface() { + // can't interface - likely not exported so ignore the field + continue + } + name, omit := getStructFieldName(valueType.Field(i)) + if omit { + continue + } + if _, ok := col.index[name]; !ok { + return &Error{ + ColumnType: string(col.chType), + Err: fmt.Errorf("sub column '%s' does not exist in %s", name, col.Name()), + } + } + if err := col.columns[col.index[name]].AppendRow(value.Field(i).Interface()); err != nil { + return err + } + } + return nil case reflect.Map: if !col.isNamed { return &Error{ diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/variant.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/variant.go new file mode 100644 index 00000000..e1126cad --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/variant.go @@ -0,0 +1,368 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package column + +import ( + "database/sql/driver" + "fmt" + "github.com/ClickHouse/clickhouse-go/v2/lib/chcol" + "reflect" + "strings" + "time" + + "github.com/ClickHouse/ch-go/proto" +) + +const SupportedVariantSerializationVersion = 0 +const NullVariantDiscriminator uint8 = 255 + +type Variant struct { + chType Type + name string + + discriminators []uint8 + offsets []int + + columns []Interface + columnTypeIndex map[string]uint8 +} + +func (c *Variant) parse(t Type, tz *time.Location) (_ *Variant, err error) { + c.chType = t + var ( + element []rune + elements []Type + brackets int + appendElement = func() { + if len(element) != 0 { + cType := strings.TrimSpace(string(element)) + if parts := strings.SplitN(cType, " ", 2); len(parts) == 2 { + if !strings.Contains(parts[0], "(") { + cType = parts[1] + } + } + + elements = append(elements, Type(strings.TrimSpace(cType))) + } + } + ) + + for _, r := range t.params() { + switch r { + case '(': + brackets++ + case ')': + brackets-- + case ',': + if brackets == 0 { + appendElement() + element = element[:0] + continue + } + } + element = append(element, r) + } + + appendElement() + + c.columnTypeIndex = make(map[string]uint8, len(elements)) + for _, columnType := range elements { + column, err := columnType.Column("", tz) + if err != nil { + return nil, err + } + + c.addColumn(column) + } + + if len(c.columns) != 0 { + return c, nil + } + + return nil, &UnsupportedColumnTypeError{ + t: t, + } +} + +func (c *Variant) addColumn(col Interface) { + c.columns = append(c.columns, col) + c.columnTypeIndex[string(col.Type())] = uint8(len(c.columns) - 1) +} + +func (c *Variant) appendDiscriminatorRow(d uint8) { + c.discriminators = append(c.discriminators, d) +} + +func (c *Variant) appendNullRow() { + c.appendDiscriminatorRow(NullVariantDiscriminator) +} + +func (c *Variant) Name() string { + return c.name +} + +func (c *Variant) Type() Type { + return c.chType +} + +func (c *Variant) Rows() int { + return len(c.discriminators) +} + +func (c *Variant) Row(i int, ptr bool) any { + typeIndex := c.discriminators[i] + offsetIndex := c.offsets[i] + var value any + var chType string + if typeIndex != NullVariantDiscriminator { + value = c.columns[typeIndex].Row(offsetIndex, ptr) + chType = string(c.columns[typeIndex].Type()) + } + + vt := chcol.NewVariantWithType(value, chType) + if ptr { + return &vt + } + + return vt +} + +func (c *Variant) ScanRow(dest any, row int) error { + typeIndex := c.discriminators[row] + offsetIndex := c.offsets[row] + var value any + var chType string + if typeIndex != NullVariantDiscriminator { + value = c.columns[typeIndex].Row(offsetIndex, false) + chType = string(c.columns[typeIndex].Type()) + } + + switch v := dest.(type) { + case *chcol.Variant: + vt := chcol.NewVariantWithType(value, chType) + *v = vt + case **chcol.Variant: + vt := chcol.NewVariantWithType(value, chType) + **v = vt + default: + if typeIndex == NullVariantDiscriminator { + return nil + } + + if err := c.columns[typeIndex].ScanRow(dest, offsetIndex); err != nil { + return err + } + } + + return nil +} + +func (c *Variant) Append(v any) (nulls []uint8, err error) { + switch vv := v.(type) { + case []chcol.Variant: + for i, vt := range vv { + err := c.AppendRow(vt) + if err != nil { + return nil, fmt.Errorf("failed to AppendRow at index %d: %w", i, err) + } + } + + return nil, nil + case []*chcol.Variant: + for i, vt := range vv { + err := c.AppendRow(vt) + if err != nil { + return nil, fmt.Errorf("failed to AppendRow at index %d: %w", i, err) + } + } + + 
return nil, nil + default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(c.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + + return c.Append(val) + } + + return nil, &ColumnConverterError{ + Op: "Append", + To: string(c.chType), + From: fmt.Sprintf("%T", v), + } + } +} + +func (c *Variant) AppendRow(v any) error { + var requestedType string + switch vv := v.(type) { + case nil: + c.appendNullRow() + return nil + case chcol.Variant: + requestedType = vv.Type() + v = vv.Any() + if vv.Nil() { + c.appendNullRow() + return nil + } + case *chcol.Variant: + requestedType = vv.Type() + v = vv.Any() + if vv.Nil() { + c.appendNullRow() + return nil + } + } + + if requestedType != "" { + typeIndex, ok := c.columnTypeIndex[requestedType] + if !ok { + return fmt.Errorf("value %v cannot be stored in variant column %s with requested type %s: type not present in variant", v, c.chType, requestedType) + } + + if err := c.columns[typeIndex].AppendRow(v); err != nil { + return fmt.Errorf("failed to append row to variant column with requested type %s: %w", requestedType, err) + } + + c.appendDiscriminatorRow(typeIndex) + return nil + } + + // If preferred type wasn't provided, try each column + var err error + for i, col := range c.columns { + if err = col.AppendRow(v); err == nil { + c.appendDiscriminatorRow(uint8(i)) + return nil + } + } + + return fmt.Errorf("value \"%v\" cannot be stored in variant column: no compatible types", v) +} + +func (c *Variant) encodeHeader(buffer *proto.Buffer) { + buffer.PutUInt64(SupportedVariantSerializationVersion) +} + +func (c *Variant) encodeData(buffer *proto.Buffer) { + buffer.PutRaw(c.discriminators) + + for _, col := range c.columns { + col.Encode(buffer) + } +} + +func (c *Variant) WriteStatePrefix(buffer *proto.Buffer) error { + c.encodeHeader(buffer) + + return nil +} + +func (c *Variant) Encode(buffer *proto.Buffer) { + c.encodeData(buffer) +} + +func (c *Variant) ScanType() reflect.Type { + return scanTypeVariant +} + +func (c *Variant) Reset() { + c.discriminators = c.discriminators[:0] + + for _, col := range c.columns { + col.Reset() + } +} + +func (c *Variant) decodeHeader(reader *proto.Reader) error { + variantSerializationVersion, err := reader.UInt64() + if err != nil { + return fmt.Errorf("failed to read variant discriminator version: %w", err) + } + + if variantSerializationVersion != SupportedVariantSerializationVersion { + return fmt.Errorf("unsupported variant discriminator version: %d", variantSerializationVersion) + } + + for _, col := range c.columns { + if serialize, ok := col.(CustomSerialization); ok { + if err := serialize.ReadStatePrefix(reader); err != nil { + return fmt.Errorf("failed to read prefix for type %s in variant: %w", col.Type(), err) + } + } + } + + return nil +} + +func (c *Variant) decodeData(reader *proto.Reader, rows int) error { + c.discriminators = make([]uint8, rows) + c.offsets = make([]int, rows) + rowCountByType := make(map[uint8]int, len(c.columns)) + + for i := 0; i < rows; i++ { + disc, err := reader.ReadByte() + if err != nil { + return fmt.Errorf("failed to read discriminator at index %d: %w", i, err) + } + + c.discriminators[i] = disc + if rowCountByType[disc] == 0 { + rowCountByType[disc] = 1 + } else { + rowCountByType[disc]++ + } + + c.offsets[i] = rowCountByType[disc] - 1 + } + + for i, col := range c.columns { + cRows := rowCountByType[uint8(i)] + if err := 
col.Decode(reader, cRows); err != nil { + return fmt.Errorf("failed to decode variant column with %s type: %w", col.Type(), err) + } + } + + return nil +} + +func (c *Variant) ReadStatePrefix(reader *proto.Reader) error { + err := c.decodeHeader(reader) + if err != nil { + return fmt.Errorf("failed to decode variant header: %w", err) + } + + return nil +} + +func (c *Variant) Decode(reader *proto.Reader, rows int) error { + err := c.decodeData(reader, rows) + if err != nil { + return fmt.Errorf("failed to decode variant data: %w", err) + } + + return nil +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go index f88bb43e..a3a9c35f 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go @@ -22,6 +22,7 @@ import ( "reflect" "time" + "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) @@ -85,6 +86,7 @@ type ( Send() error IsSent() bool Rows() int + Columns() []column.Interface } BatchColumn interface { Append(any) error diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go index d81760c9..c214c2c9 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go @@ -2,6 +2,7 @@ package driver type PrepareBatchOptions struct { ReleaseConnection bool + CloseOnFlush bool } type PrepareBatchOption func(options *PrepareBatchOptions) @@ -11,3 +12,10 @@ func WithReleaseConnection() PrepareBatchOption { options.ReleaseConnection = true } } + +// WithCloseOnFlush closes batch INSERT query when Flush is executed +func WithCloseOnFlush() PrepareBatchOption { + return func(options *PrepareBatchOptions) { + options.CloseOnFlush = true + } +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go index 79a5f13f..6debe8cc 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go @@ -301,8 +301,6 @@ func (e *BlockError) Error() string { switch err := e.Err.(type) { case *column.Error: return fmt.Sprintf("clickhouse [%s]: (%s %s) %s", e.Op, e.ColumnName, err.ColumnType, err.Err) - case *column.DateOverflowError: - return fmt.Sprintf("clickhouse: dateTime overflow. 
%s must be between %s and %s", e.ColumnName, err.Min.Format(err.Format), err.Max.Format(err.Format)) } return fmt.Sprintf("clickhouse [%s]: %s %s", e.Op, e.ColumnName, e.Err) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/main.tf b/vendor/github.com/ClickHouse/clickhouse-go/v2/main.tf deleted file mode 100644 index 95a0e7da..00000000 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/main.tf +++ /dev/null @@ -1,59 +0,0 @@ -terraform { - required_providers { - clickhouse = { - source = "ClickHouse/clickhouse" - version = "~> 0.0.2" - } - } -} - -variable "organization_id" { - type = string -} - -variable "token_key" { - type = string -} - -variable "token_secret" { - type = string -} - -variable "service_name" { - type = string -} - -variable "service_password" { - type = string -} - -provider clickhouse { - environment = "production" - organization_id = var.organization_id - token_key = var.token_key - token_secret = var.token_secret -} - -resource "clickhouse_service" "service" { - name = var.service_name - cloud_provider = "aws" - region = "us-east-2" - tier = "development" - idle_scaling = true - password = var.service_password - - ip_access = [ - { - source = "0.0.0.0/0" - description = "Anywhere" - } - ] -} - -output "CLICKHOUSE_HOST" { - value = clickhouse_service.service.endpoints.0.host -} - -output "SERVICE_ID" { - value = clickhouse_service.service.id -} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/query_parameters.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/query_parameters.go index 9b7e94ca..e5473807 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/query_parameters.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/query_parameters.go @@ -18,10 +18,11 @@ package clickhouse import ( - "github.com/ClickHouse/clickhouse-go/v2/lib/driver" - "github.com/pkg/errors" + "errors" "regexp" "time" + + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" ) var ( diff --git a/vendor/github.com/JohnCGriffin/overflow/.travis.yml b/vendor/github.com/JohnCGriffin/overflow/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/JohnCGriffin/overflow/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/JohnCGriffin/overflow/README.md b/vendor/github.com/JohnCGriffin/overflow/README.md deleted file mode 100644 index 418e4275..00000000 --- a/vendor/github.com/JohnCGriffin/overflow/README.md +++ /dev/null @@ -1,84 +0,0 @@ -[![Build Status](https://travis-ci.org/JohnCGriffin/overflow.png)](https://travis-ci.org/JohnCGriffin/overflow) -# overflow -Check for int/int8/int16/int64/int32 integer overflow in Golang arithmetic. -### Install -``` -go get github.com/johncgriffin/overflow -``` -Note that because Go has no template types, the majority of repetitive code is -generated by overflow_template.sh. 
If you have to change an -algorithm, change it there and regenerate the Go code via: -``` -go generate -``` -### Synopsis - -``` -package main - -import "fmt" -import "math" -import "github.com/JohnCGriffin/overflow" - -func main() { - - addend := math.MaxInt64 - 5 - - for i := 0; i < 10; i++ { - sum, ok := overflow.Add(addend, i) - fmt.Printf("%v+%v -> (%v,%v)\n", - addend, i, sum, ok) - } - -} -``` -yields the output -``` -9223372036854775802+0 -> (9223372036854775802,true) -9223372036854775802+1 -> (9223372036854775803,true) -9223372036854775802+2 -> (9223372036854775804,true) -9223372036854775802+3 -> (9223372036854775805,true) -9223372036854775802+4 -> (9223372036854775806,true) -9223372036854775802+5 -> (9223372036854775807,true) -9223372036854775802+6 -> (0,false) -9223372036854775802+7 -> (0,false) -9223372036854775802+8 -> (0,false) -9223372036854775802+9 -> (0,false) -``` - -For int, int64, and int32 types, provide Add, Add32, Add64, Sub, Sub32, Sub64, etc. -Unsigned types not covered at the moment, but such additions are welcome. - -### Stay calm and panic - -There's a good case to be made that a panic is an unidiomatic but proper response. Iff you -believe that there's no valid way to continue your program after math goes wayward, you can -use the easier Addp, Mulp, Subp, and Divp versions which return the normal result or panic. - - -- - - -MIT License - -Copyright (c) 2017 John C. Griffin, - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - - - diff --git a/vendor/github.com/JohnCGriffin/overflow/overflow.go b/vendor/github.com/JohnCGriffin/overflow/overflow.go deleted file mode 100644 index 17e24170..00000000 --- a/vendor/github.com/JohnCGriffin/overflow/overflow.go +++ /dev/null @@ -1,129 +0,0 @@ -/*Package overflow offers overflow-checked integer arithmetic operations -for int, int32, and int64. Each of the operations returns a -result,bool combination. This was prompted by the need to know when -to flow into higher precision types from the math.big library. - -For instance, assuing a 64 bit machine: - -10 + 20 -> 30 -int(math.MaxInt64) + 1 -> -9223372036854775808 - -whereas - -overflow.Add(10,20) -> (30, true) -overflow.Add(math.MaxInt64,1) -> (0, false) - -Add, Sub, Mul, Div are for int. Add64, Add32, etc. are specifically sized. - -If anybody wishes an unsigned version, submit a pull request for code -and new tests. 
*/ -package overflow - -//go:generate ./overflow_template.sh - -import "math" - -func _is64Bit() bool { - maxU32 := uint(math.MaxUint32) - return ((maxU32 << 1) >> 1) == maxU32 -} - -/********** PARTIAL TEST COVERAGE FROM HERE DOWN ************* - -The only way that I could see to do this is a combination of -my normal 64 bit system and a GopherJS running on Node. My -understanding is that its ints are 32 bit. - -So, FEEL FREE to carefully review the code visually. - -*************************************************************/ - -// Unspecified size, i.e. normal signed int - -// Add sums two ints, returning the result and a boolean status. -func Add(a, b int) (int, bool) { - if _is64Bit() { - r64, ok := Add64(int64(a), int64(b)) - return int(r64), ok - } - r32, ok := Add32(int32(a), int32(b)) - return int(r32), ok -} - -// Sub returns the difference of two ints and a boolean status. -func Sub(a, b int) (int, bool) { - if _is64Bit() { - r64, ok := Sub64(int64(a), int64(b)) - return int(r64), ok - } - r32, ok := Sub32(int32(a), int32(b)) - return int(r32), ok -} - -// Mul returns the product of two ints and a boolean status. -func Mul(a, b int) (int, bool) { - if _is64Bit() { - r64, ok := Mul64(int64(a), int64(b)) - return int(r64), ok - } - r32, ok := Mul32(int32(a), int32(b)) - return int(r32), ok -} - -// Div returns the quotient of two ints and a boolean status -func Div(a, b int) (int, bool) { - if _is64Bit() { - r64, ok := Div64(int64(a), int64(b)) - return int(r64), ok - } - r32, ok := Div32(int32(a), int32(b)) - return int(r32), ok -} - -// Quotient returns the quotient, remainder and status of two ints -func Quotient(a, b int) (int, int, bool) { - if _is64Bit() { - q64, r64, ok := Quotient64(int64(a), int64(b)) - return int(q64), int(r64), ok - } - q32, r32, ok := Quotient32(int32(a), int32(b)) - return int(q32), int(r32), ok -} - -/************* Panic versions for int ****************/ - -// Addp returns the sum of two ints, panicking on overflow -func Addp(a, b int) int { - r, ok := Add(a, b) - if !ok { - panic("addition overflow") - } - return r -} - -// Subp returns the difference of two ints, panicking on overflow. -func Subp(a, b int) int { - r, ok := Sub(a, b) - if !ok { - panic("subtraction overflow") - } - return r -} - -// Mulp returns the product of two ints, panicking on overflow. -func Mulp(a, b int) int { - r, ok := Mul(a, b) - if !ok { - panic("multiplication overflow") - } - return r -} - -// Divp returns the quotient of two ints, panicking on overflow. 
-func Divp(a, b int) int { - r, ok := Div(a, b) - if !ok { - panic("division failure") - } - return r -} diff --git a/vendor/github.com/JohnCGriffin/overflow/overflow_impl.go b/vendor/github.com/JohnCGriffin/overflow/overflow_impl.go deleted file mode 100644 index d1cbe867..00000000 --- a/vendor/github.com/JohnCGriffin/overflow/overflow_impl.go +++ /dev/null @@ -1,386 +0,0 @@ -package overflow - -// This is generated code, created by overflow_template.sh executed -// by "go generate" - - - - -// Add8 performs + operation on two int8 operands -// returning a result and status -func Add8(a, b int8) (int8, bool) { - c := a + b - if (c > a) == (b > 0) { - return c, true - } - return c, false -} - -// Add8p is the unchecked panicing version of Add8 -func Add8p(a, b int8) int8 { - r, ok := Add8(a, b) - if !ok { - panic("addition overflow") - } - return r -} - - -// Sub8 performs - operation on two int8 operands -// returning a result and status -func Sub8(a, b int8) (int8, bool) { - c := a - b - if (c < a) == (b > 0) { - return c, true - } - return c, false -} - -// Sub8p is the unchecked panicing version of Sub8 -func Sub8p(a, b int8) int8 { - r, ok := Sub8(a, b) - if !ok { - panic("subtraction overflow") - } - return r -} - - -// Mul8 performs * operation on two int8 operands -// returning a result and status -func Mul8(a, b int8) (int8, bool) { - if a == 0 || b == 0 { - return 0, true - } - c := a * b - if (c < 0) == ((a < 0) != (b < 0)) { - if c/b == a { - return c, true - } - } - return c, false -} - -// Mul8p is the unchecked panicing version of Mul8 -func Mul8p(a, b int8) int8 { - r, ok := Mul8(a, b) - if !ok { - panic("multiplication overflow") - } - return r -} - - - -// Div8 performs / operation on two int8 operands -// returning a result and status -func Div8(a, b int8) (int8, bool) { - q, _, ok := Quotient8(a, b) - return q, ok -} - -// Div8p is the unchecked panicing version of Div8 -func Div8p(a, b int8) int8 { - r, ok := Div8(a, b) - if !ok { - panic("division failure") - } - return r -} - -// Quotient8 performs + operation on two int8 operands -// returning a quotient, a remainder and status -func Quotient8(a, b int8) (int8, int8, bool) { - if b == 0 { - return 0, 0, false - } - c := a / b - status := (c < 0) == ((a < 0) != (b < 0)) - return c, a % b, status -} - - - -// Add16 performs + operation on two int16 operands -// returning a result and status -func Add16(a, b int16) (int16, bool) { - c := a + b - if (c > a) == (b > 0) { - return c, true - } - return c, false -} - -// Add16p is the unchecked panicing version of Add16 -func Add16p(a, b int16) int16 { - r, ok := Add16(a, b) - if !ok { - panic("addition overflow") - } - return r -} - - -// Sub16 performs - operation on two int16 operands -// returning a result and status -func Sub16(a, b int16) (int16, bool) { - c := a - b - if (c < a) == (b > 0) { - return c, true - } - return c, false -} - -// Sub16p is the unchecked panicing version of Sub16 -func Sub16p(a, b int16) int16 { - r, ok := Sub16(a, b) - if !ok { - panic("subtraction overflow") - } - return r -} - - -// Mul16 performs * operation on two int16 operands -// returning a result and status -func Mul16(a, b int16) (int16, bool) { - if a == 0 || b == 0 { - return 0, true - } - c := a * b - if (c < 0) == ((a < 0) != (b < 0)) { - if c/b == a { - return c, true - } - } - return c, false -} - -// Mul16p is the unchecked panicing version of Mul16 -func Mul16p(a, b int16) int16 { - r, ok := Mul16(a, b) - if !ok { - panic("multiplication overflow") - } - return r -} - - - 
-// Div16 performs / operation on two int16 operands -// returning a result and status -func Div16(a, b int16) (int16, bool) { - q, _, ok := Quotient16(a, b) - return q, ok -} - -// Div16p is the unchecked panicing version of Div16 -func Div16p(a, b int16) int16 { - r, ok := Div16(a, b) - if !ok { - panic("division failure") - } - return r -} - -// Quotient16 performs + operation on two int16 operands -// returning a quotient, a remainder and status -func Quotient16(a, b int16) (int16, int16, bool) { - if b == 0 { - return 0, 0, false - } - c := a / b - status := (c < 0) == ((a < 0) != (b < 0)) - return c, a % b, status -} - - - -// Add32 performs + operation on two int32 operands -// returning a result and status -func Add32(a, b int32) (int32, bool) { - c := a + b - if (c > a) == (b > 0) { - return c, true - } - return c, false -} - -// Add32p is the unchecked panicing version of Add32 -func Add32p(a, b int32) int32 { - r, ok := Add32(a, b) - if !ok { - panic("addition overflow") - } - return r -} - - -// Sub32 performs - operation on two int32 operands -// returning a result and status -func Sub32(a, b int32) (int32, bool) { - c := a - b - if (c < a) == (b > 0) { - return c, true - } - return c, false -} - -// Sub32p is the unchecked panicing version of Sub32 -func Sub32p(a, b int32) int32 { - r, ok := Sub32(a, b) - if !ok { - panic("subtraction overflow") - } - return r -} - - -// Mul32 performs * operation on two int32 operands -// returning a result and status -func Mul32(a, b int32) (int32, bool) { - if a == 0 || b == 0 { - return 0, true - } - c := a * b - if (c < 0) == ((a < 0) != (b < 0)) { - if c/b == a { - return c, true - } - } - return c, false -} - -// Mul32p is the unchecked panicing version of Mul32 -func Mul32p(a, b int32) int32 { - r, ok := Mul32(a, b) - if !ok { - panic("multiplication overflow") - } - return r -} - - - -// Div32 performs / operation on two int32 operands -// returning a result and status -func Div32(a, b int32) (int32, bool) { - q, _, ok := Quotient32(a, b) - return q, ok -} - -// Div32p is the unchecked panicing version of Div32 -func Div32p(a, b int32) int32 { - r, ok := Div32(a, b) - if !ok { - panic("division failure") - } - return r -} - -// Quotient32 performs + operation on two int32 operands -// returning a quotient, a remainder and status -func Quotient32(a, b int32) (int32, int32, bool) { - if b == 0 { - return 0, 0, false - } - c := a / b - status := (c < 0) == ((a < 0) != (b < 0)) - return c, a % b, status -} - - - -// Add64 performs + operation on two int64 operands -// returning a result and status -func Add64(a, b int64) (int64, bool) { - c := a + b - if (c > a) == (b > 0) { - return c, true - } - return c, false -} - -// Add64p is the unchecked panicing version of Add64 -func Add64p(a, b int64) int64 { - r, ok := Add64(a, b) - if !ok { - panic("addition overflow") - } - return r -} - - -// Sub64 performs - operation on two int64 operands -// returning a result and status -func Sub64(a, b int64) (int64, bool) { - c := a - b - if (c < a) == (b > 0) { - return c, true - } - return c, false -} - -// Sub64p is the unchecked panicing version of Sub64 -func Sub64p(a, b int64) int64 { - r, ok := Sub64(a, b) - if !ok { - panic("subtraction overflow") - } - return r -} - - -// Mul64 performs * operation on two int64 operands -// returning a result and status -func Mul64(a, b int64) (int64, bool) { - if a == 0 || b == 0 { - return 0, true - } - c := a * b - if (c < 0) == ((a < 0) != (b < 0)) { - if c/b == a { - return c, true - } - } - return c, 
false -} - -// Mul64p is the unchecked panicing version of Mul64 -func Mul64p(a, b int64) int64 { - r, ok := Mul64(a, b) - if !ok { - panic("multiplication overflow") - } - return r -} - - - -// Div64 performs / operation on two int64 operands -// returning a result and status -func Div64(a, b int64) (int64, bool) { - q, _, ok := Quotient64(a, b) - return q, ok -} - -// Div64p is the unchecked panicing version of Div64 -func Div64p(a, b int64) int64 { - r, ok := Div64(a, b) - if !ok { - panic("division failure") - } - return r -} - -// Quotient64 performs + operation on two int64 operands -// returning a quotient, a remainder and status -func Quotient64(a, b int64) (int64, int64, bool) { - if b == 0 { - return 0, 0, false - } - c := a / b - status := (c < 0) == ((a < 0) != (b < 0)) - return c, a % b, status -} - diff --git a/vendor/github.com/JohnCGriffin/overflow/overflow_template.sh b/vendor/github.com/JohnCGriffin/overflow/overflow_template.sh deleted file mode 100644 index b21fb04c..00000000 --- a/vendor/github.com/JohnCGriffin/overflow/overflow_template.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/sh - -exec > overflow_impl.go - -echo "package overflow - -// This is generated code, created by overflow_template.sh executed -// by \"go generate\" - -" - - -for SIZE in 8 16 32 64 -do -echo " - -// Add${SIZE} performs + operation on two int${SIZE} operands -// returning a result and status -func Add${SIZE}(a, b int${SIZE}) (int${SIZE}, bool) { - c := a + b - if (c > a) == (b > 0) { - return c, true - } - return c, false -} - -// Add${SIZE}p is the unchecked panicing version of Add${SIZE} -func Add${SIZE}p(a, b int${SIZE}) int${SIZE} { - r, ok := Add${SIZE}(a, b) - if !ok { - panic(\"addition overflow\") - } - return r -} - - -// Sub${SIZE} performs - operation on two int${SIZE} operands -// returning a result and status -func Sub${SIZE}(a, b int${SIZE}) (int${SIZE}, bool) { - c := a - b - if (c < a) == (b > 0) { - return c, true - } - return c, false -} - -// Sub${SIZE}p is the unchecked panicing version of Sub${SIZE} -func Sub${SIZE}p(a, b int${SIZE}) int${SIZE} { - r, ok := Sub${SIZE}(a, b) - if !ok { - panic(\"subtraction overflow\") - } - return r -} - - -// Mul${SIZE} performs * operation on two int${SIZE} operands -// returning a result and status -func Mul${SIZE}(a, b int${SIZE}) (int${SIZE}, bool) { - if a == 0 || b == 0 { - return 0, true - } - c := a * b - if (c < 0) == ((a < 0) != (b < 0)) { - if c/b == a { - return c, true - } - } - return c, false -} - -// Mul${SIZE}p is the unchecked panicing version of Mul${SIZE} -func Mul${SIZE}p(a, b int${SIZE}) int${SIZE} { - r, ok := Mul${SIZE}(a, b) - if !ok { - panic(\"multiplication overflow\") - } - return r -} - - - -// Div${SIZE} performs / operation on two int${SIZE} operands -// returning a result and status -func Div${SIZE}(a, b int${SIZE}) (int${SIZE}, bool) { - q, _, ok := Quotient${SIZE}(a, b) - return q, ok -} - -// Div${SIZE}p is the unchecked panicing version of Div${SIZE} -func Div${SIZE}p(a, b int${SIZE}) int${SIZE} { - r, ok := Div${SIZE}(a, b) - if !ok { - panic(\"division failure\") - } - return r -} - -// Quotient${SIZE} performs + operation on two int${SIZE} operands -// returning a quotient, a remainder and status -func Quotient${SIZE}(a, b int${SIZE}) (int${SIZE}, int${SIZE}, bool) { - if b == 0 { - return 0, 0, false - } - c := a / b - status := (c < 0) == ((a < 0) != (b < 0)) - return c, a % b, status -} -" -done diff --git a/vendor/github.com/andybalholm/brotli/encoder.go 
b/vendor/github.com/andybalholm/brotli/encoder.go index 650d1e42..19283825 100644 --- a/vendor/github.com/andybalholm/brotli/encoder.go +++ b/vendor/github.com/andybalholm/brotli/encoder.go @@ -21,6 +21,15 @@ func (e *Encoder) Encode(dst []byte, src []byte, matches []matchfinder.Match, la e.wroteHeader = true } + if len(src) == 0 { + if lastBlock { + e.bw.writeBits(2, 3) // islast + isempty + e.bw.jumpToByteBoundary() + return e.bw.dst + } + return dst + } + var literalHisto [256]uint32 var commandHisto [704]uint32 var distanceHisto [64]uint32 diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go index 37ed8e13..507d1cae 100644 --- a/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go +++ b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go @@ -32,14 +32,3 @@ func (e *matchEmitter) emit(m absoluteMatch) { }) e.NextEmit = m.End } - -// trim shortens m if it extends past maxEnd. Then if the length is at least -// minLength, the match is emitted. -func (e *matchEmitter) trim(m absoluteMatch, maxEnd int, minLength int) { - if m.End > maxEnd { - m.End = maxEnd - } - if m.End-m.Start >= minLength { - e.emit(m) - } -} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m4.go b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go index 5b2acba2..81894725 100644 --- a/vendor/github.com/andybalholm/brotli/matchfinder/m4.go +++ b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go @@ -56,7 +56,7 @@ func (q *M4) Reset() { } func (q *M4) score(m absoluteMatch) int { - return (m.End-m.Start)*256 + bits.LeadingZeros32(uint32(m.Start-m.Match))*q.DistanceBitCost + return (m.End-m.Start)*256 + (bits.LeadingZeros32(uint32(m.Start-m.Match))-32)*q.DistanceBitCost } func (q *M4) FindMatches(dst []Match, src []byte) []Match { @@ -112,7 +112,12 @@ func (q *M4) FindMatches(dst []Match, src []byte) []Match { // We have found some matches, and we're far enough along that we probably // won't find overlapping matches, so we might as well emit them. if matches[1] != (absoluteMatch{}) { - e.trim(matches[1], matches[0].Start, q.MinLength) + if matches[1].End > matches[0].Start { + matches[1].End = matches[0].Start + } + if matches[1].End-matches[1].Start >= q.MinLength && q.score(matches[1]) > 0 { + e.emit(matches[1]) + } } e.emit(matches[0]) matches = [3]absoluteMatch{} @@ -139,12 +144,10 @@ func (q *M4) FindMatches(dst []Match, src []byte) []Match { // Look for a match. 
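// In the rewritten M4 matcher, every candidate match is scored with q.score,
// whose distance term (bits.LeadingZeros32(distance) - 32) is now negative,
// so long-distance matches carry an explicit cost; a match is only emitted
// when its score stays positive, which replaces the removed matchEmitter.trim
// helper used previously for trimming and length checks.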
var currentMatch absoluteMatch - if i-candidate != matches[0].Start-matches[0].Match { - if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { - m := extendMatch2(src, i, candidate, e.NextEmit) - if m.End-m.Start > q.MinLength { - currentMatch = m - } + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength && q.score(m) > 0 { + currentMatch = m } } @@ -157,12 +160,10 @@ func (q *M4) FindMatches(dst []Match, src []byte) []Match { if candidate <= 0 || i-candidate > q.MaxDistance { break } - if i-candidate != matches[0].Start-matches[0].Match { - if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { - m := extendMatch2(src, i, candidate, e.NextEmit) - if m.End-m.Start > q.MinLength && q.score(m) > q.score(currentMatch) { - currentMatch = m - } + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength && q.score(m) > q.score(currentMatch) { + currentMatch = m } } } @@ -217,14 +218,24 @@ func (q *M4) FindMatches(dst []Match, src []byte) []Match { default: // Emit the first match, shortening it if necessary to avoid overlap with the second. - e.trim(matches[2], matches[1].Start, q.MinLength) + if matches[2].End > matches[1].Start { + matches[2].End = matches[1].Start + } + if matches[2].End-matches[2].Start >= q.MinLength && q.score(matches[2]) > 0 { + e.emit(matches[2]) + } matches[2] = absoluteMatch{} } } // We've found all the matches now; emit the remaining ones. if matches[1] != (absoluteMatch{}) { - e.trim(matches[1], matches[0].Start, q.MinLength) + if matches[1].End > matches[0].Start { + matches[1].End = matches[0].Start + } + if matches[1].End-matches[1].Start >= q.MinLength && q.score(matches[1]) > 0 { + e.emit(matches[1]) + } } if matches[0] != (absoluteMatch{}) { e.emit(matches[0]) diff --git a/vendor/github.com/apache/arrow-go/v18/LICENSE.txt b/vendor/github.com/apache/arrow-go/v18/LICENSE.txt new file mode 100644 index 00000000..782594aa --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/LICENSE.txt @@ -0,0 +1,257 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the Go project, BSD 3-clause license + PATENTS +weak patent termination clause +(https://github.com/golang/go/blob/master/PATENTS): + + * arrow/flight/cookie_middleware.go + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the LLVM project: + +* arrow/compute/internal/kernels/_lib/types.h + +Apache License v2.0 with LLVM Exceptions. +See https://llvm.org/LICENSE.txt for license information. +SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +-------------------------------------------------------------------------------- + +This project includes code from the brotli project (https://github.com/google/brotli): + +* parquet/compress/brotli.go + +Copyright: 2013 Google Inc. All Rights Reserved +Distributed under MIT License. diff --git a/vendor/github.com/apache/arrow-go/v18/NOTICE.txt b/vendor/github.com/apache/arrow-go/v18/NOTICE.txt new file mode 100644 index 00000000..93423469 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/NOTICE.txt @@ -0,0 +1,5 @@ +Apache Arrow Go +Copyright 2016-2025 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/.editorconfig b/vendor/github.com/apache/arrow-go/v18/arrow/.editorconfig similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/.editorconfig rename to vendor/github.com/apache/arrow-go/v18/arrow/.editorconfig diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/.gitignore b/vendor/github.com/apache/arrow-go/v18/arrow/.gitignore similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/.gitignore rename to vendor/github.com/apache/arrow-go/v18/arrow/.gitignore diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.lock b/vendor/github.com/apache/arrow-go/v18/arrow/Gopkg.lock similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.lock rename to vendor/github.com/apache/arrow-go/v18/arrow/Gopkg.lock diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.toml b/vendor/github.com/apache/arrow-go/v18/arrow/Gopkg.toml similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.toml rename to vendor/github.com/apache/arrow-go/v18/arrow/Gopkg.toml diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/Makefile b/vendor/github.com/apache/arrow-go/v18/arrow/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/Makefile rename to vendor/github.com/apache/arrow-go/v18/arrow/Makefile diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array.go b/vendor/github.com/apache/arrow-go/v18/arrow/array.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/array.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array.go index 7622e750..df186f2d 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array.go @@ -19,8 +19,8 @@ package arrow import ( "fmt" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // ArrayData is the underlying memory and metadata of an Arrow array, corresponding @@ -81,6 +81,8 @@ type ArrayData interface { // Dictionary returns the ArrayData object for the dictionary if this is a // dictionary array, otherwise it will be nil. Dictionary() ArrayData + // SizeInBytes returns the size of the ArrayData buffers and any children and/or dictionary in bytes. + SizeInBytes() uint64 } // Array represents an immutable sequence of values using the Arrow in-memory format. 
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/array.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/array.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/array.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/array.go index 1ee04c7a..6e281a43 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/array.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/array.go @@ -19,9 +19,9 @@ package array import ( "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) const ( @@ -160,6 +160,8 @@ func init() { arrow.TIME64: func(data arrow.ArrayData) arrow.Array { return NewTime64Data(data) }, arrow.INTERVAL_MONTHS: func(data arrow.ArrayData) arrow.Array { return NewMonthIntervalData(data) }, arrow.INTERVAL_DAY_TIME: func(data arrow.ArrayData) arrow.Array { return NewDayTimeIntervalData(data) }, + arrow.DECIMAL32: func(data arrow.ArrayData) arrow.Array { return NewDecimal32Data(data) }, + arrow.DECIMAL64: func(data arrow.ArrayData) arrow.Array { return NewDecimal64Data(data) }, arrow.DECIMAL128: func(data arrow.ArrayData) arrow.Array { return NewDecimal128Data(data) }, arrow.DECIMAL256: func(data arrow.ArrayData) arrow.Array { return NewDecimal256Data(data) }, arrow.LIST: func(data arrow.ArrayData) arrow.Array { return NewListData(data) }, @@ -178,7 +180,8 @@ func init() { arrow.RUN_END_ENCODED: func(data arrow.ArrayData) arrow.Array { return NewRunEndEncodedData(data) }, arrow.LIST_VIEW: func(data arrow.ArrayData) arrow.Array { return NewListViewData(data) }, arrow.LARGE_LIST_VIEW: func(data arrow.ArrayData) arrow.Array { return NewLargeListViewData(data) }, - + arrow.BINARY_VIEW: func(data arrow.ArrayData) arrow.Array { return NewBinaryViewData(data) }, + arrow.STRING_VIEW: func(data arrow.ArrayData) arrow.Array { return NewStringViewData(data) }, // invalid data types to fill out array to size 2^6 - 1 63: invalidDataType, } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/binary.go similarity index 70% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/binary.go index e9e6e66e..1af7631b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/binary.go @@ -23,12 +23,14 @@ import ( "strings" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) type BinaryLike interface { arrow.Array + ValueLen(int) int ValueBytes() []byte ValueOffset64(int) int64 } @@ -318,6 +320,134 @@ func arrayEqualLargeBinary(left, right *LargeBinary) bool { return true } +type ViewLike interface { + arrow.Array + ValueHeader(int) *arrow.ViewHeader +} + +type BinaryView struct { + array + values []arrow.ViewHeader + dataBuffers []*memory.Buffer +} + +func NewBinaryViewData(data arrow.ArrayData) *BinaryView { + a := &BinaryView{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *BinaryView) setData(data *Data) { + if len(data.buffers) < 2 { + panic("len(data.buffers) 
< 2") + } + a.array.setData(data) + + if valueData := data.buffers[1]; valueData != nil { + a.values = arrow.ViewHeaderTraits.CastFromBytes(valueData.Bytes()) + } + + a.dataBuffers = data.buffers[2:] +} + +func (a *BinaryView) ValueHeader(i int) *arrow.ViewHeader { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + return &a.values[a.array.data.offset+i] +} + +func (a *BinaryView) Value(i int) []byte { + s := a.ValueHeader(i) + if s.IsInline() { + return s.InlineBytes() + } + start := s.BufferOffset() + buf := a.dataBuffers[s.BufferIndex()] + return buf.Bytes()[start : start+int32(s.Len())] +} + +func (a *BinaryView) ValueLen(i int) int { + s := a.ValueHeader(i) + return s.Len() +} + +// ValueString returns the value at index i as a string instead of +// a byte slice, without copying the underlying data. +func (a *BinaryView) ValueString(i int) string { + b := a.Value(i) + return *(*string)(unsafe.Pointer(&b)) +} + +func (a *BinaryView) String() string { + var o strings.Builder + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + switch { + case a.IsNull(i): + o.WriteString(NullValueStr) + default: + fmt.Fprintf(&o, "%q", a.ValueString(i)) + } + } + o.WriteString("]") + return o.String() +} + +// ValueStr is paired with AppendValueFromString in that it returns +// the value at index i as a string: Semantically this means that for +// a null value it will return the string "(null)", otherwise it will +// return the value as a base64 encoded string suitable for CSV/JSON. +// +// This is always going to be less performant than just using ValueString +// and exists to fulfill the Array interface to provide a method which +// can produce a human readable string for a given index. 
+func (a *BinaryView) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return base64.StdEncoding.EncodeToString(a.Value(i)) +} + +func (a *BinaryView) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.Value(i) +} + +func (a *BinaryView) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + vals[i] = a.GetOneForMarshal(i) + } + // golang marshal standard says that []byte will be marshalled + // as a base64-encoded string + return json.Marshal(vals) +} + +func arrayEqualBinaryView(left, right *BinaryView) bool { + leftBufs, rightBufs := left.dataBuffers, right.dataBuffers + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !left.ValueHeader(i).Equals(leftBufs, right.ValueHeader(i), rightBufs) { + return false + } + } + return true +} + var ( _ arrow.Array = (*Binary)(nil) + _ arrow.Array = (*LargeBinary)(nil) + _ arrow.Array = (*BinaryView)(nil) + + _ BinaryLike = (*Binary)(nil) + _ BinaryLike = (*LargeBinary)(nil) ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/binarybuilder.go similarity index 56% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/binarybuilder.go index 3cb709b4..794ac688 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/binarybuilder.go @@ -23,11 +23,12 @@ import ( "math" "reflect" "sync/atomic" + "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // A BinaryBuilder is used to build a Binary array using the Append methods. 
@@ -370,6 +371,334 @@ func (b *BinaryBuilder) UnmarshalJSON(data []byte) error { return b.Unmarshal(dec) } +const ( + dfltBlockSize = 32 << 10 // 32 KB + viewValueSizeLimit int32 = math.MaxInt32 +) + +type BinaryViewBuilder struct { + builder + dtype arrow.BinaryDataType + + data *memory.Buffer + rawData []arrow.ViewHeader + + blockBuilder multiBufferBuilder +} + +func NewBinaryViewBuilder(mem memory.Allocator) *BinaryViewBuilder { + return &BinaryViewBuilder{ + dtype: arrow.BinaryTypes.BinaryView, + builder: builder{ + refCount: 1, + mem: mem, + }, + blockBuilder: multiBufferBuilder{ + refCount: 1, + blockSize: dfltBlockSize, + mem: mem, + }, + } +} + +func (b *BinaryViewBuilder) SetBlockSize(sz uint) { + b.blockBuilder.blockSize = int(sz) +} + +func (b *BinaryViewBuilder) Type() arrow.DataType { return b.dtype } + +func (b *BinaryViewBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) != 0 { + return + } + + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } +} + +func (b *BinaryViewBuilder) init(capacity int) { + b.builder.init(capacity) + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.ViewHeaderTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.ViewHeaderTraits.CastFromBytes(b.data.Bytes()) +} + +func (b *BinaryViewBuilder) Resize(n int) { + nbuild := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + return + } + + b.builder.resize(nbuild, b.init) + b.data.Resize(arrow.ViewHeaderTraits.BytesRequired(n)) + b.rawData = arrow.ViewHeaderTraits.CastFromBytes(b.data.Bytes()) +} + +func (b *BinaryViewBuilder) ReserveData(length int) { + if int32(length) > viewValueSizeLimit { + panic(fmt.Errorf("%w: BinaryView or StringView elements cannot reference strings larger than 2GB", + arrow.ErrInvalid)) + } + b.blockBuilder.Reserve(int(length)) +} + +func (b *BinaryViewBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +func (b *BinaryViewBuilder) Append(v []byte) { + if int32(len(v)) > viewValueSizeLimit { + panic(fmt.Errorf("%w: BinaryView or StringView elements cannot reference strings larger than 2GB", arrow.ErrInvalid)) + } + + if !arrow.IsViewInline(len(v)) { + b.ReserveData(len(v)) + } + + b.Reserve(1) + b.UnsafeAppend(v) +} + +// AppendString is identical to Append, only accepting a string instead +// of a byte slice, avoiding the extra copy that would occur if you simply +// did []byte(v). +// +// This is different than AppendValueFromString which exists for the +// Builder interface, in that this expects raw binary data which is +// appended unmodified. AppendValueFromString expects base64 encoded binary +// data instead. 
+func (b *BinaryViewBuilder) AppendString(v string) { + // create a []byte without copying the bytes + // in go1.20 this would be unsafe.StringData + val := *(*[]byte)(unsafe.Pointer(&struct { + string + int + }{v, len(v)})) + b.Append(val) +} + +func (b *BinaryViewBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *BinaryViewBuilder) AppendNulls(n int) { + b.Reserve(n) + for i := 0; i < n; i++ { + b.UnsafeAppendBoolToBitmap(false) + } +} + +func (b *BinaryViewBuilder) AppendEmptyValue() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(true) +} + +func (b *BinaryViewBuilder) AppendEmptyValues(n int) { + b.Reserve(n) + b.unsafeAppendBoolsToBitmap(nil, n) +} + +func (b *BinaryViewBuilder) UnsafeAppend(v []byte) { + hdr := &b.rawData[b.length] + hdr.SetBytes(v) + if !hdr.IsInline() { + b.blockBuilder.UnsafeAppend(hdr, v) + } + b.UnsafeAppendBoolToBitmap(true) +} + +func (b *BinaryViewBuilder) AppendValues(v [][]byte, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + outOfLineTotal := 0 + for i, vv := range v { + if len(valid) == 0 || valid[i] { + if !arrow.IsViewInline(len(vv)) { + outOfLineTotal += len(vv) + } + } + } + + b.ReserveData(outOfLineTotal) + for i, vv := range v { + if len(valid) == 0 || valid[i] { + hdr := &b.rawData[b.length+i] + hdr.SetBytes(vv) + if !hdr.IsInline() { + b.blockBuilder.UnsafeAppend(hdr, vv) + } + } + } + + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *BinaryViewBuilder) AppendStringValues(v []string, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + outOfLineTotal := 0 + for i, vv := range v { + if len(valid) == 0 || valid[i] { + if !arrow.IsViewInline(len(vv)) { + outOfLineTotal += len(vv) + } + } + } + + b.ReserveData(outOfLineTotal) + for i, vv := range v { + if len(valid) == 0 || valid[i] { + hdr := &b.rawData[b.length+i] + hdr.SetString(vv) + if !hdr.IsInline() { + b.blockBuilder.UnsafeAppendString(hdr, vv) + } + } + } + + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +// AppendValueFromString is paired with ValueStr for fulfilling the +// base Builder interface. This is intended to read in a human-readable +// string such as from CSV or JSON and append it to the array. +// +// For Binary values are expected to be base64 encoded (and will be +// decoded as such before being appended). 
+func (b *BinaryViewBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + if b.dtype.IsUtf8() { + b.Append([]byte(s)) + return nil + } + + decodedVal, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return fmt.Errorf("could not decode base64 string: %w", err) + } + b.Append(decodedVal) + return nil +} + +func (b *BinaryViewBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case string: + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err + } + b.Append(data) + case []byte: + b.Append(v) + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf([]byte{}), + Offset: dec.InputOffset(), + } + } + return nil +} + +func (b *BinaryViewBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *BinaryViewBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary view builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +func (b *BinaryViewBuilder) newData() (data *Data) { + bytesRequired := arrow.ViewHeaderTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + + dataBuffers := b.blockBuilder.Finish() + data = NewData(b.dtype, b.length, append([]*memory.Buffer{ + b.nullBitmap, b.data}, dataBuffers...), nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + for _, buf := range dataBuffers { + buf.Release() + } + } + return +} + +func (b *BinaryViewBuilder) NewBinaryViewArray() (a *BinaryView) { + data := b.newData() + a = NewBinaryViewData(data) + data.Release() + return +} + +func (b *BinaryViewBuilder) NewArray() arrow.Array { + return b.NewBinaryViewArray() +} + var ( _ Builder = (*BinaryBuilder)(nil) + _ Builder = (*BinaryViewBuilder)(nil) ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/boolean.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/boolean.go index 464cef48..fb2dba73 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/boolean.go @@ -21,10 +21,10 @@ import ( "strconv" "strings" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // A type which represents an immutable sequence of boolean values. 
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/booleanbuilder.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/booleanbuilder.go index 10b7405a..951fe3a9 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/booleanbuilder.go @@ -23,11 +23,11 @@ import ( "strconv" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) type BooleanBuilder struct { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder.go similarity index 60% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder.go index e023b0d9..085d43ef 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder.go @@ -18,10 +18,12 @@ package array import ( "sync/atomic" + "unsafe" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" ) type bufBuilder interface { @@ -151,3 +153,109 @@ func (b *bufferBuilder) unsafeAppend(data []byte) { copy(b.bytes[b.length:], data) b.length += len(data) } + +type multiBufferBuilder struct { + refCount int64 + blockSize int + + mem memory.Allocator + blocks []*memory.Buffer + currentOutBuffer int +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (b *multiBufferBuilder) Retain() { + atomic.AddInt64(&b.refCount, 1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. 
+func (b *multiBufferBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + b.Reset() + } +} + +func (b *multiBufferBuilder) Reserve(nbytes int) { + if len(b.blocks) == 0 { + out := memory.NewResizableBuffer(b.mem) + if nbytes < b.blockSize { + nbytes = b.blockSize + } + out.Reserve(nbytes) + b.currentOutBuffer = 0 + b.blocks = []*memory.Buffer{out} + return + } + + curBuf := b.blocks[b.currentOutBuffer] + remain := curBuf.Cap() - curBuf.Len() + if nbytes <= remain { + return + } + + // search for underfull block that has enough bytes + for i, block := range b.blocks { + remaining := block.Cap() - block.Len() + if nbytes <= remaining { + b.currentOutBuffer = i + return + } + } + + // current buffer doesn't have enough space, no underfull buffers + // make new buffer and set that as our current. + newBuf := memory.NewResizableBuffer(b.mem) + if nbytes < b.blockSize { + nbytes = b.blockSize + } + + newBuf.Reserve(nbytes) + b.currentOutBuffer = len(b.blocks) + b.blocks = append(b.blocks, newBuf) +} + +func (b *multiBufferBuilder) RemainingBytes() int { + if len(b.blocks) == 0 { + return 0 + } + + buf := b.blocks[b.currentOutBuffer] + return buf.Cap() - buf.Len() +} + +func (b *multiBufferBuilder) Reset() { + b.currentOutBuffer = 0 + for _, block := range b.Finish() { + block.Release() + } +} + +func (b *multiBufferBuilder) UnsafeAppend(hdr *arrow.ViewHeader, val []byte) { + buf := b.blocks[b.currentOutBuffer] + idx, offset := b.currentOutBuffer, buf.Len() + hdr.SetIndexOffset(int32(idx), int32(offset)) + + n := copy(buf.Buf()[offset:], val) + buf.ResizeNoShrink(offset + n) +} + +func (b *multiBufferBuilder) UnsafeAppendString(hdr *arrow.ViewHeader, val string) { + // create a byte slice with zero-copies + // in go1.20 this would be equivalent to unsafe.StringData + v := *(*[]byte)(unsafe.Pointer(&struct { + string + int + }{val, len(val)})) + b.UnsafeAppend(hdr, v) +} + +func (b *multiBufferBuilder) Finish() (out []*memory.Buffer) { + b.currentOutBuffer = 0 + out, b.blocks = b.blocks, nil + return +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_byte.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_byte.go index 00a0d1c2..78bb938e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_byte.go @@ -16,7 +16,7 @@ package array -import "github.com/apache/arrow/go/v14/arrow/memory" +import "github.com/apache/arrow-go/v18/arrow/memory" type byteBufferBuilder struct { bufferBuilder diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go index 879bc9f5..3812c5e7 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go @@ -19,9 +19,9 @@ package array import ( - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - 
"github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/memory" ) type int64BufferBuilder struct { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go.tmpl similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go.tmpl index e859b5bf..c3c39de1 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/bufferbuilder_numeric.gen.go.tmpl @@ -17,9 +17,9 @@ package array import ( - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/memory" ) {{range .In}} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/builder.go similarity index 92% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/builder.go index 2f15ac96..a2a40d48 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/builder.go @@ -20,10 +20,10 @@ import ( "fmt" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) const ( @@ -313,6 +313,14 @@ func NewBuilder(mem memory.Allocator, dtype arrow.DataType) Builder { return NewDayTimeIntervalBuilder(mem) case arrow.INTERVAL_MONTH_DAY_NANO: return NewMonthDayNanoIntervalBuilder(mem) + case arrow.DECIMAL32: + if typ, ok := dtype.(*arrow.Decimal32Type); ok { + return NewDecimal32Builder(mem, typ) + } + case arrow.DECIMAL64: + if typ, ok := dtype.(*arrow.Decimal64Type); ok { + return NewDecimal64Builder(mem, typ) + } case arrow.DECIMAL128: if typ, ok := dtype.(*arrow.Decimal128Type); ok { return NewDecimal128Builder(mem, typ) @@ -349,21 +357,26 @@ func NewBuilder(mem memory.Allocator, dtype arrow.DataType) Builder { typ := dtype.(*arrow.LargeListViewType) return NewLargeListViewBuilderWithField(mem, typ.ElemField()) case arrow.EXTENSION: - typ := dtype.(arrow.ExtensionType) - bldr := NewExtensionBuilder(mem, typ) - if custom, ok := typ.(ExtensionBuilderWrapper); ok { - return custom.NewBuilder(bldr) + if custom, ok := dtype.(CustomExtensionBuilder); ok { + return custom.NewBuilder(mem) + } + if typ, ok := dtype.(arrow.ExtensionType); ok { + return NewExtensionBuilder(mem, typ) } - return bldr + panic(fmt.Errorf("arrow/array: invalid extension type: %T", dtype)) case arrow.FIXED_SIZE_LIST: typ := dtype.(*arrow.FixedSizeListType) - return NewFixedSizeListBuilder(mem, typ.Len(), typ.Elem()) + return NewFixedSizeListBuilderWithField(mem, typ.Len(), typ.ElemField()) case arrow.DURATION: typ := dtype.(*arrow.DurationType) return NewDurationBuilder(mem, 
typ) case arrow.RUN_END_ENCODED: typ := dtype.(*arrow.RunEndEncodedType) return NewRunEndEncodedBuilder(mem, typ.RunEnds(), typ.Encoded()) + case arrow.BINARY_VIEW: + return NewBinaryViewBuilder(mem) + case arrow.STRING_VIEW: + return NewStringViewBuilder(mem) } panic(fmt.Errorf("arrow/array: unsupported builder for %T", dtype)) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/compare.go similarity index 92% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/compare.go index e70716be..e412febf 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/compare.go @@ -20,9 +20,9 @@ import ( "fmt" "math" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/float16" + "github.com/apache/arrow-go/v18/internal/bitutils" ) // RecordEqual reports whether the two provided records are equal. @@ -232,6 +232,12 @@ func Equal(left, right arrow.Array) bool { case *LargeString: r := right.(*LargeString) return arrayEqualLargeString(l, r) + case *BinaryView: + r := right.(*BinaryView) + return arrayEqualBinaryView(l, r) + case *StringView: + r := right.(*StringView) + return arrayEqualStringView(l, r) case *Int8: r := right.(*Int8) return arrayEqualInt8(l, r) @@ -265,12 +271,18 @@ func Equal(left, right arrow.Array) bool { case *Float64: r := right.(*Float64) return arrayEqualFloat64(l, r) + case *Decimal32: + r := right.(*Decimal32) + return arrayEqualDecimal(l, r) + case *Decimal64: + r := right.(*Decimal64) + return arrayEqualDecimal(l, r) case *Decimal128: r := right.(*Decimal128) - return arrayEqualDecimal128(l, r) + return arrayEqualDecimal(l, r) case *Decimal256: r := right.(*Decimal256) - return arrayEqualDecimal256(l, r) + return arrayEqualDecimal(l, r) case *Date32: r := right.(*Date32) return arrayEqualDate32(l, r) @@ -475,13 +487,19 @@ func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool { return arrayEqualBinary(l, r) case *String: r := right.(*String) - return arrayEqualString(l, r) + return arrayApproxEqualString(l, r) case *LargeBinary: r := right.(*LargeBinary) return arrayEqualLargeBinary(l, r) case *LargeString: r := right.(*LargeString) - return arrayEqualLargeString(l, r) + return arrayApproxEqualLargeString(l, r) + case *BinaryView: + r := right.(*BinaryView) + return arrayEqualBinaryView(l, r) + case *StringView: + r := right.(*StringView) + return arrayApproxEqualStringView(l, r) case *Int8: r := right.(*Int8) return arrayEqualInt8(l, r) @@ -515,12 +533,18 @@ func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool { case *Float64: r := right.(*Float64) return arrayApproxEqualFloat64(l, r, opt) + case *Decimal32: + r := right.(*Decimal32) + return arrayEqualDecimal(l, r) + case *Decimal64: + r := right.(*Decimal64) + return arrayEqualDecimal(l, r) case *Decimal128: r := right.(*Decimal128) - return arrayEqualDecimal128(l, r) + return arrayEqualDecimal(l, r) case *Decimal256: r := right.(*Decimal256) - return arrayEqualDecimal256(l, r) + return arrayEqualDecimal(l, r) case *Date32: r := right.(*Date32) return arrayEqualDate32(l, r) @@ -620,6 +644,42 @@ func validityBitmapEqual(left, right arrow.Array) bool { return true } +func arrayApproxEqualString(left, 
right *String) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if stripNulls(left.Value(i)) != stripNulls(right.Value(i)) { + return false + } + } + return true +} + +func arrayApproxEqualLargeString(left, right *LargeString) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if stripNulls(left.Value(i)) != stripNulls(right.Value(i)) { + return false + } + } + return true +} + +func arrayApproxEqualStringView(left, right *StringView) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if stripNulls(left.Value(i)) != stripNulls(right.Value(i)) { + return false + } + } + return true +} + func arrayApproxEqualFloat16(left, right *Float16, opt equalOption) bool { for i := 0; i < left.Len(); i++ { if left.IsNull(i) { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/concat.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/concat.go index 9d815023..bb50354b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/concat.go @@ -23,13 +23,13 @@ import ( "math/bits" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/encoded" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/bitutils" - "github.com/apache/arrow/go/v14/internal/utils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/encoded" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/bitutils" + "github.com/apache/arrow-go/v18/internal/utils" ) // Concatenate creates a new arrow.Array which is the concatenation of the @@ -520,12 +520,7 @@ func concat(data []arrow.ArrayData, mem memory.Allocator) (arr arrow.ArrayData, out := &Data{refCount: 1, dtype: data[0].DataType(), nulls: 0} defer func() { if pErr := recover(); pErr != nil { - switch e := pErr.(type) { - case error: - err = fmt.Errorf("arrow/concat: %w", e) - default: - err = fmt.Errorf("arrow/concat: %v", pErr) - } + err = utils.FormatRecoveredError("arrow/concat", pErr) } if err != nil { out.Release() @@ -600,6 +595,35 @@ func concat(data []arrow.ArrayData, mem memory.Allocator) (arr arrow.ArrayData, } case arrow.FixedWidthDataType: out.buffers[1] = concatBuffers(gatherBuffersFixedWidthType(data, 1, dt), mem) + case arrow.BinaryViewDataType: + out.buffers = out.buffers[:2] + for _, d := range data { + for _, buf := range d.Buffers()[2:] { + buf.Retain() + out.buffers = append(out.buffers, buf) + } + } + + out.buffers[1] = concatBuffers(gatherFixedBuffers(data, 1, arrow.ViewHeaderSizeBytes), mem) + + var ( + s = arrow.ViewHeaderTraits.CastFromBytes(out.buffers[1].Bytes()) + i = data[0].Len() + precedingBufsCount int + ) + + for idx := 1; idx < len(data); idx++ { + precedingBufsCount += len(data[idx-1].Buffers()) - 2 + + for end := i + data[idx].Len(); i < end; i++ { + if s[i].IsInline() { + continue + } + + bufIndex := s[i].BufferIndex() + int32(precedingBufsCount) + s[i].SetIndexOffset(bufIndex, s[i].BufferOffset()) + } + } case arrow.BinaryDataType: offsetWidth := 
dt.Layout().Buffers[1].ByteWidth offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem) @@ -666,7 +690,7 @@ func concat(data []arrow.ArrayData, mem memory.Allocator) (arr arrow.ArrayData, } out.childData = []arrow.ArrayData{children} case *arrow.StructType: - out.childData = make([]arrow.ArrayData, len(dt.Fields())) + out.childData = make([]arrow.ArrayData, dt.NumFields()) for i := range dt.Fields() { children := gatherChildren(data, i) for _, c := range children { @@ -739,7 +763,6 @@ func concat(data []arrow.ArrayData, mem memory.Allocator) (arr arrow.ArrayData, out.childData[0].Release() return nil, err } - default: return nil, fmt.Errorf("concatenate not implemented for type %s", dt) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/data.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/data.go similarity index 90% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/data.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/data.go index 49df06fb..be75c7c7 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/data.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/data.go @@ -22,9 +22,9 @@ import ( "sync/atomic" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" ) // Data represents the memory and metadata of an Arrow array. @@ -190,9 +190,36 @@ func (d *Data) SetDictionary(dict arrow.ArrayData) { } } +// SizeInBytes returns the size of the Data and any children and/or dictionary in bytes by +// recursively examining the nested structures of children and/or dictionary. +// The value returned is an upper-bound since offset is not taken into account. +func (d *Data) SizeInBytes() uint64 { + var size uint64 + + if d == nil { + return 0 + } + + for _, b := range d.Buffers() { + if b != nil { + size += uint64(b.Len()) + } + } + for _, c := range d.Children() { + size += c.SizeInBytes() + } + if d.dictionary != nil { + size += d.dictionary.SizeInBytes() + } + + return size +} + // NewSliceData returns a new slice that shares backing data with the input. // The returned Data slice starts at i and extends j-i elements, such as: -// slice := data[i:j] +// +// slice := data[i:j] +// // The returned value must be Release'd after use. // // NewSliceData panics if the slice is outside the valid range of the input Data. diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/decimal.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/decimal.go new file mode 100644 index 00000000..1a9d61c1 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/decimal.go @@ -0,0 +1,432 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "sync/atomic" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/decimal" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" +) + +type baseDecimal[T interface { + decimal.DecimalTypes + decimal.Num[T] +}] struct { + array + + values []T +} + +func newDecimalData[T interface { + decimal.DecimalTypes + decimal.Num[T] +}](data arrow.ArrayData) *baseDecimal[T] { + a := &baseDecimal[T]{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *baseDecimal[T]) Value(i int) T { return a.values[i] } + +func (a *baseDecimal[T]) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.GetOneForMarshal(i).(string) +} + +func (a *baseDecimal[T]) Values() []T { return a.values } + +func (a *baseDecimal[T]) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString(NullValueStr) + default: + fmt.Fprintf(o, "%v", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *baseDecimal[T]) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.GetData[T](vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *baseDecimal[T]) GetOneForMarshal(i int) any { + if a.IsNull(i) { + return nil + } + + typ := a.DataType().(arrow.DecimalType) + n, scale := a.Value(i), typ.GetScale() + return n.ToBigFloat(scale).Text('g', int(typ.GetPrecision())) +} + +func (a *baseDecimal[T]) MarshalJSON() ([]byte, error) { + vals := make([]any, a.Len()) + for i := 0; i < a.Len(); i++ { + vals[i] = a.GetOneForMarshal(i) + } + return json.Marshal(vals) +} + +func arrayEqualDecimal[T interface { + decimal.DecimalTypes + decimal.Num[T] +}](left, right *baseDecimal[T]) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type Decimal32 = baseDecimal[decimal.Decimal32] + +func NewDecimal32Data(data arrow.ArrayData) *Decimal32 { + return newDecimalData[decimal.Decimal32](data) +} + +type Decimal64 = baseDecimal[decimal.Decimal64] + +func NewDecimal64Data(data arrow.ArrayData) *Decimal64 { + return newDecimalData[decimal.Decimal64](data) +} + +type Decimal128 = baseDecimal[decimal.Decimal128] + +func NewDecimal128Data(data arrow.ArrayData) *Decimal128 { + return newDecimalData[decimal.Decimal128](data) +} + +type Decimal256 = baseDecimal[decimal.Decimal256] + +func NewDecimal256Data(data arrow.ArrayData) *Decimal256 { + return newDecimalData[decimal.Decimal256](data) +} + +type Decimal32Builder = baseDecimalBuilder[decimal.Decimal32] +type Decimal64Builder = baseDecimalBuilder[decimal.Decimal64] +type Decimal128Builder struct { + 
*baseDecimalBuilder[decimal.Decimal128] +} + +func (b *Decimal128Builder) NewDecimal128Array() *Decimal128 { + return b.NewDecimalArray() +} + +type Decimal256Builder struct { + *baseDecimalBuilder[decimal.Decimal256] +} + +func (b *Decimal256Builder) NewDecimal256Array() *Decimal256 { + return b.NewDecimalArray() +} + +type baseDecimalBuilder[T interface { + decimal.DecimalTypes + decimal.Num[T] +}] struct { + builder + traits decimal.Traits[T] + + dtype arrow.DecimalType + data *memory.Buffer + rawData []T +} + +func newDecimalBuilder[T interface { + decimal.DecimalTypes + decimal.Num[T] +}, DT arrow.DecimalType](mem memory.Allocator, dtype DT) *baseDecimalBuilder[T] { + return &baseDecimalBuilder[T]{ + builder: builder{refCount: 1, mem: mem}, + dtype: dtype, + } +} + +func (b *baseDecimalBuilder[T]) Type() arrow.DataType { return b.dtype } + +func (b *baseDecimalBuilder[T]) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data, b.rawData = nil, nil + } + } +} + +func (b *baseDecimalBuilder[T]) Append(v T) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *baseDecimalBuilder[T]) UnsafeAppend(v T) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *baseDecimalBuilder[T]) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *baseDecimalBuilder[T]) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + +func (b *baseDecimalBuilder[T]) AppendEmptyValue() { + var empty T + b.Append(empty) +} + +func (b *baseDecimalBuilder[T]) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + +func (b *baseDecimalBuilder[T]) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +func (b *baseDecimalBuilder[T]) AppendValues(v []T, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + if len(v) > 0 { + copy(b.rawData[b.length:], v) + } + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *baseDecimalBuilder[T]) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := int(reflect.TypeFor[T]().Size()) * capacity + b.data.Resize(bytesN) + b.rawData = arrow.GetData[T](b.data.Bytes()) +} + +func (b *baseDecimalBuilder[T]) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +func (b *baseDecimalBuilder[T]) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(b.traits.BytesRequired(n)) + b.rawData = arrow.GetData[T](b.data.Bytes()) + } +} + +func (b *baseDecimalBuilder[T]) NewDecimalArray() (a *baseDecimal[T]) { + data := b.newData() + a = newDecimalData[T](data) + data.Release() + return +} + +func (b *baseDecimalBuilder[T]) NewArray() arrow.Array { + return b.NewDecimalArray() +} + +func (b *baseDecimalBuilder[T]) newData() (data *Data) { + bytesRequired := b.traits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, 
b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data, b.rawData = nil, nil + } + + return +} + +func (b *baseDecimalBuilder[T]) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + val, err := b.traits.FromString(s, b.dtype.GetPrecision(), b.dtype.GetScale()) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + +func (b *baseDecimalBuilder[T]) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + var token T + switch v := t.(type) { + case float64: + token, err = b.traits.FromFloat64(v, b.dtype.GetPrecision(), b.dtype.GetScale()) + if err != nil { + return err + } + b.Append(token) + case string: + token, err = b.traits.FromString(v, b.dtype.GetPrecision(), b.dtype.GetScale()) + if err != nil { + return err + } + b.Append(token) + case json.Number: + token, err = b.traits.FromString(v.String(), b.dtype.GetPrecision(), b.dtype.GetScale()) + if err != nil { + return err + } + b.Append(token) + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeFor[T](), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *baseDecimalBuilder[T]) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *baseDecimalBuilder[T]) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("decimal builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +func NewDecimal32Builder(mem memory.Allocator, dtype *arrow.Decimal32Type) *Decimal32Builder { + b := newDecimalBuilder[decimal.Decimal32](mem, dtype) + b.traits = decimal.Dec32Traits + return b +} + +func NewDecimal64Builder(mem memory.Allocator, dtype *arrow.Decimal64Type) *Decimal64Builder { + b := newDecimalBuilder[decimal.Decimal64](mem, dtype) + b.traits = decimal.Dec64Traits + return b +} + +func NewDecimal128Builder(mem memory.Allocator, dtype *arrow.Decimal128Type) *Decimal128Builder { + b := newDecimalBuilder[decimal.Decimal128](mem, dtype) + b.traits = decimal.Dec128Traits + return &Decimal128Builder{b} +} + +func NewDecimal256Builder(mem memory.Allocator, dtype *arrow.Decimal256Type) *Decimal256Builder { + b := newDecimalBuilder[decimal.Decimal256](mem, dtype) + b.traits = decimal.Dec256Traits + return &Decimal256Builder{b} +} + +var ( + _ arrow.Array = (*Decimal32)(nil) + _ arrow.Array = (*Decimal64)(nil) + _ arrow.Array = (*Decimal128)(nil) + _ arrow.Array = (*Decimal256)(nil) + _ Builder = (*Decimal32Builder)(nil) + _ Builder = (*Decimal64Builder)(nil) + _ Builder = (*Decimal128Builder)(nil) + _ Builder = (*Decimal256Builder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/dictionary.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/dictionary.go index d0a1c4dc..0c23934a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/dictionary.go @@ -25,16 +25,17 @@ import ( "sync/atomic" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - 
"github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/hashing" - "github.com/apache/arrow/go/v14/internal/json" - "github.com/apache/arrow/go/v14/internal/utils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/decimal" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/float16" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/hashing" + "github.com/apache/arrow-go/v18/internal/json" + "github.com/apache/arrow-go/v18/internal/utils" ) // Dictionary represents the type for dictionary-encoded data with a data @@ -392,7 +393,8 @@ func createMemoTable(mem memory.Allocator, dt arrow.DataType) (ret hashing.MemoT ret = hashing.NewFloat32MemoTable(0) case arrow.FLOAT64: ret = hashing.NewFloat64MemoTable(0) - case arrow.BINARY, arrow.FIXED_SIZE_BINARY, arrow.DECIMAL128, arrow.DECIMAL256, arrow.INTERVAL_DAY_TIME, arrow.INTERVAL_MONTH_DAY_NANO: + case arrow.BINARY, arrow.FIXED_SIZE_BINARY, arrow.DECIMAL32, arrow.DECIMAL64, + arrow.DECIMAL128, arrow.DECIMAL256, arrow.INTERVAL_DAY_TIME, arrow.INTERVAL_MONTH_DAY_NANO: ret = hashing.NewBinaryMemoTable(0, 0, NewBinaryBuilder(mem, arrow.BinaryTypes.Binary)) case arrow.STRING: ret = hashing.NewBinaryMemoTable(0, 0, NewBinaryBuilder(mem, arrow.BinaryTypes.String)) @@ -412,6 +414,7 @@ type DictionaryBuilder interface { AppendArray(arrow.Array) error AppendIndices([]int, []bool) ResetFull() + DictionarySize() int } type dictionaryBuilder struct { @@ -622,6 +625,22 @@ func NewDictionaryBuilderWithDict(mem memory.Allocator, dt *arrow.DictionaryType } } return ret + case arrow.DECIMAL32: + ret := &Decimal32DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Decimal32)); err != nil { + panic(err) + } + } + return ret + case arrow.DECIMAL64: + ret := &Decimal64DictionaryBuilder{bldr} + if init != nil { + if err = ret.InsertDictValues(init.(*Decimal64)); err != nil { + panic(err) + } + } + return ret case arrow.DECIMAL128: ret := &Decimal128DictionaryBuilder{bldr} if init != nil { @@ -739,7 +758,7 @@ func (b *dictionaryBuilder) UnmarshalJSON(data []byte) error { } if delim, ok := t.(json.Delim); !ok || delim != '[' { - return fmt.Errorf("dictionary builder must upack from json array, found %s", delim) + return fmt.Errorf("dictionary builder must unpack from json array, found %s", delim) } return b.Unmarshal(dec) @@ -905,6 +924,16 @@ func getvalFn(arr arrow.Array) func(i int) interface{} { return func(i int) interface{} { return typedarr.Value(i) } case *String: return func(i int) interface{} { return typedarr.Value(i) } + case *Decimal32: + return func(i int) interface{} { + val := typedarr.Value(i) + return (*(*[arrow.Decimal32SizeBytes]byte)(unsafe.Pointer(&val)))[:] + } + case *Decimal64: + return func(i int) interface{} { + val := typedarr.Value(i) + return (*(*[arrow.Decimal64SizeBytes]byte)(unsafe.Pointer(&val)))[:] + } case *Decimal128: return func(i int) interface{} { val := typedarr.Value(i) @@ -1004,6 +1033,10 @@ func (b *dictionaryBuilder) AppendIndices(indices []int, 
valid []bool) { } } +func (b *dictionaryBuilder) DictionarySize() int { + return b.memoTable.Size() +} + type NullDictionaryBuilder struct { dictionaryBuilder } @@ -1389,6 +1422,42 @@ func (b *FixedSizeBinaryDictionaryBuilder) InsertDictValues(arr *FixedSizeBinary return } +type Decimal32DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Decimal32DictionaryBuilder) Append(v decimal.Decimal32) error { + return b.appendValue((*(*[arrow.Decimal32SizeBytes]byte)(unsafe.Pointer(&v)))[:]) +} +func (b *Decimal32DictionaryBuilder) InsertDictValues(arr *Decimal32) (err error) { + data := arrow.Decimal32Traits.CastToBytes(arr.values) + for len(data) > 0 { + if err = b.insertDictValue(data[:arrow.Decimal32SizeBytes]); err != nil { + break + } + data = data[arrow.Decimal32SizeBytes:] + } + return +} + +type Decimal64DictionaryBuilder struct { + dictionaryBuilder +} + +func (b *Decimal64DictionaryBuilder) Append(v decimal.Decimal64) error { + return b.appendValue((*(*[arrow.Decimal64SizeBytes]byte)(unsafe.Pointer(&v)))[:]) +} +func (b *Decimal64DictionaryBuilder) InsertDictValues(arr *Decimal64) (err error) { + data := arrow.Decimal64Traits.CastToBytes(arr.values) + for len(data) > 0 { + if err = b.insertDictValue(data[:arrow.Decimal64SizeBytes]); err != nil { + break + } + data = data[arrow.Decimal64SizeBytes:] + } + return +} + type Decimal128DictionaryBuilder struct { dictionaryBuilder } @@ -1533,7 +1602,7 @@ type DictionaryUnifier interface { // values, an error will be returned instead. The new unified dictionary // is returned. GetResultWithIndexType(indexType arrow.DataType) (arrow.Array, error) - // Release should be called to clean up any allocated scrach memo-table used + // Release should be called to clean up any allocated scratch memo-table used // for building the unified dictionary. Release() } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/diff.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/diff.go index 026a27b9..9320ec98 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/diff.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) // Edit represents one entry in the edit script to compare two arrays. 
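As an illustrative aside (not part of the vendored sources): a minimal sketch of the generic decimal builder API introduced in decimal.go above, using only calls whose signatures appear in this diff (NewDecimal128Builder, AppendValueFromString, NewDecimal128Array, ValueStr); the precision/scale values are arbitrary example choices.

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.DefaultAllocator
	dt := &arrow.Decimal128Type{Precision: 10, Scale: 2}

	// The generic baseDecimalBuilder is wrapped by the concrete Decimal128Builder.
	b := array.NewDecimal128Builder(mem, dt)
	defer b.Release()

	// AppendValueFromString parses using the builder's precision/scale traits.
	if err := b.AppendValueFromString("123.45"); err != nil {
		panic(err)
	}
	b.AppendNull()

	arr := b.NewDecimal128Array()
	defer arr.Release()

	// ValueStr formats via GetOneForMarshal (big.Float text with the type's precision).
	fmt.Println(arr.ValueStr(0)) // 123.45
	fmt.Println(arr.ValueStr(1)) // (null)
}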
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/doc.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/doc.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/doc.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/encoded.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/encoded.go index bf4a942c..81c375c9 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/encoded.go @@ -23,12 +23,12 @@ import ( "reflect" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/encoded" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" - "github.com/apache/arrow/go/v14/internal/utils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/encoded" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" + "github.com/apache/arrow-go/v18/internal/utils" ) // RunEndEncoded represents an array containing two children: @@ -150,19 +150,19 @@ func (r *RunEndEncoded) LogicalRunEndsArray(mem memory.Allocator) arrow.Array { case *Int16: for _, v := range e.Int16Values()[physOffset : physOffset+physLength] { v -= int16(r.data.offset) - v = int16(utils.MinInt(int(v), r.data.length)) + v = int16(utils.Min(int(v), r.data.length)) bldr.(*Int16Builder).Append(v) } case *Int32: for _, v := range e.Int32Values()[physOffset : physOffset+physLength] { v -= int32(r.data.offset) - v = int32(utils.MinInt(int(v), r.data.length)) + v = int32(utils.Min(int(v), r.data.length)) bldr.(*Int32Builder).Append(v) } case *Int64: for _, v := range e.Int64Values()[physOffset : physOffset+physLength] { v -= int64(r.data.offset) - v = int64(utils.MinInt(int(v), r.data.length)) + v = int64(utils.Min(int(v), r.data.length)) bldr.(*Int64Builder).Append(v) } } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/extension.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/extension.go index 03e8c173..d1a28350 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/extension.go @@ -20,9 +20,9 @@ import ( "fmt" "reflect" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // ExtensionArray is the interface that needs to be implemented to handle diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/array/extension_builder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/extension_builder.go new file mode 100644 index 00000000..9442ac01 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/extension_builder.go @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor 
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import "github.com/apache/arrow-go/v18/arrow/memory" + +// CustomExtensionBuilder is an interface that custom extension types may implement to provide a custom builder +// instead of the underlying storage type's builder when array.NewBuilder is called with that type. +type CustomExtensionBuilder interface { + NewBuilder(memory.Allocator) Builder +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixed_size_list.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/fixed_size_list.go index 62c32138..84036f94 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixed_size_list.go @@ -22,11 +22,11 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // FixedSizeList represents an immutable sequence of N array values. @@ -162,25 +162,38 @@ func (a *FixedSizeList) MarshalJSON() ([]byte, error) { } type FixedSizeListBuilder struct { - builder - - etype arrow.DataType // data type of the list's elements. - n int32 // number of elements in the fixed-size list. - values Builder // value builder for the list's elements. + baseListBuilder + n int32 // number of elements in the fixed-size list. } // NewFixedSizeListBuilder returns a builder, using the provided memory allocator. // The created list builder will create a list whose elements will be of type etype. func NewFixedSizeListBuilder(mem memory.Allocator, n int32, etype arrow.DataType) *FixedSizeListBuilder { return &FixedSizeListBuilder{ - builder: builder{refCount: 1, mem: mem}, - etype: etype, - n: n, - values: NewBuilder(mem, etype), + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, etype), + dt: arrow.FixedSizeListOf(n, etype), + }, + n, + } +} + +// NewFixedSizeListBuilderWithField returns a builder similarly to +// NewFixedSizeListBuilder, but it accepts a child rather than just a datatype +// to ensure nullability context is preserved. 
+func NewFixedSizeListBuilderWithField(mem memory.Allocator, n int32, field arrow.Field) *FixedSizeListBuilder { + return &FixedSizeListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, field.Type), + dt: arrow.FixedSizeListOfField(n, field), + }, + n, } } -func (b *FixedSizeListBuilder) Type() arrow.DataType { return arrow.FixedSizeListOf(b.n, b.etype) } +func (b *FixedSizeListBuilder) Type() arrow.DataType { return b.dt } // Release decreases the reference count by 1. // When the reference count goes to zero, the memory is freed. @@ -296,7 +309,7 @@ func (b *FixedSizeListBuilder) newData() (data *Data) { defer values.Release() data = NewData( - arrow.FixedSizeListOf(b.n, b.etype), b.length, + b.dt, b.length, []*memory.Buffer{b.nullBitmap}, []arrow.ArrayData{values.Data()}, b.nulls, @@ -336,7 +349,7 @@ func (b *FixedSizeListBuilder) UnmarshalOne(dec *json.Decoder) error { default: return &json.UnmarshalTypeError{ Value: fmt.Sprint(t), - Struct: arrow.FixedSizeListOf(b.n, b.etype).String(), + Struct: b.dt.String(), } } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binary.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binary.go index 5466156d..7049c9c0 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binary.go @@ -22,8 +22,8 @@ import ( "fmt" "strings" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/internal/json" ) // A type which represents an immutable sequence of fixed-length binary strings. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binarybuilder.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binarybuilder.go index ba4b474a..02e72a25 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/fixedsize_binarybuilder.go @@ -23,10 +23,10 @@ import ( "reflect" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // A FixedSizeBinaryBuilder is used to build a FixedSizeBinary array using the Append methods. 
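As an illustrative aside (not part of the vendored sources): a short sketch of NewFixedSizeListBuilderWithField from fixed_size_list.go above, which takes a full arrow.Field so the child's nullability is preserved in the resulting list type; the field name "xyz" is made up for the example.

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.DefaultAllocator
	field := arrow.Field{Name: "xyz", Type: arrow.PrimitiveTypes.Float64, Nullable: false}

	// n=3 elements per list; the field (not just its type) flows into b.Type().
	b := array.NewFixedSizeListBuilderWithField(mem, 3, field)
	defer b.Release()

	vb := b.ValueBuilder().(*array.Float64Builder)
	b.Append(true)
	vb.AppendValues([]float64{1, 2, 3}, nil)

	arr := b.NewArray()
	defer arr.Release()

	// DataType() reports the field-aware fixed_size_list type, including nullability.
	fmt.Println(arr.DataType(), arr)
}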
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/float16.go similarity index 87% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/float16.go index de499e26..6b0e820f 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/float16.go @@ -20,9 +20,9 @@ import ( "fmt" "strings" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/float16" + "github.com/apache/arrow-go/v18/internal/json" ) // A type which represents an immutable sequence of Float16 values. @@ -87,10 +87,20 @@ func (a *Float16) GetOneForMarshal(i int) interface{} { func (a *Float16) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i, v := range a.values { - if a.IsValid(i) { - vals[i] = v.Float32() - } else { + if !a.IsValid(i) { vals[i] = nil + continue + } + + switch { + case v.IsNaN(): + vals[i] = "NaN" + case v.IsInf() && !v.Signbit(): + vals[i] = "+Inf" + case v.IsInf() && v.Signbit(): + vals[i] = "-Inf" + default: + vals[i] = v.Float32() } } return json.Marshal(vals) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/float16_builder.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/float16_builder.go index f96ab603..93dbfbc0 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/float16_builder.go @@ -23,12 +23,12 @@ import ( "strconv" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/float16" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) type Float16Builder struct { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/interval.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/interval.go index ff059c92..324647e8 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/interval.go @@ -23,11 +23,11 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + 
"github.com/apache/arrow-go/v18/internal/json" ) func NewIntervalData(data arrow.ArrayData) arrow.Array { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/json_reader.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/json_reader.go index e09717c4..7835b280 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/json_reader.go @@ -22,10 +22,10 @@ import ( "io" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) type Option func(config) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/list.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/list.go similarity index 90% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/list.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/list.go index d8d8b8c7..e80bc896 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/list.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/list.go @@ -19,15 +19,14 @@ package array import ( "bytes" "fmt" - "math" "strings" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) type ListLike interface { @@ -926,7 +925,7 @@ func (a *LargeListView) Release() { a.values.Release() } -// Acessors for offsets and sizes to make ListView and LargeListView validation generic. +// Accessors for offsets and sizes to make ListView and LargeListView validation generic. type offsetsAndSizes interface { offsetAt(slot int64) int64 sizeAt(slot int64) int64 @@ -1411,118 +1410,19 @@ func (b *baseListViewBuilder) UnmarshalJSON(data []byte) error { return b.Unmarshal(dec) } -// Pre-conditions: -// -// input.DataType() is ListViewType -// input.Len() > 0 && input.NullN() != input.Len() -func minListViewOffset32(input arrow.ArrayData) int32 { - var bitmap []byte - if input.Buffers()[0] != nil { - bitmap = input.Buffers()[0].Bytes() - } - offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():] - sizes := arrow.Int32Traits.CastFromBytes(input.Buffers()[2].Bytes())[input.Offset():] - - isNull := func(i int) bool { - return bitmap != nil && bitutil.BitIsNotSet(bitmap, input.Offset()+i) - } - - // It's very likely that the first non-null non-empty list-view starts at - // offset 0 of the child array. - i := 0 - for i < input.Len() && (isNull(i) || sizes[i] == 0) { - i += 1 - } - if i >= input.Len() { - return 0 - } - minOffset := offsets[i] - if minOffset == 0 { - // early exit: offset 0 found already - return 0 - } - - // Slow path: scan the buffers entirely. 
- i += 1 - for ; i < input.Len(); i += 1 { - if isNull(i) { - continue - } - offset := offsets[i] - if offset < minOffset && sizes[i] > 0 { - minOffset = offset - } - } - return minOffset -} - -// Find the maximum offset+size in a LIST_VIEW array. +// Find the minimum offset+size in a LIST_VIEW/LARGE_LIST_VIEW array. // // Pre-conditions: // -// input.DataType() is ListViewType -// input.Len() > 0 && input.NullN() != input.Len() -func maxListViewOffset32(input arrow.ArrayData) int { - inputOffset := input.Offset() - var bitmap []byte - if input.Buffers()[0] != nil { - bitmap = input.Buffers()[0].Bytes() - } - offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[inputOffset:] - sizes := arrow.Int32Traits.CastFromBytes(input.Buffers()[2].Bytes())[inputOffset:] - - isNull := func(i int) bool { - return bitmap != nil && bitutil.BitIsNotSet(bitmap, inputOffset+i) - } - - i := input.Len() - 1 // safe because input.Len() > 0 - for i != 0 && (isNull(i) || sizes[i] == 0) { - i -= 1 - } - offset := offsets[i] - size := sizes[i] - if i == 0 { - if isNull(i) || sizes[i] == 0 { - return 0 - } else { - return int(offset + size) - } - } - - values := input.Children()[0] - maxEnd := int(offsets[i] + sizes[i]) - if maxEnd == values.Len() { - // Early-exit: maximum possible view-end found already. - return maxEnd - } - - // Slow path: scan the buffers entirely. - for ; i >= 0; i -= 1 { - offset := offsets[i] - size := sizes[i] - if size > 0 && !isNull(i) { - if int(offset+size) > maxEnd { - maxEnd = int(offset + size) - if maxEnd == values.Len() { - return maxEnd - } - } - } - } - return maxEnd -} - -// Pre-conditions: -// -// input.DataType() is LargeListViewType +// input.DataType() is ListViewType if Offset=int32 or LargeListViewType if Offset=int64 // input.Len() > 0 && input.NullN() != input.Len() -func minLargeListViewOffset64(input arrow.ArrayData) int64 { +func minListViewOffset[Offset int32 | int64](input arrow.ArrayData) Offset { var bitmap []byte if input.Buffers()[0] != nil { bitmap = input.Buffers()[0].Bytes() } - offsets := arrow.Int64Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():] - sizes := arrow.Int64Traits.CastFromBytes(input.Buffers()[2].Bytes())[input.Offset():] + offsets := arrow.GetData[Offset](input.Buffers()[1].Bytes())[input.Offset():] + sizes := arrow.GetData[Offset](input.Buffers()[2].Bytes())[input.Offset():] isNull := func(i int) bool { return bitmap != nil && bitutil.BitIsNotSet(bitmap, input.Offset()+i) @@ -1557,27 +1457,25 @@ func minLargeListViewOffset64(input arrow.ArrayData) int64 { return minOffset } -// Find the maximum offset+size in a LARGE_LIST_VIEW array. +// Find the maximum offset+size in a LIST_VIEW/LARGE_LIST_VIEW array. 
// // Pre-conditions: // -// input.DataType() is LargeListViewType +// input.DataType() is ListViewType if Offset=int32 or LargeListViewType if Offset=int64 // input.Len() > 0 && input.NullN() != input.Len() -func maxLargeListViewOffset64(input arrow.ArrayData) int64 { +func maxListViewEnd[Offset int32 | int64](input arrow.ArrayData) Offset { inputOffset := input.Offset() var bitmap []byte if input.Buffers()[0] != nil { bitmap = input.Buffers()[0].Bytes() } - offsets := arrow.Int64Traits.CastFromBytes(input.Buffers()[1].Bytes())[inputOffset:] - sizes := arrow.Int64Traits.CastFromBytes(input.Buffers()[2].Bytes())[inputOffset:] + offsets := arrow.GetData[Offset](input.Buffers()[1].Bytes())[inputOffset:] + sizes := arrow.GetData[Offset](input.Buffers()[2].Bytes())[inputOffset:] isNull := func(i int) bool { return bitmap != nil && bitutil.BitIsNotSet(bitmap, inputOffset+i) } - // It's very likely that the first non-null non-empty list-view starts at - // offset zero, so we check that first and potentially early-return a 0. i := input.Len() - 1 // safe because input.Len() > 0 for i != 0 && (isNull(i) || sizes[i] == 0) { i -= 1 @@ -1592,15 +1490,9 @@ func maxLargeListViewOffset64(input arrow.ArrayData) int64 { } } - if offset > math.MaxInt64-size { - // Early-exit: 64-bit overflow detected. This is not possible on a - // valid list-view, but we return the maximum possible value to - // avoid undefined behavior. - return math.MaxInt64 - } values := input.Children()[0] maxEnd := offsets[i] + sizes[i] - if maxEnd == int64(values.Len()) { + if maxEnd == Offset(values.Len()) { // Early-exit: maximum possible view-end found already. return maxEnd } @@ -1611,14 +1503,8 @@ func maxLargeListViewOffset64(input arrow.ArrayData) int64 { size := sizes[i] if size > 0 && !isNull(i) { if offset+size > maxEnd { - if offset > math.MaxInt64-size { - // 64-bit overflow detected. This is not possible on a valid list-view, - // but we saturate maxEnd to the maximum possible value to avoid - // undefined behavior. 
- return math.MaxInt64 - } maxEnd = offset + size - if maxEnd == int64(values.Len()) { + if maxEnd == Offset(values.Len()) { return maxEnd } } @@ -1634,11 +1520,11 @@ func rangeOfValuesUsed(input arrow.ArrayData) (int, int) { var minOffset, maxEnd int switch input.DataType().(type) { case *arrow.ListViewType: - minOffset = int(minListViewOffset32(input)) - maxEnd = maxListViewOffset32(input) + minOffset = int(minListViewOffset[int32](input)) + maxEnd = int(maxListViewEnd[int32](input)) case *arrow.LargeListViewType: - minOffset = int(minLargeListViewOffset64(input)) - maxEnd = int(maxLargeListViewOffset64(input)) + minOffset = int(minListViewOffset[int64](input)) + maxEnd = int(maxListViewEnd[int64](input)) case *arrow.ListType: offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():] minOffset = int(offsets[0]) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/map.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/map.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/map.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/map.go index 9945a90c..5609ccd0 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/map.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/map.go @@ -20,9 +20,9 @@ import ( "bytes" "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // Map represents an immutable sequence of Key/Value structs. It is a diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/null.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/null.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/null.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/null.go index 150a1030..76e56a49 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/null.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/null.go @@ -23,10 +23,10 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // Null represents an immutable, degenerate array with no physical storage. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go index a3e11015..7e94fe5c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go @@ -20,11 +20,12 @@ package array import ( "fmt" + "math" "strconv" "strings" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/internal/json" ) // A type which represents an immutable sequence of int64 values. 
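As an illustrative aside (not part of the vendored sources): float16.go above and the Float32/Float64 hunks just below change MarshalJSON to encode non-finite values as the strings "NaN", "+Inf" and "-Inf", and the numeric builders accept the same strings back (see the added template test). A minimal round-trip sketch:

package main

import (
	"encoding/json"
	"fmt"
	"math"

	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.DefaultAllocator

	b := array.NewFloat64Builder(mem)
	defer b.Release()
	b.AppendValues([]float64{1.5, math.NaN(), math.Inf(1)}, nil)

	arr := b.NewFloat64Array()
	defer arr.Release()

	out, err := json.Marshal(arr) // *array.Float64 implements json.Marshaler
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // [1.5,"NaN","+Inf"]

	// Feed the same representation back through the builder.
	b2 := array.NewFloat64Builder(mem)
	defer b2.Release()
	if err := b2.UnmarshalJSON(out); err != nil {
		panic(err)
	}
	arr2 := b2.NewFloat64Array()
	defer arr2.Release()
	fmt.Println(math.IsNaN(arr2.Value(1)), math.IsInf(arr2.Value(2), 1)) // true true
}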
@@ -100,11 +101,13 @@ func (a *Int64) GetOneForMarshal(i int) interface{} { func (a *Int64) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { vals[i] = a.values[i] } else { vals[i] = nil } + } return json.Marshal(vals) @@ -195,11 +198,13 @@ func (a *Uint64) GetOneForMarshal(i int) interface{} { func (a *Uint64) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { vals[i] = a.values[i] } else { vals[i] = nil } + } return json.Marshal(vals) @@ -290,11 +295,23 @@ func (a *Float64) GetOneForMarshal(i int) interface{} { func (a *Float64) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { - if a.IsValid(i) { - vals[i] = a.values[i] - } else { + if !a.IsValid(i) { vals[i] = nil + continue } + + f := a.Value(i) + switch { + case math.IsNaN(f): + vals[i] = "NaN" + case math.IsInf(f, 1): + vals[i] = "+Inf" + case math.IsInf(f, -1): + vals[i] = "-Inf" + default: + vals[i] = f + } + } return json.Marshal(vals) @@ -385,11 +402,13 @@ func (a *Int32) GetOneForMarshal(i int) interface{} { func (a *Int32) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { vals[i] = a.values[i] } else { vals[i] = nil } + } return json.Marshal(vals) @@ -480,11 +499,13 @@ func (a *Uint32) GetOneForMarshal(i int) interface{} { func (a *Uint32) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { vals[i] = a.values[i] } else { vals[i] = nil } + } return json.Marshal(vals) @@ -575,11 +596,21 @@ func (a *Float32) GetOneForMarshal(i int) interface{} { func (a *Float32) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { - if a.IsValid(i) { - vals[i] = a.values[i] - } else { + if !a.IsValid(i) { vals[i] = nil + continue } + + f := a.Value(i) + v := strconv.FormatFloat(float64(f), 'g', -1, 32) + + switch v { + case "NaN", "+Inf", "-Inf": + vals[i] = v + default: + vals[i] = f + } + } return json.Marshal(vals) @@ -670,11 +701,13 @@ func (a *Int16) GetOneForMarshal(i int) interface{} { func (a *Int16) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { vals[i] = a.values[i] } else { vals[i] = nil } + } return json.Marshal(vals) @@ -765,11 +798,13 @@ func (a *Uint16) GetOneForMarshal(i int) interface{} { func (a *Uint16) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { vals[i] = a.values[i] } else { vals[i] = nil } + } return json.Marshal(vals) @@ -860,11 +895,13 @@ func (a *Int8) GetOneForMarshal(i int) interface{} { func (a *Int8) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data } else { vals[i] = nil } + } return json.Marshal(vals) @@ -955,11 +992,13 @@ func (a *Uint8) GetOneForMarshal(i int) interface{} { func (a *Uint8) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + if a.IsValid(i) { vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data } else { vals[i] = nil } + } return json.Marshal(vals) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl 
b/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go.tmpl similarity index 87% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go.tmpl index 34d17fbf..df07f205 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numeric.gen.go.tmpl @@ -21,8 +21,8 @@ import ( "strings" "time" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/internal/json" ) {{range .In}} @@ -133,11 +133,45 @@ func (a *{{.Name}}) MarshalJSON() ([]byte, error) { {{else -}} vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { + {{if (eq .Name "Float32") -}} + if !a.IsValid(i) { + vals[i] = nil + continue + } + + f := a.Value(i) + v := strconv.FormatFloat(float64(f), 'g', -1, 32) + + switch v { + case "NaN", "+Inf", "-Inf": + vals[i] = v + default: + vals[i] = f + } + {{else if (eq .Name "Float64") -}} + if !a.IsValid(i) { + vals[i] = nil + continue + } + + f := a.Value(i) + switch { + case math.IsNaN(f): + vals[i] = "NaN" + case math.IsInf(f, 1): + vals[i] = "+Inf" + case math.IsInf(f, -1): + vals[i] = "-Inf" + default: + vals[i] = f + } + {{else}} if a.IsValid(i) { {{ if (eq .Size "1") }}vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data{{ else }}vals[i] = a.values[i]{{ end }} } else { vals[i] = nil } + {{end}} } {{end}} return json.Marshal(vals) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go index 7f01180f..1618dba0 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go @@ -27,11 +27,11 @@ import ( "sync/atomic" "time" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) type Int64Builder struct { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go.tmpl similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go.tmpl index cf663c03..e84e095c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen.go.tmpl @@ -17,11 +17,11 @@ package array import ( - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + 
"github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) {{range .In}} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen_test.go.tmpl similarity index 90% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen_test.go.tmpl index bc8c9933..a5d58f48 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/numericbuilder.gen_test.go.tmpl @@ -19,9 +19,9 @@ package array_test import ( "testing" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/memory" "github.com/stretchr/testify/assert" ) @@ -271,6 +271,29 @@ func Test{{.Name}}Builder_Resize(t *testing.T) { ab.Resize(32) assert.Equal(t, 5, ab.Len()) } + +func Test{{.Name}}BuilderUnmarshalJSON(t *testing.T) { + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + + bldr := array.New{{.Name}}Builder(mem) + defer bldr.Release() + + jsonstr := `[0, 1, "+Inf", 2, 3, "NaN", "NaN", 4, 5, "-Inf"]` + + err := bldr.UnmarshalJSON([]byte(jsonstr)) + assert.NoError(t, err) + + arr := bldr.New{{.Name}}Array() + defer arr.Release() + + assert.NotNil(t, arr) + + assert.False(t, math.IsInf(float64(arr.Value(0)), 0), arr.Value(0)) + assert.True(t, math.IsInf(float64(arr.Value(2)), 1), arr.Value(2)) + assert.True(t, math.IsNaN(float64(arr.Value(5))), arr.Value(5)) +} + {{end}} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/record.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/record.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/record.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/record.go index 0b0fe4c3..b8041e27 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/record.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/record.go @@ -22,10 +22,10 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // RecordReader reads a stream of records. @@ -50,7 +50,7 @@ type simpleRecords struct { } // NewRecordReader returns a simple iterator over the given slice of records. -func NewRecordReader(schema *arrow.Schema, recs []arrow.Record) (*simpleRecords, error) { +func NewRecordReader(schema *arrow.Schema, recs []arrow.Record) (RecordReader, error) { rs := &simpleRecords{ refCount: 1, schema: schema, @@ -124,7 +124,7 @@ type simpleRecord struct { // // NewRecord panics if the columns and schema are inconsistent. // NewRecord panics if rows is larger than the height of the columns. 
-func NewRecord(schema *arrow.Schema, cols []arrow.Array, nrows int64) *simpleRecord { +func NewRecord(schema *arrow.Schema, cols []arrow.Array, nrows int64) arrow.Record { rec := &simpleRecord{ refCount: 1, schema: schema, @@ -185,7 +185,7 @@ func (rec *simpleRecord) validate() error { return nil } - if len(rec.arrs) != len(rec.schema.Fields()) { + if len(rec.arrs) != rec.schema.NumFields() { return fmt.Errorf("arrow/array: number of columns/fields mismatch") } @@ -285,11 +285,11 @@ func NewRecordBuilder(mem memory.Allocator, schema *arrow.Schema) *RecordBuilder refCount: 1, mem: mem, schema: schema, - fields: make([]Builder, len(schema.Fields())), + fields: make([]Builder, schema.NumFields()), } - for i, f := range schema.Fields() { - b.fields[i] = NewBuilder(b.mem, f.Type) + for i := 0; i < schema.NumFields(); i++ { + b.fields[i] = NewBuilder(b.mem, schema.Field(i).Type) } return b @@ -397,8 +397,8 @@ func (b *RecordBuilder) UnmarshalJSON(data []byte) error { } } - for i, f := range b.schema.Fields() { - if !keylist[f.Name] { + for i := 0; i < b.schema.NumFields(); i++ { + if !keylist[b.schema.Field(i).Name] { b.fields[i].AppendNull() } } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/string.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/string.go similarity index 73% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/string.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/string.go index 86e27c97..5197e77f 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/string.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/string.go @@ -23,11 +23,17 @@ import ( "strings" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) +type StringLike interface { + arrow.Array + Value(int) string + ValueLen(int) int +} + // String represents an immutable sequence of variable-length UTF-8 strings. type String struct { array @@ -63,7 +69,7 @@ func (a *String) ValueStr(i int) string { // ValueOffset returns the offset of the value at index i. func (a *String) ValueOffset(i int) int { - if i < 0 || i > a.array.data.length { + if i < 0 || i >= a.array.data.length { panic("arrow/array: index out of range") } return int(a.offsets[i+a.array.data.offset]) @@ -220,6 +226,14 @@ func (a *LargeString) ValueOffset64(i int) int64 { return a.ValueOffset(i) } +func (a *LargeString) ValueLen(i int) int { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + beg := a.array.data.offset + i + return int(a.offsets[beg+1] - a.offsets[beg]) +} + func (a *LargeString) ValueOffsets() []int64 { beg := a.array.data.offset end := beg + a.array.data.length + 1 @@ -310,6 +324,113 @@ func arrayEqualLargeString(left, right *LargeString) bool { return true } +type StringView struct { + array + values []arrow.ViewHeader + dataBuffers []*memory.Buffer +} + +func NewStringViewData(data arrow.ArrayData) *StringView { + a := &StringView{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the String with a different set of Data. 
+func (a *StringView) Reset(data arrow.ArrayData) { + a.setData(data.(*Data)) +} + +func (a *StringView) setData(data *Data) { + if len(data.buffers) < 2 { + panic("len(data.buffers) < 2") + } + a.array.setData(data) + + if valueData := data.buffers[1]; valueData != nil { + a.values = arrow.ViewHeaderTraits.CastFromBytes(valueData.Bytes()) + } + + a.dataBuffers = data.buffers[2:] +} + +func (a *StringView) ValueHeader(i int) *arrow.ViewHeader { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + return &a.values[a.array.data.offset+i] +} + +func (a *StringView) Value(i int) string { + s := a.ValueHeader(i) + if s.IsInline() { + return s.InlineString() + } + start := s.BufferOffset() + buf := a.dataBuffers[s.BufferIndex()] + value := buf.Bytes()[start : start+int32(s.Len())] + return *(*string)(unsafe.Pointer(&value)) +} + +func (a *StringView) ValueLen(i int) int { + s := a.ValueHeader(i) + return s.Len() +} + +func (a *StringView) String() string { + var o strings.Builder + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + switch { + case a.IsNull(i): + o.WriteString(NullValueStr) + default: + fmt.Fprintf(&o, "%q", a.Value(i)) + } + } + o.WriteString("]") + return o.String() +} + +func (a *StringView) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.Value(i) +} + +func (a *StringView) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.Value(i) +} + +func (a *StringView) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := 0; i < a.Len(); i++ { + vals[i] = a.GetOneForMarshal(i) + } + return json.Marshal(vals) +} + +func arrayEqualStringView(left, right *StringView) bool { + leftBufs, rightBufs := left.dataBuffers, right.dataBuffers + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !left.ValueHeader(i).Equals(leftBufs, right.ValueHeader(i), rightBufs) { + return false + } + } + return true +} + // A StringBuilder is used to build a String array using the Append methods. type StringBuilder struct { *BinaryBuilder @@ -344,10 +465,6 @@ func (b *StringBuilder) Value(i int) string { return string(b.BinaryBuilder.Value(i)) } -// func (b *StringBuilder) UnsafeAppend(v string) { -// b.BinaryBuilder.UnsafeAppend([]byte(v)) -// } - // NewArray creates a String array from the memory buffers used by the builder and resets the StringBuilder // so it can be used to build a new array. func (b *StringBuilder) NewArray() arrow.Array { @@ -441,10 +558,6 @@ func (b *LargeStringBuilder) Value(i int) string { return string(b.BinaryBuilder.Value(i)) } -// func (b *LargeStringBuilder) UnsafeAppend(v string) { -// b.BinaryBuilder.UnsafeAppend([]byte(v)) -// } - // NewArray creates a String array from the memory buffers used by the builder and resets the StringBuilder // so it can be used to build a new array. 
func (b *LargeStringBuilder) NewArray() arrow.Array { @@ -504,9 +617,87 @@ func (b *LargeStringBuilder) UnmarshalJSON(data []byte) error { return b.Unmarshal(dec) } +type StringViewBuilder struct { + *BinaryViewBuilder +} + +func NewStringViewBuilder(mem memory.Allocator) *StringViewBuilder { + bldr := &StringViewBuilder{ + BinaryViewBuilder: NewBinaryViewBuilder(mem), + } + bldr.dtype = arrow.BinaryTypes.StringView + return bldr +} + +func (b *StringViewBuilder) Append(v string) { + b.BinaryViewBuilder.AppendString(v) +} + +func (b *StringViewBuilder) AppendValues(v []string, valid []bool) { + b.BinaryViewBuilder.AppendStringValues(v, valid) +} + +func (b *StringViewBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case string: + b.Append(v) + case []byte: + b.BinaryViewBuilder.Append(v) + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf([]byte{}), + Offset: dec.InputOffset(), + } + } + return nil +} + +func (b *StringViewBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *StringViewBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary view builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +func (b *StringViewBuilder) NewArray() arrow.Array { + return b.NewStringViewArray() +} + +func (b *StringViewBuilder) NewStringViewArray() (a *StringView) { + data := b.newData() + a = NewStringViewData(data) + data.Release() + return +} + type StringLikeBuilder interface { Builder Append(string) + AppendValues([]string, []bool) UnsafeAppend([]byte) ReserveData(int) } @@ -514,8 +705,14 @@ type StringLikeBuilder interface { var ( _ arrow.Array = (*String)(nil) _ arrow.Array = (*LargeString)(nil) + _ arrow.Array = (*StringView)(nil) _ Builder = (*StringBuilder)(nil) _ Builder = (*LargeStringBuilder)(nil) + _ Builder = (*StringViewBuilder)(nil) _ StringLikeBuilder = (*StringBuilder)(nil) _ StringLikeBuilder = (*LargeStringBuilder)(nil) + _ StringLikeBuilder = (*StringViewBuilder)(nil) + _ StringLike = (*String)(nil) + _ StringLike = (*LargeString)(nil) + _ StringLike = (*StringView)(nil) ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/struct.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/struct.go index 248a25bf..7f65f8d2 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/struct.go @@ -23,11 +23,11 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // Struct represents an ordered sequence of 
relative types. @@ -43,6 +43,40 @@ func NewStructArray(cols []arrow.Array, names []string) (*Struct, error) { return NewStructArrayWithNulls(cols, names, nil, 0, 0) } +// NewStructArrayWithFields builds a new Struct Array using the passed columns +// and provided fields. As opposed to NewStructArray, this allows you to provide +// the full fields to utilize for the struct column instead of just the names. +func NewStructArrayWithFields(cols []arrow.Array, fields []arrow.Field) (*Struct, error) { + if len(cols) != len(fields) { + return nil, fmt.Errorf("%w: mismatching number of fields and child arrays", arrow.ErrInvalid) + } + if len(cols) == 0 { + return nil, fmt.Errorf("%w: can't infer struct array length with 0 child arrays", arrow.ErrInvalid) + } + + length := cols[0].Len() + children := make([]arrow.ArrayData, len(cols)) + for i, c := range cols { + if length != c.Len() { + return nil, fmt.Errorf("%w: mismatching child array lengths", arrow.ErrInvalid) + } + if !arrow.TypeEqual(fields[i].Type, c.DataType()) { + return nil, fmt.Errorf("%w: mismatching data type for child #%d, field says '%s', got '%s'", + arrow.ErrInvalid, i, fields[i].Type, c.DataType()) + } + if !fields[i].Nullable && c.NullN() > 0 { + return nil, fmt.Errorf("%w: field says not-nullable, child #%d has nulls", + arrow.ErrInvalid, i) + } + + children[i] = c.Data() + } + + data := NewData(arrow.StructOf(fields...), length, []*memory.Buffer{nil}, children, 0, 0) + defer data.Release() + return NewStructData(data), nil +} + // NewStructArrayWithNulls is like NewStructArray as a convenience function, // but also takes in a null bitmap, the number of nulls, and an optional offset // to use for creating the Struct Array. @@ -224,7 +258,7 @@ func NewStructBuilder(mem memory.Allocator, dtype *arrow.StructType) *StructBuil b := &StructBuilder{ builder: builder{refCount: 1, mem: mem}, dtype: dtype, - fields: make([]Builder, len(dtype.Fields())), + fields: make([]Builder, dtype.NumFields()), } for i, f := range dtype.Fields() { b.fields[i] = NewBuilder(b.mem, f.Type) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/table.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/table.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/table.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/table.go index 6456992e..95ac67f2 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/table.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/table.go @@ -23,8 +23,8 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) // NewColumnSlice returns a new zero-copy slice of the column with the indicated @@ -99,7 +99,7 @@ type simpleTable struct { // // NewTable panics if the columns and schema are inconsistent. // NewTable panics if rows is larger than the height of the columns. -func NewTable(schema *arrow.Schema, cols []arrow.Column, rows int64) *simpleTable { +func NewTable(schema *arrow.Schema, cols []arrow.Column, rows int64) arrow.Table { tbl := simpleTable{ refCount: 1, rows: rows, @@ -136,12 +136,12 @@ func NewTable(schema *arrow.Schema, cols []arrow.Column, rows int64) *simpleTabl // - len(schema.Fields) != len(data) // - the total length of each column's array slice (ie: number of rows // in the column) aren't the same for all columns. 
-func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) *simpleTable { - if len(data) != len(schema.Fields()) { +func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) arrow.Table { + if len(data) != schema.NumFields() { panic("array/table: mismatch in number of columns and data for creating a table") } - cols := make([]arrow.Column, len(schema.Fields())) + cols := make([]arrow.Column, schema.NumFields()) for i, arrs := range data { field := schema.Field(i) chunked := arrow.NewChunked(field.Type, arrs) @@ -175,9 +175,9 @@ func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) *simpleTable // NewTableFromRecords returns a new basic, non-lazy in-memory table. // // NewTableFromRecords panics if the records and schema are inconsistent. -func NewTableFromRecords(schema *arrow.Schema, recs []arrow.Record) *simpleTable { +func NewTableFromRecords(schema *arrow.Schema, recs []arrow.Record) arrow.Table { arrs := make([]arrow.Array, len(recs)) - cols := make([]arrow.Column, len(schema.Fields())) + cols := make([]arrow.Column, schema.NumFields()) defer func(cols []arrow.Column) { for i := range cols { @@ -224,7 +224,7 @@ func (tbl *simpleTable) NumCols() int64 { return int64(len(tbl.cols) func (tbl *simpleTable) Column(i int) *arrow.Column { return &tbl.cols[i] } func (tbl *simpleTable) validate() { - if len(tbl.cols) != len(tbl.schema.Fields()) { + if len(tbl.cols) != tbl.schema.NumFields() { panic(errors.New("arrow/array: table schema mismatch")) } for i, col := range tbl.cols { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/timestamp.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/timestamp.go index 2928b1fc..37359db1 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/timestamp.go @@ -24,11 +24,11 @@ import ( "sync/atomic" "time" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/json" ) // Timestamp represents an immutable sequence of arrow.Timestamp values. 
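The len(x.Fields()) to x.NumFields() substitution recurs throughout this upgrade (schemas, struct types and other nested types all expose it in v18). A minimal sketch of the pattern with an illustrative schema:

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
)

func main() {
	schema := arrow.NewSchema([]arrow.Field{
		{Name: "id", Type: arrow.PrimitiveTypes.Int64},
		{Name: "name", Type: arrow.BinaryTypes.String},
	}, nil)

	// Previously sized as len(schema.Fields()); NumFields() reports the
	// count directly, without materializing the field slice that
	// Fields() may copy.
	cols := make([]arrow.Column, schema.NumFields())
	fmt.Println(len(cols), schema.NumFields())
}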
@@ -91,16 +91,15 @@ func (a *Timestamp) ValueStr(i int) string { return NullValueStr } - dt := a.DataType().(*arrow.TimestampType) - z, _ := dt.GetZone() - return a.values[i].ToTime(dt.Unit).In(z).Format("2006-01-02 15:04:05.999999999Z0700") + toTime, _ := a.DataType().(*arrow.TimestampType).GetToTimeFunc() + return toTime(a.values[i]).Format("2006-01-02 15:04:05.999999999Z0700") } func (a *Timestamp) GetOneForMarshal(i int) interface{} { - if a.IsNull(i) { - return nil + if val := a.ValueStr(i); val != NullValueStr { + return val } - return a.values[i].ToTime(a.DataType().(*arrow.TimestampType).Unit).Format("2006-01-02 15:04:05.999999999") + return nil } func (a *Timestamp) MarshalJSON() ([]byte, error) { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/union.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/union.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/union.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/union.go index 869355ac..6f3a9a6e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/union.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/union.go @@ -25,12 +25,12 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/bitutils" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/bitutils" + "github.com/apache/arrow-go/v18/internal/json" ) // Union is a convenience interface to encompass both Sparse and Dense @@ -69,7 +69,7 @@ type Union interface { // or arrow.DenseMode. Mode() arrow.UnionMode // Field returns the requested child array for this union. Returns nil if a - // non-existent position is passed in. + // nonexistent position is passed in. // // The appropriate child for an index can be retrieved with Field(ChildID(index)) Field(pos int) arrow.Array @@ -896,7 +896,7 @@ func NewEmptySparseUnionBuilder(mem memory.Allocator) *SparseUnionBuilder { // children and type codes. Builders will be constructed for each child // using the fields in typ func NewSparseUnionBuilder(mem memory.Allocator, typ *arrow.SparseUnionType) *SparseUnionBuilder { - children := make([]Builder, len(typ.Fields())) + children := make([]Builder, typ.NumFields()) for i, f := range typ.Fields() { children[i] = NewBuilder(mem, f.Type) defer children[i].Release() @@ -980,7 +980,7 @@ func (b *SparseUnionBuilder) AppendEmptyValues(n int) { // // After appending to the corresponding child builder, all other child // builders should have a null or empty value appended to them (although -// this is not enfoced and any value is theoretically allowed and will be +// this is not enforced and any value is theoretically allowed and will be // ignored). func (b *SparseUnionBuilder) Append(nextType arrow.UnionTypeCode) { b.typesBuilder.AppendValue(nextType) @@ -1129,7 +1129,7 @@ func NewEmptyDenseUnionBuilder(mem memory.Allocator) *DenseUnionBuilder { // children and type codes. 
Builders will be constructed for each child // using the fields in typ func NewDenseUnionBuilder(mem memory.Allocator, typ *arrow.DenseUnionType) *DenseUnionBuilder { - children := make([]Builder, 0, len(typ.Fields())) + children := make([]Builder, 0, typ.NumFields()) defer func() { for _, child := range children { child.Release() diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/util.go b/vendor/github.com/apache/arrow-go/v18/arrow/array/util.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/util.go rename to vendor/github.com/apache/arrow-go/v18/arrow/array/util.go index 54d15a80..c8316ab4 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/util.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/array/util.go @@ -22,11 +22,11 @@ import ( "io" "strings" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/hashing" - "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/hashing" + "github.com/apache/arrow-go/v18/internal/json" ) func min(a, b int) int { @@ -428,7 +428,7 @@ func (n *nullArrayFactory) create() *Data { } if nf, ok := dt.(arrow.NestedType); ok { - childData = make([]arrow.ArrayData, len(nf.Fields())) + childData = make([]arrow.ArrayData, nf.NumFields()) } switch dt := dt.(type) { @@ -504,7 +504,7 @@ func (n *nullArrayFactory) create() *Data { return out } -func (n *nullArrayFactory) createChild(dt arrow.DataType, i, length int) *Data { +func (n *nullArrayFactory) createChild(_ arrow.DataType, i, length int) *Data { childFactory := &nullArrayFactory{ mem: n.mem, dt: n.dt.(arrow.NestedType).Fields()[i].Type, len: length, buf: n.buf} @@ -521,3 +521,7 @@ func MakeArrayOfNull(mem memory.Allocator, dt arrow.DataType, length int) arrow. defer data.Release() return MakeFromData(data) } + +func stripNulls(s string) string { + return strings.TrimRight(s, "\x00") +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go b/vendor/github.com/apache/arrow-go/v18/arrow/arrio/arrio.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go rename to vendor/github.com/apache/arrow-go/v18/arrow/arrio/arrio.go index 466a93a6..22fabc22 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/arrio/arrio.go @@ -22,7 +22,7 @@ import ( "errors" "io" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) // Reader is the interface that wraps the Read method. 
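Timestamp.ValueStr above now routes through the type's GetToTimeFunc, so string output respects the column's unit and time zone in one resolved call path. A minimal sketch of that call path; the unit, zone and value below are illustrative:

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
)

func main() {
	dt := &arrow.TimestampType{Unit: arrow.Millisecond, TimeZone: "America/New_York"}

	// GetToTimeFunc resolves the unit and zone once, so each value
	// conversion is just a function call.
	toTime, err := dt.GetToTimeFunc()
	if err != nil {
		panic(err)
	}

	ts := arrow.Timestamp(1700000000000) // milliseconds since the Unix epoch
	fmt.Println(toTime(ts).Format("2006-01-02 15:04:05.999999999Z0700"))
}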
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/Makefile b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/Makefile rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/Makefile diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_arm64.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_arm64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_arm64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_avx2_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_noasm.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_noasm.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_noasm.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_noasm.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_ppc64le.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_ppc64le.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_ppc64le.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_ppc64le.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_s390x.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_s390x.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_s390x.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.s 
b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmap_ops_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmaps.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmaps.go index 2e9c0601..c6b156e7 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitmaps.go @@ -22,9 +22,9 @@ import ( "math/bits" "unsafe" - "github.com/apache/arrow/go/v14/arrow/endian" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" ) // BitmapReader is a simple bitmap reader for a byte slice. @@ -360,7 +360,7 @@ func (bm *BitmapWordWriter) PutNextTrailingByte(b byte, validBits int) { bm.bitmap = bm.bitmap[1:] } else { debug.Assert(validBits > 0 && validBits < 8, "invalid valid bits in bitmap word writer") - debug.Assert(BytesForBits(int64(bm.offset+validBits)) <= int64(len(bm.bitmap)), "writing trailiing byte outside of bounds of bitmap") + debug.Assert(BytesForBits(int64(bm.offset+validBits)) <= int64(len(bm.bitmap)), "writing trailing byte outside of bounds of bitmap") wr := NewBitmapWriter(bm.bitmap, int(bm.offset), validBits) for i := 0; i < validBits; i++ { if b&0x01 != 0 { @@ -465,20 +465,24 @@ type bitOp struct { var ( bitAndOp = bitOp{ - opWord: func(l, r uint64) uint64 { return l & r }, - opByte: func(l, r byte) byte { return l & r }, + opWord: func(l, r uint64) uint64 { return l & r }, + opByte: func(l, r byte) byte { return l & r }, + opAligned: alignedBitAndGo, } bitOrOp = bitOp{ - opWord: func(l, r uint64) uint64 { return l | r }, - opByte: func(l, r byte) byte { return l | r }, + opWord: func(l, r uint64) uint64 { return l | r }, + opByte: func(l, r byte) byte { return l | r }, + opAligned: alignedBitOrGo, } bitAndNotOp = bitOp{ - opWord: func(l, r uint64) uint64 { return l &^ r }, - opByte: func(l, r byte) byte { return l &^ r }, + opWord: func(l, r uint64) uint64 { return l &^ r }, + opByte: func(l, r byte) byte { return l &^ r }, + opAligned: alignedBitAndNotGo, } bitXorOp = bitOp{ - opWord: func(l, r uint64) uint64 { return l ^ r }, - opByte: func(l, r byte) byte { return l ^ r }, + opWord: func(l, r uint64) uint64 { return l ^ r }, + opByte: func(l, r byte) byte { return l ^ r }, + opAligned: alignedBitXorGo, } ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitutil.go similarity index 86% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitutil.go index a4a1519b..47af2b2e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitutil.go @@ -19,10 +19,9 @@ package bitutil import ( "math" "math/bits" - "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/memory" ) var ( @@ -99,8 +98,6 @@ func 
countSetBitsWithOffset(buf []byte, offset, n int) int { count := 0 beg := offset - end := offset + n - begU8 := roundUp(beg, uint64SizeBits) init := min(n, begU8-beg) @@ -110,27 +107,8 @@ func countSetBitsWithOffset(buf []byte, offset, n int) int { } } - nU64 := (n - init) / uint64SizeBits - begU64 := begU8 / uint64SizeBits - endU64 := begU64 + nU64 - bufU64 := bytesToUint64(buf) - if begU64 < len(bufU64) { - for _, v := range bufU64[begU64:endU64] { - count += bits.OnesCount64(v) - } - } - - // FIXME: use a fallback to bits.OnesCount8 - // before counting the tail bits. - - tail := beg + init + nU64*uint64SizeBits - for i := tail; i < end; i++ { - if BitIsSet(buf, i) { - count++ - } - } - - return count + begU64 := BytesForBits(int64(beg + init)) + return count + CountSetBits(buf[begU64:], 0, n-init) } func roundUp(v, f int) int { @@ -149,15 +127,6 @@ const ( uint64SizeBits = uint64SizeBytes * 8 ) -func bytesToUint64(b []byte) []uint64 { - if cap(b) < uint64SizeBytes { - return nil - } - - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - return unsafe.Slice((*uint64)(unsafe.Pointer(h.Data)), cap(b)/uint64SizeBytes)[:len(b)/uint64SizeBytes] -} - var ( // PrecedingBitmask is a convenience set of values as bitmasks for checking // prefix bits of a byte diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitutil_bytes.go similarity index 71% rename from vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitutil_bytes.go index a71287fa..09dd5cbc 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/bitutil_bytes.go @@ -14,10 +14,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -package array +//go:build go1.20 || tinygo -// ExtensionBuilderWrapper is an interface that you need to implement in your custom extension type if you want to provide a customer builder as well. -// See example in ./arrow/internal/testing/types/extension_types.go -type ExtensionBuilderWrapper interface { - NewBuilder(bldr *ExtensionBuilder) Builder +package bitutil + +import ( + "unsafe" +) + +func bytesToUint64(b []byte) []uint64 { + if len(b) < uint64SizeBytes { + return nil + } + + ptr := unsafe.SliceData(b) + if ptr == nil { + return nil + } + + return unsafe.Slice((*uint64)(unsafe.Pointer(ptr)), + len(b)/uint64SizeBytes) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/endian_default.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/endian_default.go index 9f5d3cdc..ecbbaa70 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/endian_default.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
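The rewritten bytesToUint64 above swaps the reflect.SliceHeader trick for unsafe.SliceData and unsafe.Slice (hence the go1.20 build tag). A minimal sketch of the same zero-copy reslicing pattern, using a hypothetical asBytes helper that is not part of the vendored code:

package main

import (
	"fmt"
	"unsafe"
)

// asBytes reinterprets a typed slice as its underlying bytes without
// copying, mirroring how the vendored code now builds buffer views.
func asBytes[T int32 | int64](vals []T) []byte {
	if len(vals) == 0 {
		return nil
	}
	var zero T
	return unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(vals))),
		len(vals)*int(unsafe.Sizeof(zero)))
}

func main() {
	offsets := []int32{0, 7}
	fmt.Println(asBytes(offsets)) // raw little-endian bytes on most platforms
}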
+//go:build !s390x // +build !s390x package bitutil diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/endian_s390x.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go rename to vendor/github.com/apache/arrow-go/v18/arrow/bitutil/endian_s390x.go index a9bba439..e99605f5 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/bitutil/endian_s390x.go @@ -18,7 +18,7 @@ package bitutil import ( "math/bits" - "unsafe" + "unsafe" ) var toFromLEFunc = bits.ReverseBytes64 diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compare.go b/vendor/github.com/apache/arrow-go/v18/arrow/compare.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compare.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compare.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/arithmetic.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/arithmetic.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/arithmetic.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/arithmetic.go index 2fb95f06..95936678 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/arithmetic.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/arithmetic.go @@ -22,12 +22,12 @@ import ( "context" "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/scalar" ) type ( @@ -678,8 +678,8 @@ func RegisterScalarArithmetic(reg FunctionRegistry) { // the allocated space is for duration (an int64) but we // wrote the time32 - time32 as if the output was time32 // so a quick copy in reverse expands the int32s to int64. 
- rawData := exec.GetData[int32](out.Buffers[1].Buf) - outData := exec.GetData[int64](out.Buffers[1].Buf) + rawData := arrow.GetData[int32](out.Buffers[1].Buf) + outData := arrow.GetData[int64](out.Buffers[1].Buf) for i := out.Len - 1; i >= 0; i-- { outData[i] = int64(rawData[i]) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/cast.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/cast.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/cast.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/cast.go index 8b720a2b..bd239b58 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/cast.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/cast.go @@ -23,11 +23,11 @@ import ( "fmt" "sync" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" ) var ( @@ -266,8 +266,8 @@ func CastStruct(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) opts = ctx.State.(kernels.CastState) inType = batch.Values[0].Array.Type.(*arrow.StructType) outType = out.Type.(*arrow.StructType) - inFieldCount = len(inType.Fields()) - outFieldCount = len(outType.Fields()) + inFieldCount = inType.NumFields() + outFieldCount = outType.NumFields() ) fieldsToSelect := make([]int, outFieldCount) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/datum.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/datum.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/datum.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/datum.go index 1d3c1b4d..438f10a3 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/datum.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/datum.go @@ -21,9 +21,9 @@ package compute import ( "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/scalar" ) //go:generate go run golang.org/x/tools/cmd/stringer -type=DatumKind -linecomment @@ -250,7 +250,7 @@ func (d *TableDatum) Equals(other Datum) bool { // an array.Table gets a TableDatum // a scalar.Scalar gets a ScalarDatum // -// Anything else is passed to scalar.MakeScalar and recieves a scalar +// Anything else is passed to scalar.MakeScalar and receives a scalar // datum of that appropriate type. 
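A minimal sketch of NewDatum as described by the doc comment above, using an illustrative plain Go value; arrays, records and tables are wrapped into their corresponding datum kinds the same way:

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow/compute"
)

func main() {
	// A plain Go value falls through to scalar.MakeScalar and comes back
	// as a scalar datum of the matching Arrow type.
	d := compute.NewDatum(int64(42))
	defer d.Release()
	fmt.Println(d.Kind(), d)
}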
func NewDatum(value interface{}) Datum { switch v := value.(type) { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/datumkind_string.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/datumkind_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/datumkind_string.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/datumkind_string.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/doc.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/doc.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/doc.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/doc.go index 53a164e6..7c763cb1 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/doc.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/doc.go @@ -23,7 +23,7 @@ // is an attempt to provide for those users, and in general create a // native-go arrow compute engine. // -// The overwhemling majority of things in this package require go1.18 as +// The overwhelming majority of things in this package require go1.18 as // it utilizes generics. The files in this package and its sub-packages // are all excluded from being built by go versions lower than 1.18 so // that the larger Arrow module itself is still compatible with go1.17. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/exec.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/exec.go index 84e3310c..d37d95a7 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec.go @@ -22,9 +22,9 @@ import ( "context" "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) func haveChunkedArray(values []Datum) bool { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/hash_util.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/hash_util.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/hash_util.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/hash_util.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/kernel.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/kernel.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/kernel.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/kernel.go index 327426da..d7de176c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/kernel.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/kernel.go @@ -24,10 +24,10 @@ import ( "hash/maphash" "strings" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" "golang.org/x/exp/slices" ) diff --git 
a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/span.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/span.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/span.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/span.go index b6d240fa..2585d9a6 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/span.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/span.go @@ -19,15 +19,14 @@ package exec import ( - "reflect" "sync/atomic" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/scalar" ) // BufferSpan is a lightweight Buffer holder for ArraySpans that does not @@ -250,22 +249,6 @@ func (a *ArraySpan) resizeChildren(i int) { } } -// convenience function for populating the offsets buffer from a scalar -// value's size. -func setOffsetsForScalar[T int32 | int64](span *ArraySpan, buf []T, valueSize int64, bufidx int) { - buf[0] = 0 - buf[1] = T(valueSize) - - b := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - s := (*reflect.SliceHeader)(unsafe.Pointer(&span.Buffers[bufidx].Buf)) - s.Data = b.Data - s.Len = 2 * int(unsafe.Sizeof(T(0))) - s.Cap = s.Len - - span.Buffers[bufidx].Owner = nil - span.Buffers[bufidx].SelfAlloc = false -} - // FillFromScalar populates this ArraySpan as if it were a 1 length array // with the single value equal to the passed in Scalar. func (a *ArraySpan) FillFromScalar(val scalar.Scalar) { @@ -633,7 +616,7 @@ func FillZeroLength(dt arrow.DataType, span *ArraySpan) { return } - span.resizeChildren(len(nt.Fields())) + span.resizeChildren(nt.NumFields()) for i, f := range nt.Fields() { FillZeroLength(f.Type, &span.Children[i]) } diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/math.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/span_offsets.go similarity index 62% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/math.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/span_offsets.go index 62cf96ce..d2d03988 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/utils/math.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/span_offsets.go @@ -14,36 +14,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -package utils +//go:build go1.20 || tinygo -// Min is a convenience Min function for int64 -func Min(a, b int64) int64 { - if a < b { - return a - } - return b -} +package exec -// MinInt is a convenience Min function for int -func MinInt(a, b int) int { - if a < b { - return a - } - return b -} +import ( + "unsafe" +) -// Max is a convenience Max function for int64 -func Max(a, b int64) int64 { - if a > b { - return a - } - return b -} +// convenience function for populating the offsets buffer from a scalar +// value's size. 
+func setOffsetsForScalar[T int32 | int64](span *ArraySpan, buf []T, valueSize int64, bufidx int) { + buf[0] = 0 + buf[1] = T(valueSize) + + span.Buffers[bufidx].Buf = unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(buf))), + 2*int(unsafe.Sizeof(T(0)))) -// MaxInt is a convenience Max function for int -func MaxInt(a, b int) int { - if a > b { - return a - } - return b + span.Buffers[bufidx].Owner = nil + span.Buffers[bufidx].SelfAlloc = false } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/utils.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/utils.go similarity index 52% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/utils.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/utils.go index 6d83b75d..e3685205 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/utils.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/exec/utils.go @@ -21,96 +21,21 @@ package exec import ( "fmt" "math" - "reflect" "sync/atomic" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/memory" "golang.org/x/exp/constraints" "golang.org/x/exp/slices" ) -// IntTypes is a type constraint for raw values represented as signed -// integer types by Arrow. We aren't just using constraints.Signed -// because we don't want to include the raw `int` type here whose size -// changes based on the architecture (int32 on 32-bit architectures and -// int64 on 64-bit architectures). -// -// This will also cover types like MonthInterval or the time types -// as their underlying types are int32 and int64 which will get covered -// by using the ~ -type IntTypes interface { - ~int8 | ~int16 | ~int32 | ~int64 -} - -// UintTypes is a type constraint for raw values represented as unsigned -// integer types by Arrow. We aren't just using constraints.Unsigned -// because we don't want to include the raw `uint` type here whose size -// changes based on the architecture (uint32 on 32-bit architectures and -// uint64 on 64-bit architectures). We also don't want to include uintptr -type UintTypes interface { - ~uint8 | ~uint16 | ~uint32 | ~uint64 -} - -// FloatTypes is a type constraint for raw values for representing -// floating point values in Arrow. This consists of constraints.Float and -// float16.Num -type FloatTypes interface { - float16.Num | constraints.Float -} - -// NumericTypes is a type constraint for just signed/unsigned integers -// and float32/float64. -type NumericTypes interface { - IntTypes | UintTypes | constraints.Float -} - -// DecimalTypes is a type constraint for raw values representing larger -// decimal type values in Arrow, specifically decimal128 and decimal256. -type DecimalTypes interface { - decimal128.Num | decimal256.Num -} - -// FixedWidthTypes is a type constraint for raw values in Arrow that -// can be represented as FixedWidth byte slices. Specifically this is for -// using Go generics to easily re-type a byte slice to a properly-typed -// slice. 
Booleans are excluded here since they are represented by Arrow -// as a bitmap and thus the buffer can't be just reinterpreted as a []bool -type FixedWidthTypes interface { - IntTypes | UintTypes | - FloatTypes | DecimalTypes | - arrow.DayTimeInterval | arrow.MonthDayNanoInterval -} - -type TemporalTypes interface { - arrow.Date32 | arrow.Date64 | arrow.Time32 | arrow.Time64 | - arrow.Timestamp | arrow.Duration | arrow.DayTimeInterval | - arrow.MonthInterval | arrow.MonthDayNanoInterval -} - -func GetValues[T FixedWidthTypes](data arrow.ArrayData, i int) []T { - if data.Buffers()[i] == nil || data.Buffers()[i].Len() == 0 { - return nil - } - ret := unsafe.Slice((*T)(unsafe.Pointer(&data.Buffers()[i].Bytes()[0])), data.Offset()+data.Len()) - return ret[data.Offset():] -} - -func GetOffsets[T int32 | int64](data arrow.ArrayData, i int) []T { - ret := unsafe.Slice((*T)(unsafe.Pointer(&data.Buffers()[i].Bytes()[0])), data.Offset()+data.Len()+1) - return ret[data.Offset():] -} - // GetSpanValues returns a properly typed slice by reinterpreting // the buffer at index i using unsafe.Slice. This will take into account // the offset of the given ArraySpan. -func GetSpanValues[T FixedWidthTypes](span *ArraySpan, i int) []T { +func GetSpanValues[T arrow.FixedWidthType](span *ArraySpan, i int) []T { if len(span.Buffers[i].Buf) == 0 { return nil } @@ -126,16 +51,6 @@ func GetSpanOffsets[T int32 | int64](span *ArraySpan, i int) []T { return ret[span.Offset:] } -func GetBytes[T FixedWidthTypes](in []T) []byte { - var z T - return unsafe.Slice((*byte)(unsafe.Pointer(&in[0])), len(in)*int(unsafe.Sizeof(z))) -} - -func GetData[T FixedWidthTypes](in []byte) []T { - var z T - return unsafe.Slice((*T)(unsafe.Pointer(&in[0])), len(in)/int(unsafe.Sizeof(z))) -} - func Min[T constraints.Ordered](a, b T) T { if a < b { return a @@ -165,59 +80,22 @@ func OptionsInit[T any](_ *KernelCtx, args KernelInitArgs) (KernelState, error) arrow.ErrInvalid) } -var typMap = map[reflect.Type]arrow.DataType{ - reflect.TypeOf(false): arrow.FixedWidthTypes.Boolean, - reflect.TypeOf(int8(0)): arrow.PrimitiveTypes.Int8, - reflect.TypeOf(int16(0)): arrow.PrimitiveTypes.Int16, - reflect.TypeOf(int32(0)): arrow.PrimitiveTypes.Int32, - reflect.TypeOf(int64(0)): arrow.PrimitiveTypes.Int64, - reflect.TypeOf(uint8(0)): arrow.PrimitiveTypes.Uint8, - reflect.TypeOf(uint16(0)): arrow.PrimitiveTypes.Uint16, - reflect.TypeOf(uint32(0)): arrow.PrimitiveTypes.Uint32, - reflect.TypeOf(uint64(0)): arrow.PrimitiveTypes.Uint64, - reflect.TypeOf(float32(0)): arrow.PrimitiveTypes.Float32, - reflect.TypeOf(float64(0)): arrow.PrimitiveTypes.Float64, - reflect.TypeOf(string("")): arrow.BinaryTypes.String, - reflect.TypeOf(arrow.Date32(0)): arrow.FixedWidthTypes.Date32, - reflect.TypeOf(arrow.Date64(0)): arrow.FixedWidthTypes.Date64, - reflect.TypeOf(true): arrow.FixedWidthTypes.Boolean, - reflect.TypeOf(float16.Num{}): arrow.FixedWidthTypes.Float16, - reflect.TypeOf([]byte{}): arrow.BinaryTypes.Binary, -} - -// GetDataType returns the appropriate arrow.DataType for the given type T -// only for non-parametric types. This uses a map and reflection internally -// so don't call this in a tight loop, instead call this once and then use -// a closure with the result. -func GetDataType[T NumericTypes | bool | string | []byte | float16.Num]() arrow.DataType { - var z T - return typMap[reflect.TypeOf(z)] -} - -// GetType returns the appropriate arrow.Type type T, only for non-parameteric -// types. 
This uses a map and reflection internally so don't call this in -// a tight loop, instead call it once and then use a closure with the result. -func GetType[T NumericTypes | bool | string]() arrow.Type { - var z T - return typMap[reflect.TypeOf(z)].ID() -} - -type arrayBuilder[T NumericTypes | bool] interface { +type arrayBuilder[T arrow.NumericType | bool] interface { array.Builder Append(T) AppendValues([]T, []bool) } -func ArrayFromSlice[T NumericTypes | bool](mem memory.Allocator, data []T) arrow.Array { - bldr := array.NewBuilder(mem, typMap[reflect.TypeOf(data).Elem()]).(arrayBuilder[T]) +func ArrayFromSlice[T arrow.NumericType | bool](mem memory.Allocator, data []T) arrow.Array { + bldr := array.NewBuilder(mem, arrow.GetDataType[T]()).(arrayBuilder[T]) defer bldr.Release() bldr.AppendValues(data, nil) return bldr.NewArray() } -func ArrayFromSliceWithValid[T NumericTypes | bool](mem memory.Allocator, data []T, valid []bool) arrow.Array { - bldr := array.NewBuilder(mem, typMap[reflect.TypeOf(data).Elem()]).(arrayBuilder[T]) +func ArrayFromSliceWithValid[T arrow.NumericType | bool](mem memory.Allocator, data []T, valid []bool) arrow.Array { + bldr := array.NewBuilder(mem, arrow.GetDataType[T]()).(arrayBuilder[T]) defer bldr.Release() bldr.AppendValues(data, valid) @@ -323,7 +201,7 @@ func (c *ChunkResolver) Resolve(idx int64) (chunk, index int64) { } type arrayTypes interface { - FixedWidthTypes | TemporalTypes | bool | string | []byte + arrow.FixedWidthType | arrow.TemporalType | bool | string | []byte } type ArrayIter[T arrayTypes] interface { @@ -345,11 +223,11 @@ func (b *BoolIter) Next() (out bool) { return } -type PrimitiveIter[T FixedWidthTypes] struct { +type PrimitiveIter[T arrow.FixedWidthType] struct { Values []T } -func NewPrimitiveIter[T FixedWidthTypes](arr *ArraySpan) ArrayIter[T] { +func NewPrimitiveIter[T arrow.FixedWidthType](arr *ArraySpan) ArrayIter[T] { return &PrimitiveIter[T]{Values: GetSpanValues[T](arr, 1)} } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/executor.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/executor.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/executor.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/executor.go index 6da7ed12..54c65adc 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/executor.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/executor.go @@ -25,14 +25,14 @@ import ( "runtime" "sync" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/internal" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/internal" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/scalar" ) // ExecCtx holds simple contextual information for execution @@ -171,6 +171,8 @@ func addComputeDataPrealloc(dt arrow.DataType, widths []bufferPrealloc) []buffer return append(widths, bufferPrealloc{bitWidth: 32, addLen: 1}) case arrow.LARGE_BINARY, 
arrow.LARGE_STRING, arrow.LARGE_LIST: return append(widths, bufferPrealloc{bitWidth: 64, addLen: 1}) + case arrow.STRING_VIEW, arrow.BINARY_VIEW: + return append(widths, bufferPrealloc{bitWidth: arrow.ViewHeaderSizeBytes * 8}) } return widths } @@ -389,7 +391,7 @@ func inferBatchLength(values []Datum) (length int64, allSame bool) { type KernelExecutor interface { // Init must be called *after* the kernel's init method and any // KernelState must be set into the KernelCtx *before* calling - // this Init method. This is to faciliate the case where + // this Init method. This is to facilitate the case where // Init may be expensive and does not need to be called // again for each execution of the kernel. For example, // the same lookup table can be re-used for all scanned batches @@ -585,8 +587,7 @@ func (s *scalarExecutor) executeSpans(data chan<- Datum) (err error) { if s.preallocContiguous { // make one big output alloc - prealloc := s.prepareOutput(int(s.iterLen)) - output = *prealloc + output := s.prepareOutput(int(s.iterLen)) output.Offset = 0 var resultOffset int64 @@ -596,15 +597,19 @@ func (s *scalarExecutor) executeSpans(data chan<- Datum) (err error) { break } output.SetSlice(resultOffset, input.Len) - err = s.executeSingleSpan(&input, &output) + err = s.executeSingleSpan(&input, output) resultOffset = nextOffset } if err != nil { - prealloc.Release() + output.Release() return } - return s.emitResult(prealloc, data) + if output.Offset != 0 { + output.SetSlice(0, s.iterLen) + } + + return s.emitResult(output, data) } // fully preallocating, but not contiguously @@ -1007,9 +1012,10 @@ func (v *vectorExecutor) WrapResults(ctx context.Context, out <-chan Datum, hasC case <-ctx.Done(): return nil case output = <-out: - if output == nil { + if output == nil || ctx.Err() != nil { return nil } + // if the inputs contained at least one chunked array // then we want to return chunked output if hasChunked { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/expression.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/expression.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/expression.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/expression.go index 9f20c970..88e1dde3 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/expression.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/expression.go @@ -28,14 +28,14 @@ import ( "strconv" "strings" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/ipc" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/ipc" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/scalar" ) var hashSeed = maphash.MakeSeed() diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/fieldref.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/fieldref.go similarity index 90% rename from 
vendor/github.com/apache/arrow/go/v14/arrow/compute/fieldref.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/fieldref.go index ee6f3994..55ec3372 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/fieldref.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/fieldref.go @@ -20,15 +20,13 @@ import ( "errors" "fmt" "hash/maphash" - "math/bits" "reflect" "strconv" "strings" "unicode" - "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" ) var ( @@ -168,21 +166,6 @@ func (f FieldPath) GetColumn(batch arrow.Record) (arrow.Array, error) { return f.getArray(batch.Columns()) } -func (f FieldPath) hash(h *maphash.Hash) { - raw := (*reflect.SliceHeader)(unsafe.Pointer(&f)).Data - - var b []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - s.Data = raw - if bits.UintSize == 32 { - s.Len = arrow.Int32Traits.BytesRequired(len(f)) - } else { - s.Len = arrow.Int64Traits.BytesRequired(len(f)) - } - s.Cap = s.Len - h.Write(b) -} - func (f FieldPath) findAll(fields []arrow.Field) []FieldPath { _, err := f.GetFieldFromSlice(fields) if err == nil { @@ -282,31 +265,31 @@ type refImpl interface { // // Nested fields can be referenced as well, given the schema: // -// arrow.NewSchema([]arrow.Field{ -// {Name: "a", Type: arrow.StructOf(arrow.Field{Name: "n", Type: arrow.Null})}, -// {Name: "b", Type: arrow.PrimitiveTypes.Int32}, -// }) +// arrow.NewSchema([]arrow.Field{ +// {Name: "a", Type: arrow.StructOf(arrow.Field{Name: "n", Type: arrow.Null})}, +// {Name: "b", Type: arrow.PrimitiveTypes.Int32}, +// }) // // the following all indicate the nested field named "n": // -// FieldRefPath(FieldPath{0, 0}) -// FieldRefList("a", 0) -// FieldRefList("a", "n") -// FieldRefList(0, "n") -// NewFieldRefFromDotPath(".a[0]") +// FieldRefPath(FieldPath{0, 0}) +// FieldRefList("a", 0) +// FieldRefList("a", "n") +// FieldRefList(0, "n") +// NewFieldRefFromDotPath(".a[0]") // // FieldPaths matching a FieldRef are retrieved with the FindAll* functions // Multiple matches are possible because field names may be duplicated within // a schema. For example: // -// aIsAmbiguous := arrow.NewSchema([]arrow.Field{ -// {Name: "a", Type: arrow.PrimitiveTypes.Int32}, -// {Name: "a", Type: arrow.PrimitiveTypes.Float32}, -// }) -// matches := FieldRefName("a").FindAll(aIsAmbiguous) -// assert.Len(matches, 2) -// assert.True(matches[0].Get(aIsAmbiguous).Equals(aIsAmbiguous.Field(0)) -// assert.True(matches[1].Get(aIsAmbiguous).Equals(aIsAmbiguous.Field(1)) +// aIsAmbiguous := arrow.NewSchema([]arrow.Field{ +// {Name: "a", Type: arrow.PrimitiveTypes.Int32}, +// {Name: "a", Type: arrow.PrimitiveTypes.Float32}, +// }) +// matches := FieldRefName("a").FindAll(aIsAmbiguous) +// assert.Len(matches, 2) +// assert.True(matches[0].Get(aIsAmbiguous).Equals(aIsAmbiguous.Field(0)) +// assert.True(matches[1].Get(aIsAmbiguous).Equals(aIsAmbiguous.Field(1)) type FieldRef struct { impl refImpl } @@ -346,17 +329,18 @@ func FieldRefList(elems ...interface{}) FieldRef { // NewFieldRefFromDotPath parses a dot path into a field ref. // // dot_path = '.' 
name -// | '[' digit+ ']' -// | dot_path+ +// +// | '[' digit+ ']' +// | dot_path+ // // Examples // -// ".alpha" => FieldRefName("alpha") -// "[2]" => FieldRefIndex(2) -// ".beta[3]" => FieldRefList("beta", 3) -// "[5].gamma.delta[7]" => FieldRefList(5, "gamma", "delta", 7) -// ".hello world" => FieldRefName("hello world") -// `.\[y\]\\tho\.\` => FieldRef(`[y]\tho.\`) +// ".alpha" => FieldRefName("alpha") +// "[2]" => FieldRefIndex(2) +// ".beta[3]" => FieldRefList("beta", 3) +// "[5].gamma.delta[7]" => FieldRefList(5, "gamma", "delta", 7) +// ".hello world" => FieldRefName("hello world") +// `.\[y\]\\tho\.\` => FieldRef(`[y]\tho.\`) // // Note: when parsing a name, a '\' preceding any other character will be // dropped from the resulting name. therefore if a name must contain the characters diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/compute/fieldref_hash.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/fieldref_hash.go new file mode 100644 index 00000000..02efc46d --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/fieldref_hash.go @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build go1.20 || tinygo + +package compute + +import ( + "hash/maphash" + "math/bits" + "unsafe" + + "github.com/apache/arrow-go/v18/arrow" +) + +func (f FieldPath) hash(h *maphash.Hash) { + raw := unsafe.Pointer(unsafe.SliceData(f)) + var byteLen int + if bits.UintSize == 32 { + byteLen = arrow.Int32Traits.BytesRequired(len(f)) + } else { + byteLen = arrow.Int64Traits.BytesRequired(len(f)) + } + + h.Write(unsafe.Slice((*byte)(raw), byteLen)) +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/funckind_string.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/funckind_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/funckind_string.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/funckind_string.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/functions.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/functions.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/functions.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/functions.go index a1905f91..11d8e685 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/functions.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/functions.go @@ -23,8 +23,8 @@ import ( "fmt" "strings" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" ) type Function interface { @@ -179,7 +179,7 @@ func (b *baseFunction) checkArity(nargs int) error { return nil } -// kernelType is a type contstraint interface that is used for funcImpl +// kernelType is a type constraint interface that is used for funcImpl // generic definitions. It will be extended as other kernel types // are defined. // @@ -227,7 +227,7 @@ func (fi *funcImpl[KT]) Kernels() []*KT { // A ScalarFunction is a function that executes element-wise operations // on arrays or scalars, and therefore whose results generally do not -// depent on the order of the values in the arguments. Accepts and returns +// depend on the order of the values in the arguments. Accepts and returns // arrays that are all of the same size. These functions roughly correspond // to the functions used in most SQL expressions. 
type ScalarFunction struct { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/Makefile b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/Makefile similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/Makefile rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/Makefile index ac00bd83..4e8ddd85 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/Makefile +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/Makefile @@ -21,14 +21,15 @@ C2GOASM=c2goasm CC=clang-11 CXX=clang++-11 C_FLAGS=-target x86_64-unknown-none -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=5000 \ - -fno-asynchronous-unwind-tables -fno-exceptions -fno-rtti -O3 -fno-builtin -ffast-math -fno-jump-tables -I_lib -I../../../../internal/utils/_lib + -fno-asynchronous-unwind-tables -fno-exceptions -fno-rtti -O3 -fno-builtin -fno-jump-tables \ + -fno-math-errno -funsafe-math-optimizations -fno-rounding-math -fno-trapping-math -I_lib -I../../../../internal/utils/_lib ASM_FLAGS_AVX2=-mavx2 -mfma ASM_FLAGS_SSE4=-msse4 ASM_FLAGS_BMI2=-mbmi2 ASM_FLAGS_POPCNT=-mpopcnt C_FLAGS_NEON=-O3 -fvectorize -mllvm -force-vector-width=16 -fno-asynchronous-unwind-tables -mno-red-zone -mstackrealign -fno-exceptions \ - -fno-rtti -fno-builtin -ffast-math -fno-jump-tables -I_lib -I../../../../internal/utils/_lib + -fno-rtti -fno-builtin -fno-math-errno -funsafe-math-optimizations -fno-rounding-math -fno-trapping-math -fno-jump-tables -I_lib -I../../../../internal/utils/_lib GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go') ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go') diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic.go index 67e80af7..3e12663b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic.go @@ -23,12 +23,12 @@ import ( "math" "math/bits" - "github.com/JohnCGriffin/overflow" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/internal/utils" "golang.org/x/exp/constraints" ) @@ -81,7 +81,7 @@ const ( OpLogbChecked ) -func mulWithOverflow[T exec.IntTypes | exec.UintTypes](a, b T) (T, error) { +func mulWithOverflow[T arrow.IntType | arrow.UintType](a, b T) (T, error) { min, max := MinOf[T](), MaxOf[T]() switch { case a > 0: @@ -107,7 +107,7 @@ func mulWithOverflow[T exec.IntTypes | exec.UintTypes](a, b T) (T, error) { return a * b, nil } -func getGoArithmeticBinary[OutT, Arg0T, Arg1T 
exec.NumericTypes](op func(a Arg0T, b Arg1T, e *error) OutT) binaryOps[OutT, Arg0T, Arg1T] { +func getGoArithmeticBinary[OutT, Arg0T, Arg1T arrow.NumericType](op func(a Arg0T, b Arg1T, e *error) OutT) binaryOps[OutT, Arg0T, Arg1T] { return binaryOps[OutT, Arg0T, Arg1T]{ arrArr: func(_ *exec.KernelCtx, left []Arg0T, right []Arg1T, out []OutT) error { var err error @@ -143,7 +143,7 @@ var ( errLogNeg = fmt.Errorf("%w: logarithm of negative number", arrow.ErrInvalid) ) -func getGoArithmeticOpIntegral[InT, OutT exec.UintTypes | exec.IntTypes](op ArithmeticOp) exec.ArrayKernelExec { +func getGoArithmeticOpIntegral[InT, OutT arrow.UintType | arrow.IntType](op ArithmeticOp) exec.ArrayKernelExec { switch op { case OpAdd: return ScalarBinary(getGoArithmeticBinary(func(a, b InT, _ *error) OutT { return OutT(a + b) })) @@ -178,7 +178,7 @@ func getGoArithmeticOpIntegral[InT, OutT exec.UintTypes | exec.IntTypes](op Arit if SizeOf[InT]() == SizeOf[OutT]() { return ScalarUnary(func(_ *exec.KernelCtx, arg []InT, out []OutT) error { - in, output := exec.GetBytes(arg), exec.GetBytes(out) + in, output := arrow.GetBytes(arg), arrow.GetBytes(out) copy(output, in) return nil }) @@ -314,7 +314,7 @@ func getGoArithmeticOpIntegral[InT, OutT exec.UintTypes | exec.IntTypes](op Arit } if SizeOf[InT]() == SizeOf[OutT]() { return ScalarUnary(func(_ *exec.KernelCtx, arg []InT, out []OutT) error { - in, output := exec.GetBytes(arg), exec.GetBytes(out) + in, output := arrow.GetBytes(arg), arrow.GetBytes(out) copy(output, in) return nil }) @@ -709,7 +709,7 @@ func SubtractDate32(op ArithmeticOp) exec.ArrayKernelExec { case OpSubChecked: return ScalarBinary(getGoArithmeticBinary(func(a, b arrow.Time32, e *error) (result arrow.Duration) { result = arrow.Duration(a) - arrow.Duration(b) - val, ok := overflow.Mul64(int64(result), secondsPerDay) + val, ok := utils.Mul64(int64(result), secondsPerDay) if !ok { *e = errOverflow } @@ -795,7 +795,7 @@ func getArithmeticOpDecimalImpl[T decimal128.Num | decimal256.Num](op Arithmetic return int64(fns.Sign(arg)) }) } - debug.Assert(false, "unimplemented arithemtic op") + debug.Assert(false, "unimplemented arithmetic op") return nil } @@ -837,7 +837,7 @@ func ArithmeticExecSameType(ty arrow.Type, op ArithmeticOp) exec.ArrayKernelExec return nil } -func arithmeticExec[InT exec.IntTypes | exec.UintTypes](oty arrow.Type, op ArithmeticOp) exec.ArrayKernelExec { +func arithmeticExec[InT arrow.IntType | arrow.UintType](oty arrow.Type, op ArithmeticOp) exec.ArrayKernelExec { switch oty { case arrow.INT8: return getArithmeticOpIntegral[InT, int8](op) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_amd64.go similarity index 63% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_amd64.go index 0e78e6c9..2073e62b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_amd64.go @@ -21,63 +21,64 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + 
"github.com/apache/arrow-go/v18/arrow/internal/debug" "golang.org/x/exp/constraints" "golang.org/x/sys/cpu" ) -func getAvx2ArithmeticBinaryNumeric[T exec.NumericTypes](op ArithmeticOp) binaryOps[T, T, T] { - typ := exec.GetType[T]() +func getAvx2ArithmeticBinaryNumeric[T arrow.NumericType](op ArithmeticOp) binaryOps[T, T, T] { + typ := arrow.GetType[T]() return binaryOps[T, T, T]{ arrArr: func(_ *exec.KernelCtx, Arg0, Arg1, Out []T) error { - arithmeticAvx2(typ, op, exec.GetBytes(Arg0), exec.GetBytes(Arg1), exec.GetBytes(Out), len(Arg0)) + arithmeticAvx2(typ, op, arrow.GetBytes(Arg0), arrow.GetBytes(Arg1), arrow.GetBytes(Out), len(Arg0)) return nil }, arrScalar: func(_ *exec.KernelCtx, Arg0 []T, Arg1 T, Out []T) error { - arithmeticArrScalarAvx2(typ, op, exec.GetBytes(Arg0), unsafe.Pointer(&Arg1), exec.GetBytes(Out), len(Arg0)) + arithmeticArrScalarAvx2(typ, op, arrow.GetBytes(Arg0), unsafe.Pointer(&Arg1), arrow.GetBytes(Out), len(Arg0)) return nil }, scalarArr: func(_ *exec.KernelCtx, Arg0 T, Arg1, Out []T) error { - arithmeticScalarArrAvx2(typ, op, unsafe.Pointer(&Arg0), exec.GetBytes(Arg1), exec.GetBytes(Out), len(Arg1)) + arithmeticScalarArrAvx2(typ, op, unsafe.Pointer(&Arg0), arrow.GetBytes(Arg1), arrow.GetBytes(Out), len(Arg1)) return nil }, } } -func getSSE4ArithmeticBinaryNumeric[T exec.NumericTypes](op ArithmeticOp) binaryOps[T, T, T] { - typ := exec.GetType[T]() +func getSSE4ArithmeticBinaryNumeric[T arrow.NumericType](op ArithmeticOp) binaryOps[T, T, T] { + typ := arrow.GetType[T]() return binaryOps[T, T, T]{ arrArr: func(_ *exec.KernelCtx, Arg0, Arg1, Out []T) error { - arithmeticSSE4(typ, op, exec.GetBytes(Arg0), exec.GetBytes(Arg1), exec.GetBytes(Out), len(Arg0)) + arithmeticSSE4(typ, op, arrow.GetBytes(Arg0), arrow.GetBytes(Arg1), arrow.GetBytes(Out), len(Arg0)) return nil }, arrScalar: func(_ *exec.KernelCtx, Arg0 []T, Arg1 T, Out []T) error { - arithmeticArrScalarSSE4(typ, op, exec.GetBytes(Arg0), unsafe.Pointer(&Arg1), exec.GetBytes(Out), len(Arg0)) + arithmeticArrScalarSSE4(typ, op, arrow.GetBytes(Arg0), unsafe.Pointer(&Arg1), arrow.GetBytes(Out), len(Arg0)) return nil }, scalarArr: func(_ *exec.KernelCtx, Arg0 T, Arg1, Out []T) error { - arithmeticScalarArrSSE4(typ, op, unsafe.Pointer(&Arg0), exec.GetBytes(Arg1), exec.GetBytes(Out), len(Arg1)) + arithmeticScalarArrSSE4(typ, op, unsafe.Pointer(&Arg0), arrow.GetBytes(Arg1), arrow.GetBytes(Out), len(Arg1)) return nil }, } } -func getArithmeticOpIntegral[InT, OutT exec.UintTypes | exec.IntTypes](op ArithmeticOp) exec.ArrayKernelExec { +func getArithmeticOpIntegral[InT, OutT arrow.UintType | arrow.IntType](op ArithmeticOp) exec.ArrayKernelExec { if cpu.X86.HasAVX2 { switch op { case OpAdd, OpSub, OpMul: return ScalarBinary(getAvx2ArithmeticBinaryNumeric[InT](op)) case OpAbsoluteValue, OpNegate: - typ := exec.GetType[InT]() + typ := arrow.GetType[InT]() return ScalarUnary(func(_ *exec.KernelCtx, arg, out []InT) error { - arithmeticUnaryAvx2(typ, op, exec.GetBytes(arg), exec.GetBytes(out), len(arg)) + arithmeticUnaryAvx2(typ, op, arrow.GetBytes(arg), arrow.GetBytes(out), len(arg)) return nil }) case OpSign: - inType, outType := exec.GetType[InT](), exec.GetType[OutT]() + inType, outType := arrow.GetType[InT](), arrow.GetType[OutT]() return ScalarUnary(func(_ *exec.KernelCtx, arg []InT, out []OutT) error { - arithmeticUnaryDiffTypesAvx2(inType, outType, op, exec.GetBytes(arg), exec.GetBytes(out), len(arg)) + arithmeticUnaryDiffTypesAvx2(inType, outType, op, arrow.GetBytes(arg), arrow.GetBytes(out), len(arg)) return nil }) } @@ 
-86,15 +87,15 @@ func getArithmeticOpIntegral[InT, OutT exec.UintTypes | exec.IntTypes](op Arithm case OpAdd, OpSub, OpMul: return ScalarBinary(getSSE4ArithmeticBinaryNumeric[InT](op)) case OpAbsoluteValue, OpNegate: - typ := exec.GetType[InT]() + typ := arrow.GetType[InT]() return ScalarUnary(func(ctx *exec.KernelCtx, arg, out []InT) error { - arithmeticUnarySSE4(typ, op, exec.GetBytes(arg), exec.GetBytes(out), len(arg)) + arithmeticUnarySSE4(typ, op, arrow.GetBytes(arg), arrow.GetBytes(out), len(arg)) return nil }) case OpSign: - inType, outType := exec.GetType[InT](), exec.GetType[OutT]() + inType, outType := arrow.GetType[InT](), arrow.GetType[OutT]() return ScalarUnary(func(_ *exec.KernelCtx, arg []InT, out []OutT) error { - arithmeticUnaryDiffTypesSSE4(inType, outType, op, exec.GetBytes(arg), exec.GetBytes(out), len(arg)) + arithmeticUnaryDiffTypesSSE4(inType, outType, op, arrow.GetBytes(arg), arrow.GetBytes(out), len(arg)) return nil }) } @@ -109,38 +110,38 @@ func getArithmeticOpFloating[InT, OutT constraints.Float](op ArithmeticOp) exec. if cpu.X86.HasAVX2 { switch op { case OpAdd, OpSub, OpAddChecked, OpSubChecked, OpMul, OpMulChecked: - if exec.GetType[InT]() != exec.GetType[OutT]() { + if arrow.GetType[InT]() != arrow.GetType[OutT]() { debug.Assert(false, "not implemented") return nil } return ScalarBinary(getAvx2ArithmeticBinaryNumeric[InT](op)) case OpAbsoluteValue, OpAbsoluteValueChecked, OpNegate, OpNegateChecked, OpSign: - if exec.GetType[InT]() != exec.GetType[OutT]() { + if arrow.GetType[InT]() != arrow.GetType[OutT]() { debug.Assert(false, "not implemented") return nil } - typ := exec.GetType[InT]() + typ := arrow.GetType[InT]() return ScalarUnary(func(_ *exec.KernelCtx, arg, out []InT) error { - arithmeticUnaryAvx2(typ, op, exec.GetBytes(arg), exec.GetBytes(out), len(arg)) + arithmeticUnaryAvx2(typ, op, arrow.GetBytes(arg), arrow.GetBytes(out), len(arg)) return nil }) } } else if cpu.X86.HasSSE42 { switch op { case OpAdd, OpSub, OpAddChecked, OpSubChecked, OpMul, OpMulChecked: - if exec.GetType[InT]() != exec.GetType[OutT]() { + if arrow.GetType[InT]() != arrow.GetType[OutT]() { debug.Assert(false, "not implemented") return nil } return ScalarBinary(getSSE4ArithmeticBinaryNumeric[InT](op)) case OpAbsoluteValue, OpAbsoluteValueChecked, OpNegate, OpNegateChecked, OpSign: - if exec.GetType[InT]() != exec.GetType[OutT]() { + if arrow.GetType[InT]() != arrow.GetType[OutT]() { debug.Assert(false, "not implemented") return nil } - typ := exec.GetType[InT]() + typ := arrow.GetType[InT]() return ScalarUnary(func(_ *exec.KernelCtx, arg, out []InT) error { - arithmeticUnarySSE4(typ, op, exec.GetBytes(arg), exec.GetBytes(out), len(arg)) + arithmeticUnarySSE4(typ, op, arrow.GetBytes(arg), arrow.GetBytes(out), len(arg)) return nil }) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go index 29cce783..5dbaa8dc 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - 
"github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go index e9b03551..510b24b7 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/basic_arithmetic_noasm.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/basic_arithmetic_noasm.go similarity index 84% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/basic_arithmetic_noasm.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/basic_arithmetic_noasm.go index 4f160a14..74d64b69 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/basic_arithmetic_noasm.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/basic_arithmetic_noasm.go @@ -19,7 +19,8 @@ package kernels import ( - "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" "golang.org/x/exp/constraints" ) @@ -27,6 +28,6 @@ func getArithmeticOpFloating[InT, OutT constraints.Float](op ArithmeticOp) exec. 
return getGoArithmeticOpFloating[InT, OutT](op) } -func getArithmeticOpIntegral[InT, OutT exec.UintTypes | exec.IntTypes](op ArithmeticOp) exec.ArrayKernelExec { +func getArithmeticOpIntegral[InT, OutT arrow.UintType | arrow.IntType](op ArithmeticOp) exec.ArrayKernelExec { return getGoArithmeticOpIntegral[InT, OutT](op) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/boolean_cast.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/boolean_cast.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/boolean_cast.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/boolean_cast.go index 18d04c84..dbe96f10 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/boolean_cast.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/boolean_cast.go @@ -22,12 +22,12 @@ import ( "strconv" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" ) -func isNonZero[T exec.FixedWidthTypes](ctx *exec.KernelCtx, in []T, out []byte) error { +func isNonZero[T arrow.FixedWidthType](ctx *exec.KernelCtx, in []T, out []byte) error { var zero T for i, v := range in { bitutil.SetBitTo(out, i, v != zero) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast.go index 5a71206b..66f75ad2 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast.go @@ -19,9 +19,9 @@ package kernels import ( - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/compute/exec" ) type CastOptions struct { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric.go index 4e5c5c1d..a1772599 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) var castNumericUnsafe func(itype, otype arrow.Type, in, out []byte, len int) = castNumericGo diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_amd64.go similarity index 100% rename from 
vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go index 6b28441e..3c55e07c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go index d53a4486..f62b7b96 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" "golang.org/x/sys/cpu" ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s index c54eac44..3d56efc5 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s @@ -10,6 +10,10 @@ TEXT ·_cast_type_numeric_neon(SB), $0-40 MOVD len+32(FP), R4 + // The Go ABI saves the frame pointer register one word below the + // caller's frame. Make room so we don't overwrite it. Needs to stay + // 16-byte aligned + SUB $16, RSP WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
WORD $0x7100181f // cmp w0, #6 WORD $0x910003fd // mov x29, sp @@ -4447,6 +4451,8 @@ LBB0_892: BNE LBB0_892 LBB0_893: WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET LBB0_894: WORD $0x927b6909 // and x9, x8, #0xffffffe0 diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go index 1cbea033..d53f219f 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_temporal.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_temporal.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_temporal.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_temporal.go index 82fce1e3..6d061432 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_temporal.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/cast_temporal.go @@ -24,10 +24,10 @@ import ( "time" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) const millisecondsInDay = 86400000 @@ -112,6 +112,10 @@ func TimestampToDate32(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.Exec return ScalarUnaryNotNull(func(_ *exec.KernelCtx, arg0 arrow.Timestamp, _ *error) arrow.Date32 { tm := fnToTime(arg0) + if _, offset := tm.Zone(); offset != 0 { + // normalize the tm + tm = tm.Add(time.Duration(offset) * time.Second).UTC() + } return arrow.Date32FromTime(tm) })(ctx, batch, out) } @@ -125,6 +129,10 @@ func TimestampToDate64(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.Exec return ScalarUnaryNotNull(func(_ *exec.KernelCtx, arg0 arrow.Timestamp, _ *error) arrow.Date64 { tm := fnToTime(arg0) + if _, offset := tm.Zone(); offset != 0 { + // normalize the tm + tm = tm.Add(time.Duration(offset) * time.Second).UTC() + } return arrow.Date64FromTime(tm) })(ctx, batch, out) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/compareoperator_string.go 
b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/compareoperator_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/compareoperator_string.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/compareoperator_string.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/doc.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/doc.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/doc.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/helpers.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/helpers.go similarity index 94% rename 
from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/helpers.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/helpers.go index ed25071c..4a9ead12 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/helpers.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/helpers.go @@ -22,13 +22,13 @@ import ( "fmt" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/arrow/scalar" - "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/scalar" + "github.com/apache/arrow-go/v18/internal/bitutils" "golang.org/x/exp/constraints" ) @@ -37,9 +37,9 @@ import ( // which will receive a slice containing the raw input data along with // a slice to populate for the output data. // -// Note that bool is not included in exec.FixedWidthTypes since it is +// Note that bool is not included in arrow.FixedWidthType since it is // represented as a bitmap, not as a slice of bool. -func ScalarUnary[OutT, Arg0T exec.FixedWidthTypes](op func(*exec.KernelCtx, []Arg0T, []OutT) error) exec.ArrayKernelExec { +func ScalarUnary[OutT, Arg0T arrow.FixedWidthType](op func(*exec.KernelCtx, []Arg0T, []OutT) error) exec.ArrayKernelExec { return func(ctx *exec.KernelCtx, in *exec.ExecSpan, out *exec.ExecResult) error { arg0 := in.Values[0].Array inData := exec.GetSpanValues[Arg0T](&arg0, 1) @@ -51,7 +51,7 @@ func ScalarUnary[OutT, Arg0T exec.FixedWidthTypes](op func(*exec.KernelCtx, []Ar // ScalarUnaryNotNull is for generating a kernel to operate only on the // non-null values in the input array. The zerovalue of the output type // is used for any null input values. -func ScalarUnaryNotNull[OutT, Arg0T exec.FixedWidthTypes](op func(*exec.KernelCtx, Arg0T, *error) OutT) exec.ArrayKernelExec { +func ScalarUnaryNotNull[OutT, Arg0T arrow.FixedWidthType](op func(*exec.KernelCtx, Arg0T, *error) OutT) exec.ArrayKernelExec { return func(ctx *exec.KernelCtx, in *exec.ExecSpan, out *exec.ExecResult) error { var ( arg0 = &in.Values[0].Array @@ -78,7 +78,7 @@ func ScalarUnaryNotNull[OutT, Arg0T exec.FixedWidthTypes](op func(*exec.KernelCt // ScalarUnaryBoolOutput is like ScalarUnary only it is for cases of boolean // output. The function should take in a slice of the input type and a slice // of bytes to fill with the output boolean bitmap. -func ScalarUnaryBoolOutput[Arg0T exec.FixedWidthTypes](op func(*exec.KernelCtx, []Arg0T, []byte) error) exec.ArrayKernelExec { +func ScalarUnaryBoolOutput[Arg0T arrow.FixedWidthType](op func(*exec.KernelCtx, []Arg0T, []byte) error) exec.ArrayKernelExec { return func(ctx *exec.KernelCtx, in *exec.ExecSpan, out *exec.ExecResult) error { arg0 := in.Values[0].Array inData := exec.GetSpanValues[Arg0T](&arg0, 1) @@ -127,7 +127,7 @@ func ScalarUnaryNotNullBinaryArgBoolOut[OffsetT int32 | int64](defVal bool, op f // It implements the handling to iterate the offsets and values calling // the provided function on each byte slice. 
The zero value of the OutT // will be used as the output for elements of the input that are null. -func ScalarUnaryNotNullBinaryArg[OutT exec.FixedWidthTypes, OffsetT int32 | int64](op func(*exec.KernelCtx, []byte, *error) OutT) exec.ArrayKernelExec { +func ScalarUnaryNotNullBinaryArg[OutT arrow.FixedWidthType, OffsetT int32 | int64](op func(*exec.KernelCtx, []byte, *error) OutT) exec.ArrayKernelExec { return func(ctx *exec.KernelCtx, in *exec.ExecSpan, out *exec.ExecResult) error { var ( arg0 = &in.Values[0].Array @@ -156,14 +156,14 @@ func ScalarUnaryNotNullBinaryArg[OutT exec.FixedWidthTypes, OffsetT int32 | int6 // ScalarUnaryBoolArg is like ScalarUnary except it specifically expects a // function that takes a byte slice since booleans arrays are represented // as a bitmap. -func ScalarUnaryBoolArg[OutT exec.FixedWidthTypes](op func(*exec.KernelCtx, []byte, []OutT) error) exec.ArrayKernelExec { +func ScalarUnaryBoolArg[OutT arrow.FixedWidthType](op func(*exec.KernelCtx, []byte, []OutT) error) exec.ArrayKernelExec { return func(ctx *exec.KernelCtx, input *exec.ExecSpan, out *exec.ExecResult) error { outData := exec.GetSpanValues[OutT](out, 1) return op(ctx, input.Values[0].Array.Buffers[1].Buf, outData) } } -func UnboxScalar[T exec.FixedWidthTypes](val scalar.PrimitiveScalar) T { +func UnboxScalar[T arrow.FixedWidthType](val scalar.PrimitiveScalar) T { return *(*T)(unsafe.Pointer(&val.Data()[0])) } @@ -174,11 +174,11 @@ func UnboxBinaryScalar(val scalar.BinaryScalar) []byte { return val.Data() } -type arrArrFn[OutT, Arg0T, Arg1T exec.FixedWidthTypes] func(*exec.KernelCtx, []Arg0T, []Arg1T, []OutT) error -type arrScalarFn[OutT, Arg0T, Arg1T exec.FixedWidthTypes] func(*exec.KernelCtx, []Arg0T, Arg1T, []OutT) error -type scalarArrFn[OutT, Arg0T, Arg1T exec.FixedWidthTypes] func(*exec.KernelCtx, Arg0T, []Arg1T, []OutT) error +type arrArrFn[OutT, Arg0T, Arg1T arrow.FixedWidthType] func(*exec.KernelCtx, []Arg0T, []Arg1T, []OutT) error +type arrScalarFn[OutT, Arg0T, Arg1T arrow.FixedWidthType] func(*exec.KernelCtx, []Arg0T, Arg1T, []OutT) error +type scalarArrFn[OutT, Arg0T, Arg1T arrow.FixedWidthType] func(*exec.KernelCtx, Arg0T, []Arg1T, []OutT) error -type binaryOps[OutT, Arg0T, Arg1T exec.FixedWidthTypes] struct { +type binaryOps[OutT, Arg0T, Arg1T arrow.FixedWidthType] struct { arrArr arrArrFn[OutT, Arg0T, Arg1T] arrScalar arrScalarFn[OutT, Arg0T, Arg1T] scalarArr scalarArrFn[OutT, Arg0T, Arg1T] @@ -190,7 +190,7 @@ type binaryBoolOps struct { scalarArr func(ctx *exec.KernelCtx, lhs bool, rhs, out bitutil.Bitmap) error } -func ScalarBinary[OutT, Arg0T, Arg1T exec.FixedWidthTypes](ops binaryOps[OutT, Arg0T, Arg1T]) exec.ArrayKernelExec { +func ScalarBinary[OutT, Arg0T, Arg1T arrow.FixedWidthType](ops binaryOps[OutT, Arg0T, Arg1T]) exec.ArrayKernelExec { arrayArray := func(ctx *exec.KernelCtx, arg0, arg1 *exec.ArraySpan, out *exec.ExecResult) error { var ( a0 = exec.GetSpanValues[Arg0T](arg0, 1) @@ -281,7 +281,7 @@ func ScalarBinaryBools(ops *binaryBoolOps) exec.ArrayKernelExec { } } -func ScalarBinaryNotNull[OutT, Arg0T, Arg1T exec.FixedWidthTypes](op func(*exec.KernelCtx, Arg0T, Arg1T, *error) OutT) exec.ArrayKernelExec { +func ScalarBinaryNotNull[OutT, Arg0T, Arg1T arrow.FixedWidthType](op func(*exec.KernelCtx, Arg0T, Arg1T, *error) OutT) exec.ArrayKernelExec { arrayArray := func(ctx *exec.KernelCtx, arg0, arg1 *exec.ArraySpan, out *exec.ExecResult) (err error) { // fast path if one side is entirely null if arg0.UpdateNullCount() == arg0.Len || arg1.UpdateNullCount() == arg1.Len { @@ 
-379,7 +379,7 @@ func ScalarBinaryNotNull[OutT, Arg0T, Arg1T exec.FixedWidthTypes](op func(*exec. } } -type binaryBinOp[T exec.FixedWidthTypes | bool] func(ctx *exec.KernelCtx, arg0, arg1 []byte) T +type binaryBinOp[T arrow.FixedWidthType | bool] func(ctx *exec.KernelCtx, arg0, arg1 []byte) T func ScalarBinaryBinaryArgsBoolOut(itrFn func(*exec.ArraySpan) exec.ArrayIter[[]byte], op binaryBinOp[bool]) exec.ArrayKernelExec { arrArr := func(ctx *exec.KernelCtx, arg0, arg1 *exec.ArraySpan, out *exec.ExecResult) error { @@ -577,7 +577,7 @@ func intsCanFit(data *exec.ArraySpan, target arrow.Type) error { } } -func intsInRange[T exec.IntTypes | exec.UintTypes](data *exec.ArraySpan, lowerBound, upperBound T) error { +func intsInRange[T arrow.IntType | arrow.UintType](data *exec.ArraySpan, lowerBound, upperBound T) error { if MinOf[T]() >= lowerBound && MaxOf[T]() <= upperBound { return nil } @@ -653,7 +653,7 @@ func intsInRange[T exec.IntTypes | exec.UintTypes](data *exec.ArraySpan, lowerBo } type numeric interface { - exec.IntTypes | exec.UintTypes | constraints.Float + arrow.IntType | arrow.UintType | constraints.Float } func memCpySpan[T numeric](in, out *exec.ArraySpan) { @@ -883,12 +883,12 @@ func (bldr *execBufBuilder) finish() (buf *memory.Buffer) { return } -type bufferBuilder[T exec.FixedWidthTypes] struct { +type bufferBuilder[T arrow.FixedWidthType] struct { execBufBuilder zero T } -func newBufferBuilder[T exec.FixedWidthTypes](mem memory.Allocator) *bufferBuilder[T] { +func newBufferBuilder[T arrow.FixedWidthType](mem memory.Allocator) *bufferBuilder[T] { return &bufferBuilder[T]{ execBufBuilder: execBufBuilder{ mem: mem, @@ -901,11 +901,11 @@ func (b *bufferBuilder[T]) reserve(additional int) { } func (b *bufferBuilder[T]) unsafeAppend(value T) { - b.execBufBuilder.unsafeAppend(exec.GetBytes([]T{value})) + b.execBufBuilder.unsafeAppend(arrow.GetBytes([]T{value})) } func (b *bufferBuilder[T]) unsafeAppendSlice(values []T) { - b.execBufBuilder.unsafeAppend(exec.GetBytes(values)) + b.execBufBuilder.unsafeAppend(arrow.GetBytes(values)) } func (b *bufferBuilder[T]) len() int { return b.sz / int(unsafe.Sizeof(b.zero)) } @@ -914,7 +914,7 @@ func (b *bufferBuilder[T]) cap() int { return cap(b.data) / int(unsafe.Sizeof(b.zero)) } -func checkIndexBoundsImpl[T exec.IntTypes | exec.UintTypes](values *exec.ArraySpan, upperLimit uint64) error { +func checkIndexBoundsImpl[T arrow.IntType | arrow.UintType](values *exec.ArraySpan, upperLimit uint64) error { // for unsigned integers, if the values array is larger // than the maximum index value, then there's no need to bounds check isSigned := !arrow.IsUnsignedInteger(values.Type.ID()) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/numeric_cast.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/numeric_cast.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/numeric_cast.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/numeric_cast.go index 8e535075..1e76709e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/numeric_cast.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/numeric_cast.go @@ -23,13 +23,13 @@ import ( "strconv" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/decimal128" - 
"github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/internal/bitutils" "golang.org/x/exp/constraints" ) @@ -69,13 +69,13 @@ func CastIntegerToFloating(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec. return nil } -type decimal[T exec.DecimalTypes] interface { +type decimal[T decimal128.Num | decimal256.Num] interface { Less(T) bool GreaterEqual(T) bool LowBits() uint64 } -func decimalToIntImpl[InT exec.DecimalTypes, OutT exec.IntTypes | exec.UintTypes](allowOverflow bool, min, max InT, v decimal[InT], err *error) OutT { +func decimalToIntImpl[InT decimal128.Num | decimal256.Num, OutT arrow.IntType | arrow.UintType](allowOverflow bool, min, max InT, v decimal[InT], err *error) OutT { if !allowOverflow && (v.Less(min) || v.GreaterEqual(max)) { debug.Log("integer value out of bounds from decimal") *err = fmt.Errorf("%w: integer value out of bounds", arrow.ErrInvalid) @@ -84,7 +84,7 @@ func decimalToIntImpl[InT exec.DecimalTypes, OutT exec.IntTypes | exec.UintTypes return OutT(v.LowBits()) } -func CastDecimal256ToInteger[T exec.IntTypes | exec.UintTypes](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { +func CastDecimal256ToInteger[T arrow.IntType | arrow.UintType](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { var ( opts = ctx.State.(CastState) inputType = batch.Values[0].Type().(*arrow.Decimal256Type) @@ -125,7 +125,7 @@ func CastDecimal256ToInteger[T exec.IntTypes | exec.UintTypes](ctx *exec.KernelC return ex(ctx, batch, out) } -func CastDecimal128ToInteger[T exec.IntTypes | exec.UintTypes](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { +func CastDecimal128ToInteger[T arrow.IntType | arrow.UintType](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { var ( opts = ctx.State.(CastState) inputType = batch.Values[0].Type().(*arrow.Decimal128Type) @@ -166,7 +166,7 @@ func CastDecimal128ToInteger[T exec.IntTypes | exec.UintTypes](ctx *exec.KernelC return ex(ctx, batch, out) } -func integerToDecimal128[T exec.IntTypes | exec.UintTypes](inType arrow.Type, outScale int32) exec.ArrayKernelExec { +func integerToDecimal128[T arrow.IntType | arrow.UintType](inType arrow.Type, outScale int32) exec.ArrayKernelExec { var getDecimal func(v T) decimal128.Num switch inType { case arrow.UINT8, arrow.UINT16, arrow.UINT32, arrow.UINT64: @@ -183,7 +183,7 @@ func integerToDecimal128[T exec.IntTypes | exec.UintTypes](inType arrow.Type, ou }) } -func integerToDecimal256[T exec.IntTypes | exec.UintTypes](inType arrow.Type, outScale int32) exec.ArrayKernelExec { +func integerToDecimal256[T arrow.IntType | arrow.UintType](inType arrow.Type, outScale int32) exec.ArrayKernelExec { var getDecimal func(v T) decimal256.Num switch inType { case arrow.UINT8, arrow.UINT16, arrow.UINT32, arrow.UINT64: @@ -200,7 +200,7 @@ func integerToDecimal256[T exec.IntTypes | exec.UintTypes](inType arrow.Type, ou }) } -func CastIntegerToDecimal[OutT exec.DecimalTypes, Arg0 exec.IntTypes | exec.UintTypes](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { +func 
CastIntegerToDecimal[OutT decimal128.Num | decimal256.Num, Arg0 arrow.IntType | arrow.UintType](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { var ( precision, scale int32 executor exec.ArrayKernelExec @@ -234,7 +234,7 @@ func CastIntegerToDecimal[OutT exec.DecimalTypes, Arg0 exec.IntTypes | exec.Uint return executor(ctx, batch, out) } -func getCastIntToDecimal[T exec.DecimalTypes](inType arrow.Type) exec.ArrayKernelExec { +func getCastIntToDecimal[T decimal128.Num | decimal256.Num](inType arrow.Type) exec.ArrayKernelExec { switch inType { case arrow.UINT8: return CastIntegerToDecimal[T, uint8] @@ -543,7 +543,7 @@ func boolToNum[T numeric](_ *exec.KernelCtx, in []byte, out []T) error { return nil } -func checkFloatTrunc[InT constraints.Float, OutT exec.IntTypes | exec.UintTypes](in, out *exec.ArraySpan) error { +func checkFloatTrunc[InT constraints.Float, OutT arrow.IntType | arrow.UintType](in, out *exec.ArraySpan) error { wasTrunc := func(out OutT, in InT) bool { return InT(out) != in } @@ -665,7 +665,7 @@ func checkIntToFloatTrunc(in *exec.ArraySpan, outType arrow.Type) error { return nil } -func parseStringToNumberImpl[T exec.IntTypes | exec.UintTypes | exec.FloatTypes, OffsetT int32 | int64](parseFn func(string) (T, error)) exec.ArrayKernelExec { +func parseStringToNumberImpl[T arrow.IntType | arrow.UintType | arrow.FloatType, OffsetT int32 | int64](parseFn func(string) (T, error)) exec.ArrayKernelExec { return ScalarUnaryNotNullBinaryArg[T, OffsetT](func(_ *exec.KernelCtx, in []byte, err *error) T { st := *(*string)(unsafe.Pointer(&in)) v, e := parseFn(st) @@ -749,7 +749,7 @@ func addCommonNumberCasts[T numeric](outTy arrow.DataType, kernels []exec.Scalar return kernels } -func GetCastToInteger[T exec.IntTypes | exec.UintTypes](outType arrow.DataType) []exec.ScalarKernel { +func GetCastToInteger[T arrow.IntType | arrow.UintType](outType arrow.DataType) []exec.ScalarKernel { kernels := make([]exec.ScalarKernel, 0) output := exec.NewOutputType(outType) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/rounding.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/rounding.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/rounding.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/rounding.go index 2f58a9fa..46b68f3c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/rounding.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/rounding.go @@ -22,11 +22,11 @@ import ( "fmt" "math" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/scalar" "golang.org/x/exp/constraints" ) @@ -619,7 +619,7 @@ func (rnd *roundToMultipleDec[T]) call(_ *exec.KernelCtx, arg T, e *error) T { if rnd.mode >= HalfDown { if rnd.hasHalfwayPoint && (remainder == rnd.halfMult || remainder == rnd.negHalfMult) { // on the halfway point, use tiebreaker - // manually implement rounding since we're not actually rounding + // manually implement rounding since we aren't actually 
rounding // a decimal value, but rather manipulating the multiple switch rnd.mode { case HalfDown: @@ -666,7 +666,7 @@ func (rnd *roundToMultipleDec[T]) call(_ *exec.KernelCtx, arg T, e *error) T { } } } else { - // manually implement rounding since we're not actually rounding + // manually implement rounding since we aren't actually rounding // a decimal value, but rather manipulating the multiple switch rnd.mode { case RoundDown: diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/roundmode_string.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/roundmode_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/roundmode_string.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/roundmode_string.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_arithmetic.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_arithmetic.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_arithmetic.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_arithmetic.go index 9cb32ae6..cfcdd1cf 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_arithmetic.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_arithmetic.go @@ -22,13 +22,13 @@ import ( "fmt" "time" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/scalar" ) // scalar kernel that ignores (assumed all-null inputs) and returns null @@ -254,7 +254,7 @@ func GetBitwiseBinaryKernels(op BitwiseOp) []exec.ScalarKernel { return append(kernels, NullExecKernel(2)) } -func bitwiseNot[T exec.IntTypes | exec.UintTypes](_ *exec.KernelCtx, arg T, _ *error) T { +func bitwiseNot[T arrow.IntType | arrow.UintType](_ *exec.KernelCtx, arg T, _ *error) T { return ^arg } @@ -290,7 +290,7 @@ const ( ShiftRight ) -func shiftKernelSignedImpl[T exec.IntTypes, Unsigned exec.UintTypes](dir ShiftDir, checked bool) exec.ArrayKernelExec { +func shiftKernelSignedImpl[T arrow.IntType, Unsigned arrow.UintType](dir ShiftDir, checked bool) exec.ArrayKernelExec { errShift := fmt.Errorf("%w: shift amount must be >= 0 and less than precision of type", arrow.ErrInvalid) maxShift := T(8*SizeOf[T]() - 1) @@ -334,7 +334,7 @@ func shiftKernelSignedImpl[T exec.IntTypes, Unsigned exec.UintTypes](dir ShiftDi return nil } -func shiftKernelUnsignedImpl[T exec.UintTypes](dir ShiftDir, checked bool) exec.ArrayKernelExec { +func shiftKernelUnsignedImpl[T arrow.UintType](dir ShiftDir, checked bool) exec.ArrayKernelExec { errShift := fmt.Errorf("%w: shift amount must be >= 0 and less than precision of type", arrow.ErrInvalid) maxShift := T(8 * SizeOf[T]()) diff --git 
a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_boolean.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_boolean.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_boolean.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_boolean.go index 812f4ad1..36598a09 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_boolean.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_boolean.go @@ -19,14 +19,14 @@ package kernels import ( - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/scalar" ) type computeWordFN func(leftTrue, leftFalse, rightTrue, rightFalse uint64) (outValid, outData uint64) -func computeKleene(computeWord computeWordFN, ctx *exec.KernelCtx, left, right *exec.ArraySpan, out *exec.ExecResult) error { +func computeKleene(computeWord computeWordFN, _ *exec.KernelCtx, left, right *exec.ArraySpan, out *exec.ExecResult) error { var ( inBMs = [4]bitutil.Bitmap{ {Data: left.Buffers[0].Buf, Offset: left.Offset, Len: left.Len}, @@ -332,3 +332,16 @@ func (KleeneAndNotOpKernel) CallScalarLeft(ctx *exec.KernelCtx, left scalar.Scal func (KleeneAndNotOpKernel) CallScalarRight(ctx *exec.KernelCtx, left *exec.ArraySpan, right scalar.Scalar, out *exec.ExecResult) error { return (KleeneAndOpKernel{}).CallScalarRight(ctx, left, invertScalar(right), out) } + +func NotExecKernel(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { + bitutil.InvertBitmap(batch.Values[0].Array.Buffers[1].Buf, int(batch.Values[0].Array.Offset), + int(batch.Values[0].Array.Len), out.Buffers[1].Buf, int(out.Offset)) + + out.Buffers[0] = batch.Values[0].Array.Buffers[0] + if out.Buffers[0].SelfAlloc { + out.Buffers[0].SelfAlloc = false + } + out.Nulls = batch.Values[0].Array.Nulls + + return nil +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_amd64.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_amd64.go index 585d1bff..7e66fbf2 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_amd64.go @@ -21,8 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow" "golang.org/x/sys/cpu" ) @@ -32,12 +31,12 @@ type cmpfn func(arrow.Type, []byte, []byte, []byte, int64, int) var comparisonMap map[CompareOperator][3]cmpfn -func genCompareKernel[T exec.NumericTypes](op CompareOperator) *CompareData { +func genCompareKernel[T arrow.NumericType](op CompareOperator) *CompareData { if pureGo { return genGoCompareKernel(getCmpOp[T](op)) } - ty := exec.GetType[T]() + ty := arrow.GetType[T]() byteWidth := int(unsafe.Sizeof(T(0))) comparisonFns := comparisonMap[op] return 
&CompareData{ diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go index 86817905..d1c235f8 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s similarity index 88% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s index bfc999b8..30246a22 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s @@ -1,4 +1,4 @@ -//go:build go1.18 && !noasm && !appengine +//+build !noasm !appengine // AUTO-GENERATED BY C2GOASM -- DO NOT EDIT TEXT ·_comparison_equal_arr_arr_avx2(SB), $80-48 @@ -11,8 +11,9 @@ TEXT ·_comparison_equal_arr_arr_avx2(SB), $80-48 MOVQ offset+40(FP), R9 ADDQ $8, SP - WORD $0x894d; BYTE $0xc3 // mov r11, r8 - WORD $0x8949; BYTE $0xce // mov r14, rcx + WORD $0x8944; BYTE $0xc8 // mov eax, r9d + WORD $0x894d; BYTE $0xc6 // mov r14, r8 + WORD $0x8949; BYTE $0xcc // mov r12, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 JG LBB0_29 WORD $0xff83; BYTE $0x03 // cmp edi, 3 @@ -23,16 +24,16 @@ TEXT ·_comparison_equal_arr_arr_avx2(SB), $80-48 JE LBB0_79 WORD $0xff83; BYTE $0x06 // cmp edi, 6 JNE LBB0_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_22 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_20: WORD $0x0e8b // mov ecx, dword [rsi] @@ -45,7 +46,7 @@ LBB0_20: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -54,49 +55,49 @@ LBB0_20: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD 
$0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_20 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_22: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_26 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_24: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x068b // mov eax, dword [rsi] WORD $0x4e8b; BYTE $0x04 // mov ecx, dword [rsi + 4] WORD $0x023b // cmp eax, dword [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] WORD $0x4a3b; BYTE $0x04 // cmp ecx, dword [rdx + 4] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] WORD $0x468b; BYTE $0x08 // mov eax, dword [rsi + 8] WORD $0x423b; BYTE $0x08 // cmp eax, dword [rdx + 8] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] WORD $0x468b; BYTE $0x0c // mov eax, dword [rsi + 12] WORD $0x423b; BYTE $0x0c // cmp eax, dword [rdx + 12] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] WORD $0x468b; BYTE $0x10 // mov eax, dword [rsi + 16] WORD $0x423b; BYTE $0x10 // cmp eax, dword [rdx + 16] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] WORD $0x468b; BYTE $0x14 // mov eax, dword [rsi + 20] WORD $0x423b; BYTE $0x14 // cmp eax, dword [rdx + 20] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] WORD $0x468b; BYTE $0x18 // mov eax, dword [rsi + 24] WORD $0x423b; BYTE $0x18 // cmp eax, dword [rdx + 24] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] WORD $0x468b; BYTE $0x1c // mov eax, dword [rsi + 28] WORD $0x423b; BYTE $0x1c // cmp eax, dword [rdx + 28] LONG $0xd5940f41 // sete r13b WORD $0x468b; BYTE $0x20 // mov eax, dword [rsi + 32] WORD $0x423b; BYTE $0x20 // cmp eax, dword [rdx + 32] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] WORD $0x468b; BYTE $0x24 // mov eax, dword [rsi + 36] WORD $0x423b; BYTE $0x24 // cmp eax, dword [rdx + 36] LONG $0xd0940f41 // sete r8b @@ -108,165 +109,165 @@ LBB0_24: LONG $0xd7940f41 // sete r15b WORD $0x468b; BYTE $0x30 // mov eax, dword [rsi + 48] WORD $0x423b; BYTE $0x30 // cmp eax, dword [rdx + 48] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] WORD $0x468b; BYTE $0x34 // mov eax, dword [rsi + 52] WORD $0x423b; BYTE $0x34 // cmp eax, dword [rdx + 52] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] WORD $0x468b; BYTE $0x38 // mov eax, dword [rsi + 56] WORD $0x423b; BYTE $0x38 // cmp eax, dword [rdx + 56] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] WORD $0x468b; BYTE $0x3c // mov eax, dword [rsi + 
60] WORD $0x423b; BYTE $0x3c // cmp eax, dword [rdx + 60] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil WORD $0x468b; BYTE $0x40 // mov eax, dword [rsi + 64] - WORD $0x4e8b; BYTE $0x44 // mov ecx, dword [rsi + 68] + WORD $0x5e8b; BYTE $0x44 // mov ebx, dword [rsi + 68] WORD $0x423b; BYTE $0x40 // cmp eax, dword [rdx + 64] WORD $0x468b; BYTE $0x48 // mov eax, dword [rsi + 72] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - WORD $0x4a3b; BYTE $0x44 // cmp ecx, dword [rdx + 68] - WORD $0x4e8b; BYTE $0x4c // mov ecx, dword [rsi + 76] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + WORD $0x5a3b; BYTE $0x44 // cmp ebx, dword [rdx + 68] + WORD $0x5e8b; BYTE $0x4c // mov ebx, dword [rsi + 76] LONG $0xd2940f41 // sete r10b WORD $0x423b; BYTE $0x48 // cmp eax, dword [rdx + 72] WORD $0x468b; BYTE $0x50 // mov eax, dword [rsi + 80] LONG $0xd6940f41 // sete r14b - WORD $0x4a3b; BYTE $0x4c // cmp ecx, dword [rdx + 76] - WORD $0x4e8b; BYTE $0x54 // mov ecx, dword [rsi + 84] + WORD $0x5a3b; BYTE $0x4c // cmp ebx, dword [rdx + 76] + WORD $0x5e8b; BYTE $0x54 // mov ebx, dword [rsi + 84] LONG $0xd4940f41 // sete r12b WORD $0x423b; BYTE $0x50 // cmp eax, dword [rdx + 80] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - WORD $0x4a3b; BYTE $0x54 // cmp ecx, dword [rdx + 84] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + WORD $0x5a3b; BYTE $0x54 // cmp ebx, dword [rdx + 84] WORD $0x468b; BYTE $0x58 // mov eax, dword [rsi + 88] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] WORD $0x423b; BYTE $0x58 // cmp eax, dword [rdx + 88] WORD $0x468b; BYTE $0x5c // mov eax, dword [rsi + 92] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] WORD $0x423b; BYTE $0x5c // cmp eax, dword [rdx + 92] WORD $0x468b; BYTE $0x60 // mov eax, dword [rsi + 96] LONG $0xd1940f41 // sete r9b WORD $0x423b; BYTE $0x60 // cmp eax, dword [rdx + 96] WORD $0x468b; BYTE $0x64 // mov eax, dword [rsi + 100] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] WORD $0x423b; BYTE $0x64 // cmp eax, dword [rdx + 100] WORD $0x468b; BYTE $0x68 // mov eax, dword [rsi + 104] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] WORD $0x423b; BYTE $0x68 // cmp eax, dword [rdx + 104] WORD $0x468b; BYTE $0x6c // mov eax, dword [rsi + 108] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] WORD $0x423b; BYTE $0x6c // cmp eax, dword [rdx + 108] WORD $0x468b; BYTE $0x70 // mov eax, dword [rsi + 112] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] WORD $0x423b; BYTE $0x70 // cmp eax, dword [rdx + 112] WORD $0x468b; BYTE $0x74 // mov eax, dword [rsi + 116] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] WORD $0x423b; BYTE $0x74 // cmp eax, dword [rdx + 116] WORD $0x468b; BYTE $0x78 // mov eax, dword [rsi + 120] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] WORD $0x423b; BYTE $0x78 // cmp eax, dword [rdx + 120] WORD $0x468b; BYTE $0x7c // mov eax, dword [rsi + 124] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 WORD $0x423b; BYTE $0x7c // cmp 
eax, dword [rdx + 124] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; 
BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_24 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_26: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx 
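// Editor's sketch (not part of the generated diff): in the float64 hunks below, the
// regenerated kernels pair SETE with SETNP after each VUCOMISD and AND the two results,
// so an unordered (NaN) comparison no longer reports equality; the remaining churn is
// register renumbering (r14/r12, cl/bl) and shifted spill slots. A minimal hand-written
// equivalent of that pattern, assuming a hypothetical Go declaration
// func eqFloat64(a, b float64) bool in the same package:

#include "textflag.h"

// func eqFloat64(a, b float64) bool
TEXT ·eqFloat64(SB), NOSPLIT, $0-17
	MOVSD   a+0(FP), X0
	MOVSD   b+8(FP), X1
	UCOMISD X1, X0          // unordered (NaN) sets ZF, PF and CF
	SETEQ   AX              // AL = 1 if ZF is set (equal or unordered)
	SETPC   CX              // CL = 1 if PF is clear, i.e. the operands are ordered
	ANDB    CX, AX          // equal AND ordered, so NaN never compares equal
	MOVB    AX, ret+16(FP)
	RET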
LBB0_28: @@ -277,16 +278,16 @@ LBB0_28: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_28 JMP LBB0_123 @@ -299,266 +300,361 @@ LBB0_29: JE LBB0_112 WORD $0xff83; BYTE $0x0c // cmp edi, 12 JNE LBB0_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_50 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x634c; BYTE $0xd0 // movsxd r10, eax LBB0_48: LONG $0x0610fbc5 // vmovsd xmm0, qword [rsi] LONG $0x08c68348 // add rsi, 8 LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] LONG $0x08528d48 // lea rdx, [rdx + 8] - LONG $0xd2940f41 // sete r10b - WORD $0xf641; BYTE $0xda // neg r10b - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax + WORD $0x9b0f; BYTE $0xd1 // setnp cl + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xcb20 // and bl, cl + WORD $0xdbf6 // neg bl + LONG $0x077a8d49 // lea rdi, [r10 + 7] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f49 // cmovns rdi, r10 LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] - WORD $0x3045; BYTE $0xc2 // xor r10b, r8b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc3 // xor bl, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] - WORD $0xc189 // mov ecx, eax + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0x2944; BYTE $0xc9 // sub ecx, r9d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0x2044; BYTE $0xd3 // and bl, r10b - WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x000001b8; BYTE $0x00 // mov eax, 1 + WORD $0xe0d3 // shl eax, cl + WORD $0xd820 // and al, bl + WORD $0x3044; BYTE $0xc0 // xor al, r8b + LONG $0x3c048841 // mov byte [r12 + rdi], al + LONG $0x01c28349 // add r10, 1 + LONG $0x08fa8349 // cmp r10, 8 JNE LBB0_48 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_50: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_54 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 + LONG 
$0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LBB0_52: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 LONG $0x0610fbc5 // vmovsd xmm0, qword [rsi] - LONG $0x4e10fbc5; BYTE $0x08 // vmovsd xmm1, qword [rsi + 8] LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] - LONG $0x4a2ef9c5; BYTE $0x08 // vucomisd xmm1, qword [rdx + 8] - WORD $0x940f; BYTE $0xd0 // sete al + LONG $0x4610fbc5; BYTE $0x08 // vmovsd xmm0, qword [rsi + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x15244c88 // mov byte [rsp + 21], cl + LONG $0x422ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rdx + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0d244c88 // mov byte [rsp + 13], cl LONG $0x4610fbc5; BYTE $0x10 // vmovsd xmm0, qword [rsi + 16] LONG $0x422ef9c5; BYTE $0x10 // vucomisd xmm0, qword [rdx + 16] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x14244c88 // mov byte [rsp + 20], cl LONG $0x4610fbc5; BYTE $0x18 // vmovsd xmm0, qword [rsi + 24] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x422ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rdx + 24] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x17244c88 // mov byte [rsp + 23], cl LONG $0x4610fbc5; BYTE $0x20 // vmovsd xmm0, qword [rsi + 32] LONG $0x422ef9c5; BYTE $0x20 // vucomisd xmm0, qword [rdx + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x16244c88 // mov byte [rsp + 22], cl LONG $0x4610fbc5; BYTE $0x28 // vmovsd xmm0, qword [rsi + 40] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x422ef9c5; BYTE $0x28 // vucomisd xmm0, qword [rdx + 40] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x4610fbc5; BYTE $0x30 // vmovsd xmm0, qword [rsi + 48] LONG $0x422ef9c5; BYTE $0x30 // vucomisd xmm0, qword [rdx + 48] - LONG $0x4610fbc5; BYTE $0x38 // vmovsd xmm0, qword [rsi + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al + LONG $0x4610fbc5; BYTE $0x38 // vmovsd xmm0, qword [rsi + 56] LONG $0x422ef9c5; BYTE $0x38 // vucomisd xmm0, qword [rdx + 56] - LONG $0xd7940f41 // sete r15b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x03244c88 // mov byte [rsp + 3], cl LONG $0x4610fbc5; BYTE $0x40 // vmovsd xmm0, qword [rsi + 64] LONG $0x422ef9c5; BYTE $0x40 // vucomisd xmm0, qword [rdx + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x13244c88 // mov byte [rsp + 19], cl LONG $0x4610fbc5; BYTE $0x48 // vmovsd xmm0, qword [rsi + 72] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x422ef9c5; BYTE $0x48 // vucomisd xmm0, qword [rdx + 72] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x4610fbc5; BYTE $0x50 // vmovsd xmm0, qword [rsi + 80] 
LONG $0x422ef9c5; BYTE $0x50 // vucomisd xmm0, qword [rdx + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x12244c88 // mov byte [rsp + 18], cl LONG $0x4610fbc5; BYTE $0x58 // vmovsd xmm0, qword [rsi + 88] - LONG $0xd1940f41 // sete r9b LONG $0x422ef9c5; BYTE $0x58 // vucomisd xmm0, qword [rdx + 88] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x11244c88 // mov byte [rsp + 17], cl LONG $0x4610fbc5; BYTE $0x60 // vmovsd xmm0, qword [rsi + 96] LONG $0x422ef9c5; BYTE $0x60 // vucomisd xmm0, qword [rdx + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x4610fbc5; BYTE $0x68 // vmovsd xmm0, qword [rsi + 104] - LONG $0xd2940f41 // sete r10b LONG $0x422ef9c5; BYTE $0x68 // vucomisd xmm0, qword [rdx + 104] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0f244c88 // mov byte [rsp + 15], cl LONG $0x4610fbc5; BYTE $0x70 // vmovsd xmm0, qword [rsi + 112] LONG $0x422ef9c5; BYTE $0x70 // vucomisd xmm0, qword [rdx + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0c244c88 // mov byte [rsp + 12], cl LONG $0x4610fbc5; BYTE $0x78 // vmovsd xmm0, qword [rsi + 120] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x422ef9c5; BYTE $0x78 // vucomisd xmm0, qword [rdx + 120] - WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0b244c88 // mov byte [rsp + 11], cl QUAD $0x000000808610fbc5 // vmovsd xmm0, qword [rsi + 128] QUAD $0x00000080822ef9c5 // vucomisd xmm0, qword [rdx + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0e244c88 // mov byte [rsp + 14], cl QUAD $0x000000888610fbc5 // vmovsd xmm0, qword [rsi + 136] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] QUAD $0x00000088822ef9c5 // vucomisd xmm0, qword [rdx + 136] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0a244c88 // mov byte [rsp + 10], cl QUAD $0x000000908610fbc5 // vmovsd xmm0, qword [rsi + 144] - LONG $0xd6940f41 // sete r14b QUAD $0x00000090822ef9c5 // vucomisd xmm0, qword [rdx + 144] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x09244c88 // mov byte [rsp + 9], cl QUAD $0x000000988610fbc5 // vmovsd xmm0, qword [rsi + 152] - LONG $0xd4940f41 // sete r12b QUAD $0x00000098822ef9c5 // vucomisd xmm0, qword [rdx + 152] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al QUAD $0x000000a08610fbc5 // vmovsd xmm0, qword [rsi + 160] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] QUAD $0x000000a0822ef9c5 // vucomisd xmm0, qword [rdx + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl QUAD $0x000000a88610fbc5 // vmovsd xmm0, qword [rsi + 168] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] QUAD $0x000000a8822ef9c5 // vucomisd xmm0, qword [rdx + 168] + WORD $0x9b0f; BYTE $0xd0 // 
setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x07244c88 // mov byte [rsp + 7], cl QUAD $0x000000b08610fbc5 // vmovsd xmm0, qword [rsi + 176] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] QUAD $0x000000b0822ef9c5 // vucomisd xmm0, qword [rdx + 176] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x05244c88 // mov byte [rsp + 5], cl QUAD $0x000000b88610fbc5 // vmovsd xmm0, qword [rsi + 184] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] QUAD $0x000000b8822ef9c5 // vucomisd xmm0, qword [rdx + 184] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al QUAD $0x000000c08610fbc5 // vmovsd xmm0, qword [rsi + 192] - LONG $0xd0940f41 // sete r8b QUAD $0x000000c0822ef9c5 // vucomisd xmm0, qword [rdx + 192] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x06244c88 // mov byte [rsp + 6], cl QUAD $0x000000c88610fbc5 // vmovsd xmm0, qword [rsi + 200] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] QUAD $0x000000c8822ef9c5 // vucomisd xmm0, qword [rdx + 200] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl QUAD $0x000000d08610fbc5 // vmovsd xmm0, qword [rsi + 208] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] QUAD $0x000000d0822ef9c5 // vucomisd xmm0, qword [rdx + 208] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al QUAD $0x000000d88610fbc5 // vmovsd xmm0, qword [rsi + 216] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] QUAD $0x000000d8822ef9c5 // vucomisd xmm0, qword [rdx + 216] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al QUAD $0x000000e08610fbc5 // vmovsd xmm0, qword [rsi + 224] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] QUAD $0x000000e0822ef9c5 // vucomisd xmm0, qword [rdx + 224] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl QUAD $0x000000e88610fbc5 // vmovsd xmm0, qword [rsi + 232] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] QUAD $0x000000e8822ef9c5 // vucomisd xmm0, qword [rdx + 232] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl QUAD $0x000000f08610fbc5 // vmovsd xmm0, qword [rsi + 240] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] QUAD $0x000000f0822ef9c5 // vucomisd xmm0, qword [rdx + 240] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al QUAD $0x000000f88610fbc5 // vmovsd xmm0, qword [rsi + 248] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 QUAD $0x000000f8822ef9c5 // vucomisd xmm0, qword [rdx + 248] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x04244402 // add al, byte [rsp + 4] + LONG $0x15244402 // add al, byte [rsp + 21] + LONG $0x05e7c040 // shl dil, 5 LONG $0x06e5c041 // shl r13b, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0845; 
BYTE $0xef // or r15b, r13b - LONG $0x6cb60f44; WORD $0x0524 // movzx r13d, byte [rsp + 5] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0841; BYTE $0xc5 // or r13b, al - WORD $0x8944; BYTE $0xe8 // mov eax, r13d - WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD $0x8944; BYTE $0xef // mov edi, r13d + LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xc108 // or cl, al + WORD $0xd889 // mov eax, ebx + WORD $0xd800 // add al, bl + LONG $0x13244402 // add al, byte [rsp + 19] + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x03 // movzx eax, byte [rsp + 3] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x03244488 // mov byte [rsp + 3], al + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x17 // movzx eax, byte [rsp + 23] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + LONG $0x245cb60f; BYTE $0x11 // movzx ebx, byte [rsp + 17] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] - LONG $0x03e5c041 // shl r13b, 3 + LONG $0x04e5c041 // shl r13b, 4 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - WORD $0x8941; BYTE $0xcd // mov r13d, ecx - LONG $0x03e3c041 // shl r11b, 3 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xda // or r10b, r11b - LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd0 // or al, r10b - LONG $0x4cb60f44; WORD $0x0624 // movzx r9d, byte [rsp + 6] - LONG $0x06e1c041 // shl r9b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xcb // or bl, r9b - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al - WORD $0x0045; BYTE $0xf6 // add r14b, r14b - LONG $0x24740244; BYTE $0x0e // add r14b, byte [rsp + 14] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x0f // movzx ebx, byte [rsp + 15] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; 
BYTE $0x0c // movzx ecx, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x0a // movzx ebx, byte [rsp + 10] + WORD $0xdb00 // add bl, bl + LONG $0x0e245c02 // add bl, byte [rsp + 14] + LONG $0x244cb60f; BYTE $0x09 // movzx ecx, byte [rsp + 9] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0841; BYTE $0xce // or r14b, cl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x74b60f44; WORD $0x0324 // movzx r14d, byte [rsp + 3] + WORD $0x0845; BYTE $0xee // or r14b, r13b + LONG $0x6cb60f44; WORD $0x0724 // movzx r13d, byte [rsp + 7] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x05 // movzx ecx, byte [rsp + 5] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xcf // or r15b, cl + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xc900 // add cl, cl + LONG $0x06244c02 // add cl, byte [rsp + 6] + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0841; BYTE $0xcb // or r11b, cl + LONG $0x03e2c041 // shl r10b, 3 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xd1 // or cl, r10b + LONG $0x24348845 // mov byte [r12], r14b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448841; BYTE $0x01 // mov byte [r12 + 1], al LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x14244402 // add al, byte [rsp + 20] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x13 // movzx ecx, byte [rsp + 19] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xc7 // or dil, al - WORD $0x0840; BYTE $0xcf // or dil, cl - LONG $0x02468845 // mov byte [r14 + 2], r8b - LONG $0x037e8841 // mov byte [r14 + 3], dil + LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b + LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 JNE LBB0_52 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG 
$0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] LBB0_54: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_56: + LONG $0x01498d4c // lea r9, [rcx + 1] LONG $0x0410fbc5; BYTE $0xce // vmovsd xmm0, qword [rsi + 8*rcx] LONG $0x042ef9c5; BYTE $0xca // vucomisd xmm0, qword [rdx + 8*rcx] - LONG $0x01418d4c // lea r8, [rcx + 1] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0xdbf6 // neg bl + WORD $0x9b0f; BYTE $0xd3 // setnp bl + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xd820 // and al, bl + WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc0 // xor al, r8b WORD $0xe180; BYTE $0x07 // and cl, 7 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd820 // and al, bl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xc3 // xor bl, r8b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + WORD $0x894c; BYTE $0xc9 // mov rcx, r9 + WORD $0x394d; BYTE $0xce // cmp r14, r9 JNE LBB0_56 JMP LBB0_123 @@ -567,16 +663,16 @@ LBB0_2: JE LBB0_57 WORD $0xff83; BYTE $0x03 // cmp edi, 3 JNE LBB0_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_8 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_6: WORD $0xb60f; BYTE $0x0e // movzx ecx, byte [rsi] @@ -589,7 +685,7 @@ LBB0_6: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -598,49 +694,49 @@ LBB0_6: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_6 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_8: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_12 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x38 // mov 
qword [rsp + 56], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LBB0_10: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb60f; BYTE $0x06 // movzx eax, byte [rsi] LONG $0x014eb60f // movzx ecx, byte [rsi + 1] WORD $0x023a // cmp al, byte [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] WORD $0x4a3a; BYTE $0x01 // cmp cl, byte [rdx + 1] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x0246b60f // movzx eax, byte [rsi + 2] WORD $0x423a; BYTE $0x02 // cmp al, byte [rdx + 2] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x0346b60f // movzx eax, byte [rsi + 3] WORD $0x423a; BYTE $0x03 // cmp al, byte [rdx + 3] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x0446b60f // movzx eax, byte [rsi + 4] WORD $0x423a; BYTE $0x04 // cmp al, byte [rdx + 4] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x0546b60f // movzx eax, byte [rsi + 5] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x0646b60f // movzx eax, byte [rsi + 6] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0746b60f // movzx eax, byte [rsi + 7] WORD $0x423a; BYTE $0x07 // cmp al, byte [rdx + 7] LONG $0xd7940f41 // sete r15b LONG $0x0846b60f // movzx eax, byte [rsi + 8] WORD $0x423a; BYTE $0x08 // cmp al, byte [rdx + 8] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x0946b60f // movzx eax, byte [rsi + 9] WORD $0x423a; BYTE $0x09 // cmp al, byte [rdx + 9] LONG $0xd7940f40 // sete dil @@ -655,16 +751,16 @@ LBB0_10: LONG $0xd6940f41 // sete r14b LONG $0x0d46b60f // movzx eax, byte [rsi + 13] WORD $0x423a; BYTE $0x0d // cmp al, byte [rdx + 13] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x0e46b60f // movzx eax, byte [rsi + 14] WORD $0x423a; BYTE $0x0e // cmp al, byte [rdx + 14] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x0f46b60f // movzx eax, byte [rsi + 15] WORD $0x423a; BYTE $0x0f // cmp al, byte [rdx + 15] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd0940f41 // sete r8b LONG $0x1046b60f // movzx eax, byte [rsi + 16] WORD $0x423a; BYTE $0x10 // cmp al, byte [rdx + 16] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x1146b60f // movzx eax, byte [rsi + 17] WORD $0x423a; BYTE $0x11 // cmp al, byte [rdx + 17] LONG $0xd4940f41 // sete r12b @@ -673,144 +769,144 @@ LBB0_10: LONG $0xd5940f41 // sete r13b LONG $0x1346b60f // movzx eax, byte [rsi + 19] WORD $0x423a; BYTE $0x13 // cmp al, byte [rdx + 19] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] LONG $0x1446b60f // movzx eax, byte [rsi + 20] WORD $0x423a; BYTE $0x14 // cmp al, byte [rdx + 20] - LONG 
$0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x1546b60f // movzx eax, byte [rsi + 21] WORD $0x423a; BYTE $0x15 // cmp al, byte [rdx + 21] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] LONG $0x1646b60f // movzx eax, byte [rsi + 22] WORD $0x423a; BYTE $0x16 // cmp al, byte [rdx + 22] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x1746b60f // movzx eax, byte [rsi + 23] WORD $0x423a; BYTE $0x17 // cmp al, byte [rdx + 23] LONG $0xd1940f41 // sete r9b LONG $0x1846b60f // movzx eax, byte [rsi + 24] WORD $0x423a; BYTE $0x18 // cmp al, byte [rdx + 24] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x1946b60f // movzx eax, byte [rsi + 25] WORD $0x423a; BYTE $0x19 // cmp al, byte [rdx + 25] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x1a46b60f // movzx eax, byte [rsi + 26] WORD $0x423a; BYTE $0x1a // cmp al, byte [rdx + 26] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x1b46b60f // movzx eax, byte [rsi + 27] WORD $0x423a; BYTE $0x1b // cmp al, byte [rdx + 27] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x1c46b60f // movzx eax, byte [rsi + 28] WORD $0x423a; BYTE $0x1c // cmp al, byte [rdx + 28] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x1d46b60f // movzx eax, byte [rsi + 29] WORD $0x423a; BYTE $0x1d // cmp al, byte [rdx + 29] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x1e46b60f // movzx eax, byte [rsi + 30] WORD $0x423a; BYTE $0x1e // cmp al, byte [rdx + 30] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x1f46b60f // movzx eax, byte [rsi + 31] LONG $0x20c68348 // add rsi, 32 WORD $0x423a; BYTE $0x1f // cmp al, byte [rdx + 31] - LONG $0xd0940f41 // sete r8b - WORD $0xc900 // add cl, cl - LONG $0x28244c02 // add cl, byte [rsp + 40] - WORD $0xc889 // mov eax, ecx - LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xdb00 // add bl, bl + LONG $0x04245c02 // add bl, byte [rsp + 4] + WORD $0xd889 // mov eax, ebx + LONG $0x245cb60f; BYTE $0x05 // movzx ebx, byte [rsp + 5] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xcf // or r15b, cl - LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x245cb60f; BYTE $0x15 // movzx ebx, byte [rsp + 21] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x07 // add dil, byte [rsp + 7] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x247c0240; BYTE $0x08 // add dil, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; 
BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e2c041 // shl r10b, 2 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0xcf89 // mov edi, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx LONG $0x03e3c041 // shl r11b, 3 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xde // or r14b, r11b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x247cb60f; BYTE $0x06 // movzx edi, byte [rsp + 6] + LONG $0x247cb60f; BYTE $0x07 // movzx edi, byte [rsp + 7] LONG $0x06e7c040 // shl dil, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x0841; BYTE $0xc0 // or r8b, al WORD $0x0045; BYTE $0xe4 // add r12b, r12b - LONG $0x24640244; BYTE $0x0d // add r12b, byte [rsp + 13] + LONG $0x24640244; BYTE $0x0e // add r12b, byte [rsp + 14] LONG $0x02e5c041 // shl r13b, 2 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0b // movzx ecx, byte [rsp + 11] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x243c8845 // mov byte [r12], r15b + LONG $0x245cb60f; BYTE $0x0c // movzx ebx, byte [rsp + 12] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or 
al, cl - WORD $0xc189 // mov ecx, eax + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x12 // movzx ecx, byte [rsp + 18] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x13 // movzx ebx, byte [rsp + 19] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x20c28348 // add rdx, 32 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 JNE LBB0_10 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] LBB0_12: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_14: @@ -821,16 +917,16 @@ LBB0_14: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_14 JMP LBB0_123 @@ -839,16 +935,16 @@ LBB0_30: JE LBB0_90 WORD $0xff83; BYTE $0x08 // cmp edi, 8 JNE LBB0_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; 
BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_36 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_34: WORD $0x8b48; BYTE $0x0e // mov rcx, qword [rsi] @@ -861,7 +957,7 @@ LBB0_34: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -870,49 +966,49 @@ LBB0_34: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_34 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_36: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_40 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_38: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x8b48; BYTE $0x06 // mov rax, qword [rsi] LONG $0x084e8b48 // mov rcx, qword [rsi + 8] WORD $0x3b48; BYTE $0x02 // cmp rax, qword [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x084a3b48 // cmp rcx, qword [rdx + 8] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x10468b48 // mov rax, qword [rsi + 16] LONG $0x10423b48 // cmp rax, qword [rdx + 16] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x18468b48 // mov rax, qword [rsi + 24] LONG $0x18423b48 // cmp rax, qword [rdx + 24] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x20468b48 // mov rax, qword [rsi + 32] LONG $0x20423b48 // cmp rax, qword [rdx + 32] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x28468b48 // mov rax, qword [rsi + 40] LONG $0x28423b48 // cmp rax, qword [rdx + 40] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x30468b48 // mov rax, qword [rsi + 48] LONG $0x30423b48 // cmp rax, qword [rdx + 48] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x38468b48 // mov rax, qword [rsi + 56] LONG $0x38423b48 // cmp rax, qword [rdx + 56] LONG $0xd5940f41 // sete r13b LONG $0x40468b48 // mov rax, qword [rsi + 64] LONG $0x40423b48 // cmp rax, qword [rdx + 64] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x48468b48 // mov rax, qword [rsi + 72] LONG $0x48423b48 // cmp rax, qword [rdx + 72] LONG $0xd0940f41 // sete r8b @@ -924,165 +1020,165 @@ LBB0_38: LONG $0xd7940f41 // sete r15b LONG 
$0x60468b48 // mov rax, qword [rsi + 96] LONG $0x60423b48 // cmp rax, qword [rdx + 96] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x68468b48 // mov rax, qword [rsi + 104] LONG $0x68423b48 // cmp rax, qword [rdx + 104] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x70468b48 // mov rax, qword [rsi + 112] LONG $0x70423b48 // cmp rax, qword [rdx + 112] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x78468b48 // mov rax, qword [rsi + 120] LONG $0x78423b48 // cmp rax, qword [rdx + 120] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil LONG $0x80868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 128] - LONG $0x888e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 136] + LONG $0x889e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 136] LONG $0x80823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 128] LONG $0x90868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 144] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - LONG $0x888a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 136] - LONG $0x988e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 152] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x889a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 136] + LONG $0x989e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 152] LONG $0xd2940f41 // sete r10b LONG $0x90823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 144] LONG $0xa0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 160] LONG $0xd6940f41 // sete r14b - LONG $0x988a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 152] - LONG $0xa88e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 168] + LONG $0x989a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 152] + LONG $0xa89e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 168] LONG $0xd4940f41 // sete r12b LONG $0xa0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 160] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - LONG $0xa88a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 168] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0xa89a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 168] LONG $0xb0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 176] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0xb0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 176] LONG $0xb8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 184] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0xb8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 184] LONG $0xc0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 192] LONG $0xd1940f41 // sete r9b LONG $0xc0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 192] LONG $0xc8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 200] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0xc8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 200] LONG $0xd0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 208] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0xd0823b48; 
WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 208] LONG $0xd8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 216] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0xd8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 216] LONG $0xe0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 224] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0xe0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 224] LONG $0xe8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 232] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0xe8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 232] LONG $0xf0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 240] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0xf0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 240] LONG $0xf8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 248] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0xf8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 248] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 
// movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // 
movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_38 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_40: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_42: @@ -1093,30 +1189,30 @@ LBB0_42: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_42 JMP LBB0_123 LBB0_68: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_72 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_70: WORD $0xb70f; BYTE $0x0e // movzx ecx, word [rsi] @@ -1129,7 +1225,7 @@ LBB0_70: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -1138,49 +1234,49 @@ LBB0_70: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE 
LBB0_70 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_72: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_76 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_74: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb70f; BYTE $0x06 // movzx eax, word [rsi] LONG $0x024eb70f // movzx ecx, word [rsi + 2] WORD $0x3b66; BYTE $0x02 // cmp ax, word [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x024a3b66 // cmp cx, word [rdx + 2] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x0446b70f // movzx eax, word [rsi + 4] LONG $0x04423b66 // cmp ax, word [rdx + 4] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x0646b70f // movzx eax, word [rsi + 6] LONG $0x06423b66 // cmp ax, word [rdx + 6] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x0846b70f // movzx eax, word [rsi + 8] LONG $0x08423b66 // cmp ax, word [rdx + 8] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x0a46b70f // movzx eax, word [rsi + 10] LONG $0x0a423b66 // cmp ax, word [rdx + 10] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x0c46b70f // movzx eax, word [rsi + 12] LONG $0x0c423b66 // cmp ax, word [rdx + 12] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0e46b70f // movzx eax, word [rsi + 14] LONG $0x0e423b66 // cmp ax, word [rdx + 14] LONG $0xd5940f41 // sete r13b LONG $0x1046b70f // movzx eax, word [rsi + 16] LONG $0x10423b66 // cmp ax, word [rdx + 16] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x1246b70f // movzx eax, word [rsi + 18] LONG $0x12423b66 // cmp ax, word [rdx + 18] LONG $0xd0940f41 // sete r8b @@ -1192,165 +1288,165 @@ LBB0_74: LONG $0xd7940f41 // sete r15b LONG $0x1846b70f // movzx eax, word [rsi + 24] LONG $0x18423b66 // cmp ax, word [rdx + 24] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x1a46b70f // movzx eax, word [rsi + 26] LONG $0x1a423b66 // cmp ax, word [rdx + 26] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x1c46b70f // movzx eax, word [rsi + 28] LONG $0x1c423b66 // cmp ax, word [rdx + 28] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x1e46b70f // movzx eax, word [rsi + 30] LONG $0x1e423b66 // cmp ax, word [rdx + 30] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil LONG $0x2046b70f // movzx eax, word [rsi + 32] - LONG $0x224eb70f // movzx ecx, word [rsi + 34] + LONG $0x225eb70f // movzx ebx, word [rsi + 34] LONG $0x20423b66 // cmp ax, word [rdx + 32] LONG $0x2446b70f // movzx eax, word [rsi + 36] - LONG $0x2454940f; BYTE $0x0a // 
sete byte [rsp + 10] - LONG $0x224a3b66 // cmp cx, word [rdx + 34] - LONG $0x264eb70f // movzx ecx, word [rsi + 38] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x225a3b66 // cmp bx, word [rdx + 34] + LONG $0x265eb70f // movzx ebx, word [rsi + 38] LONG $0xd2940f41 // sete r10b LONG $0x24423b66 // cmp ax, word [rdx + 36] LONG $0x2846b70f // movzx eax, word [rsi + 40] LONG $0xd6940f41 // sete r14b - LONG $0x264a3b66 // cmp cx, word [rdx + 38] - LONG $0x2a4eb70f // movzx ecx, word [rsi + 42] + LONG $0x265a3b66 // cmp bx, word [rdx + 38] + LONG $0x2a5eb70f // movzx ebx, word [rsi + 42] LONG $0xd4940f41 // sete r12b LONG $0x28423b66 // cmp ax, word [rdx + 40] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - LONG $0x2a4a3b66 // cmp cx, word [rdx + 42] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2a5a3b66 // cmp bx, word [rdx + 42] LONG $0x2c46b70f // movzx eax, word [rsi + 44] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x2c423b66 // cmp ax, word [rdx + 44] LONG $0x2e46b70f // movzx eax, word [rsi + 46] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x2e423b66 // cmp ax, word [rdx + 46] LONG $0x3046b70f // movzx eax, word [rsi + 48] LONG $0xd1940f41 // sete r9b LONG $0x30423b66 // cmp ax, word [rdx + 48] LONG $0x3246b70f // movzx eax, word [rsi + 50] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x32423b66 // cmp ax, word [rdx + 50] LONG $0x3446b70f // movzx eax, word [rsi + 52] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x34423b66 // cmp ax, word [rdx + 52] LONG $0x3646b70f // movzx eax, word [rsi + 54] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x36423b66 // cmp ax, word [rdx + 54] LONG $0x3846b70f // movzx eax, word [rsi + 56] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x38423b66 // cmp ax, word [rdx + 56] LONG $0x3a46b70f // movzx eax, word [rsi + 58] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x3a423b66 // cmp ax, word [rdx + 58] LONG $0x3c46b70f // movzx eax, word [rsi + 60] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x3c423b66 // cmp ax, word [rdx + 60] LONG $0x3e46b70f // movzx eax, word [rsi + 62] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x40c68348 // add rsi, 64 LONG $0x3e423b66 // cmp ax, word [rdx + 62] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte 
[rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // 
mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x40c28348 // add rdx, 64 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_74 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_76: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_78: @@ -1361,30 +1457,30 @@ LBB0_78: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_78 JMP LBB0_123 LBB0_79: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; 
BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_83 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_81: WORD $0xb70f; BYTE $0x0e // movzx ecx, word [rsi] @@ -1397,7 +1493,7 @@ LBB0_81: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -1406,49 +1502,49 @@ LBB0_81: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_81 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_83: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_87 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_85: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb70f; BYTE $0x06 // movzx eax, word [rsi] LONG $0x024eb70f // movzx ecx, word [rsi + 2] WORD $0x3b66; BYTE $0x02 // cmp ax, word [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x024a3b66 // cmp cx, word [rdx + 2] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x0446b70f // movzx eax, word [rsi + 4] LONG $0x04423b66 // cmp ax, word [rdx + 4] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x0646b70f // movzx eax, word [rsi + 6] LONG $0x06423b66 // cmp ax, word [rdx + 6] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x0846b70f // movzx eax, word [rsi + 8] LONG $0x08423b66 // cmp ax, word [rdx + 8] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x0a46b70f // movzx eax, word [rsi + 10] LONG $0x0a423b66 // cmp ax, word [rdx + 10] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x0c46b70f // movzx eax, word [rsi + 12] LONG $0x0c423b66 // cmp ax, word [rdx + 12] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0e46b70f // movzx eax, word [rsi + 14] LONG $0x0e423b66 // cmp ax, word [rdx + 14] LONG $0xd5940f41 // sete 
r13b LONG $0x1046b70f // movzx eax, word [rsi + 16] LONG $0x10423b66 // cmp ax, word [rdx + 16] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x1246b70f // movzx eax, word [rsi + 18] LONG $0x12423b66 // cmp ax, word [rdx + 18] LONG $0xd0940f41 // sete r8b @@ -1460,165 +1556,165 @@ LBB0_85: LONG $0xd7940f41 // sete r15b LONG $0x1846b70f // movzx eax, word [rsi + 24] LONG $0x18423b66 // cmp ax, word [rdx + 24] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x1a46b70f // movzx eax, word [rsi + 26] LONG $0x1a423b66 // cmp ax, word [rdx + 26] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x1c46b70f // movzx eax, word [rsi + 28] LONG $0x1c423b66 // cmp ax, word [rdx + 28] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x1e46b70f // movzx eax, word [rsi + 30] LONG $0x1e423b66 // cmp ax, word [rdx + 30] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil LONG $0x2046b70f // movzx eax, word [rsi + 32] - LONG $0x224eb70f // movzx ecx, word [rsi + 34] + LONG $0x225eb70f // movzx ebx, word [rsi + 34] LONG $0x20423b66 // cmp ax, word [rdx + 32] LONG $0x2446b70f // movzx eax, word [rsi + 36] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - LONG $0x224a3b66 // cmp cx, word [rdx + 34] - LONG $0x264eb70f // movzx ecx, word [rsi + 38] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x225a3b66 // cmp bx, word [rdx + 34] + LONG $0x265eb70f // movzx ebx, word [rsi + 38] LONG $0xd2940f41 // sete r10b LONG $0x24423b66 // cmp ax, word [rdx + 36] LONG $0x2846b70f // movzx eax, word [rsi + 40] LONG $0xd6940f41 // sete r14b - LONG $0x264a3b66 // cmp cx, word [rdx + 38] - LONG $0x2a4eb70f // movzx ecx, word [rsi + 42] + LONG $0x265a3b66 // cmp bx, word [rdx + 38] + LONG $0x2a5eb70f // movzx ebx, word [rsi + 42] LONG $0xd4940f41 // sete r12b LONG $0x28423b66 // cmp ax, word [rdx + 40] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - LONG $0x2a4a3b66 // cmp cx, word [rdx + 42] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2a5a3b66 // cmp bx, word [rdx + 42] LONG $0x2c46b70f // movzx eax, word [rsi + 44] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x2c423b66 // cmp ax, word [rdx + 44] LONG $0x2e46b70f // movzx eax, word [rsi + 46] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x2e423b66 // cmp ax, word [rdx + 46] LONG $0x3046b70f // movzx eax, word [rsi + 48] LONG $0xd1940f41 // sete r9b LONG $0x30423b66 // cmp ax, word [rdx + 48] LONG $0x3246b70f // movzx eax, word [rsi + 50] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x32423b66 // cmp ax, word [rdx + 50] LONG $0x3446b70f // movzx eax, word [rsi + 52] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x34423b66 // cmp ax, word [rdx + 52] LONG $0x3646b70f // movzx eax, word [rsi + 54] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x36423b66 // cmp ax, word [rdx + 54] LONG $0x3846b70f // movzx eax, word [rsi + 56] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE 
$0x10 // sete byte [rsp + 16] LONG $0x38423b66 // cmp ax, word [rdx + 56] LONG $0x3a46b70f // movzx eax, word [rsi + 58] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x3a423b66 // cmp ax, word [rdx + 58] LONG $0x3c46b70f // movzx eax, word [rsi + 60] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x3c423b66 // cmp ax, word [rdx + 60] LONG $0x3e46b70f // movzx eax, word [rsi + 62] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x40c68348 // add rsi, 64 LONG $0x3e423b66 // cmp ax, word [rdx + 62] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + 
WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x40c28348 // add rdx, 64 - LONG $0x04c68349 // add r14, 4 - LONG 
$0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_85 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_87: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_89: @@ -1629,30 +1725,30 @@ LBB0_89: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_89 JMP LBB0_123 LBB0_101: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_105 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_103: WORD $0x8b48; BYTE $0x0e // mov rcx, qword [rsi] @@ -1665,7 +1761,7 @@ LBB0_103: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -1674,49 +1770,49 @@ LBB0_103: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_103 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_105: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_109 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_107: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x8b48; BYTE $0x06 // mov rax, qword [rsi] LONG $0x084e8b48 // mov rcx, qword 
[rsi + 8] WORD $0x3b48; BYTE $0x02 // cmp rax, qword [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x084a3b48 // cmp rcx, qword [rdx + 8] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x10468b48 // mov rax, qword [rsi + 16] LONG $0x10423b48 // cmp rax, qword [rdx + 16] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x18468b48 // mov rax, qword [rsi + 24] LONG $0x18423b48 // cmp rax, qword [rdx + 24] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x20468b48 // mov rax, qword [rsi + 32] LONG $0x20423b48 // cmp rax, qword [rdx + 32] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x28468b48 // mov rax, qword [rsi + 40] LONG $0x28423b48 // cmp rax, qword [rdx + 40] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x30468b48 // mov rax, qword [rsi + 48] LONG $0x30423b48 // cmp rax, qword [rdx + 48] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x38468b48 // mov rax, qword [rsi + 56] LONG $0x38423b48 // cmp rax, qword [rdx + 56] LONG $0xd5940f41 // sete r13b LONG $0x40468b48 // mov rax, qword [rsi + 64] LONG $0x40423b48 // cmp rax, qword [rdx + 64] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x48468b48 // mov rax, qword [rsi + 72] LONG $0x48423b48 // cmp rax, qword [rdx + 72] LONG $0xd0940f41 // sete r8b @@ -1728,165 +1824,165 @@ LBB0_107: LONG $0xd7940f41 // sete r15b LONG $0x60468b48 // mov rax, qword [rsi + 96] LONG $0x60423b48 // cmp rax, qword [rdx + 96] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x68468b48 // mov rax, qword [rsi + 104] LONG $0x68423b48 // cmp rax, qword [rdx + 104] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x70468b48 // mov rax, qword [rsi + 112] LONG $0x70423b48 // cmp rax, qword [rdx + 112] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x78468b48 // mov rax, qword [rsi + 120] LONG $0x78423b48 // cmp rax, qword [rdx + 120] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil LONG $0x80868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 128] - LONG $0x888e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 136] + LONG $0x889e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 136] LONG $0x80823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 128] LONG $0x90868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 144] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - LONG $0x888a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 136] - LONG $0x988e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 152] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x889a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 136] + LONG $0x989e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 152] LONG $0xd2940f41 // sete r10b LONG $0x90823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 144] LONG $0xa0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi 
+ 160] LONG $0xd6940f41 // sete r14b - LONG $0x988a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 152] - LONG $0xa88e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 168] + LONG $0x989a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 152] + LONG $0xa89e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 168] LONG $0xd4940f41 // sete r12b LONG $0xa0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 160] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - LONG $0xa88a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 168] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0xa89a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 168] LONG $0xb0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 176] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0xb0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 176] LONG $0xb8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 184] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0xb8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 184] LONG $0xc0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 192] LONG $0xd1940f41 // sete r9b LONG $0xc0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 192] LONG $0xc8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 200] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0xc8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 200] LONG $0xd0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 208] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0xd0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 208] LONG $0xd8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 216] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0xd8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 216] LONG $0xe0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 224] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0xe0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 224] LONG $0xe8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 232] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0xe8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 232] LONG $0xf0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 240] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0xf0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 240] LONG $0xf8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 248] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0xf8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 248] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; 
BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE 
$0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_107 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_109: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_111: @@ -1897,294 +1993,389 @@ LBB0_111: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 
WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_111 JMP LBB0_123 LBB0_112: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_116 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x634c; BYTE $0xd0 // movsxd r10, eax LBB0_114: LONG $0x0610fac5 // vmovss xmm0, dword [rsi] LONG $0x04c68348 // add rsi, 4 LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] LONG $0x04528d48 // lea rdx, [rdx + 4] - LONG $0xd2940f41 // sete r10b - WORD $0xf641; BYTE $0xda // neg r10b - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax + WORD $0x9b0f; BYTE $0xd1 // setnp cl + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xcb20 // and bl, cl + WORD $0xdbf6 // neg bl + LONG $0x077a8d49 // lea rdi, [r10 + 7] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f49 // cmovns rdi, r10 LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] - WORD $0x3045; BYTE $0xc2 // xor r10b, r8b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc3 // xor bl, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] - WORD $0xc189 // mov ecx, eax + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0x2944; BYTE $0xc9 // sub ecx, r9d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0x2044; BYTE $0xd3 // and bl, r10b - WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x000001b8; BYTE $0x00 // mov eax, 1 + WORD $0xe0d3 // shl eax, cl + WORD $0xd820 // and al, bl + WORD $0x3044; BYTE $0xc0 // xor al, r8b + LONG $0x3c048841 // mov byte [r12 + rdi], al + LONG $0x01c28349 // add r10, 1 + LONG $0x08fa8349 // cmp r10, 8 JNE LBB0_114 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_116: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_120 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 + LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LBB0_118: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 LONG $0x0610fac5 // vmovss xmm0, dword [rsi] - LONG $0x4e10fac5; BYTE $0x04 // vmovss xmm1, dword [rsi + 4] LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] - LONG $0x2454940f; 
BYTE $0x04 // sete byte [rsp + 4] - LONG $0x4a2ef8c5; BYTE $0x04 // vucomiss xmm1, dword [rdx + 4] - WORD $0x940f; BYTE $0xd0 // sete al + LONG $0x4610fac5; BYTE $0x04 // vmovss xmm0, dword [rsi + 4] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x15244c88 // mov byte [rsp + 21], cl + LONG $0x422ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rdx + 4] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0d244c88 // mov byte [rsp + 13], cl LONG $0x4610fac5; BYTE $0x08 // vmovss xmm0, dword [rsi + 8] LONG $0x422ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rdx + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x14244c88 // mov byte [rsp + 20], cl LONG $0x4610fac5; BYTE $0x0c // vmovss xmm0, dword [rsi + 12] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x422ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rdx + 12] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x17244c88 // mov byte [rsp + 23], cl LONG $0x4610fac5; BYTE $0x10 // vmovss xmm0, dword [rsi + 16] LONG $0x422ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rdx + 16] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x16244c88 // mov byte [rsp + 22], cl LONG $0x4610fac5; BYTE $0x14 // vmovss xmm0, dword [rsi + 20] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x422ef8c5; BYTE $0x14 // vucomiss xmm0, dword [rdx + 20] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x4610fac5; BYTE $0x18 // vmovss xmm0, dword [rsi + 24] LONG $0x422ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rdx + 24] - LONG $0x4610fac5; BYTE $0x1c // vmovss xmm0, dword [rsi + 28] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al + LONG $0x4610fac5; BYTE $0x1c // vmovss xmm0, dword [rsi + 28] LONG $0x422ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rdx + 28] - LONG $0xd7940f41 // sete r15b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x03244c88 // mov byte [rsp + 3], cl LONG $0x4610fac5; BYTE $0x20 // vmovss xmm0, dword [rsi + 32] LONG $0x422ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rdx + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x13244c88 // mov byte [rsp + 19], cl LONG $0x4610fac5; BYTE $0x24 // vmovss xmm0, dword [rsi + 36] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x422ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rdx + 36] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x4610fac5; BYTE $0x28 // vmovss xmm0, dword [rsi + 40] LONG $0x422ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rdx + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x12244c88 // mov byte [rsp + 18], cl LONG $0x4610fac5; BYTE $0x2c // vmovss xmm0, dword [rsi + 44] - LONG $0xd1940f41 // sete r9b LONG $0x422ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rdx + 44] - LONG $0xd3940f41 // sete r11b + WORD 
$0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x11244c88 // mov byte [rsp + 17], cl LONG $0x4610fac5; BYTE $0x30 // vmovss xmm0, dword [rsi + 48] LONG $0x422ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rdx + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x4610fac5; BYTE $0x34 // vmovss xmm0, dword [rsi + 52] - LONG $0xd2940f41 // sete r10b LONG $0x422ef8c5; BYTE $0x34 // vucomiss xmm0, dword [rdx + 52] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0f244c88 // mov byte [rsp + 15], cl LONG $0x4610fac5; BYTE $0x38 // vmovss xmm0, dword [rsi + 56] LONG $0x422ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rdx + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0c244c88 // mov byte [rsp + 12], cl LONG $0x4610fac5; BYTE $0x3c // vmovss xmm0, dword [rsi + 60] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x422ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rdx + 60] - WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0b244c88 // mov byte [rsp + 11], cl LONG $0x4610fac5; BYTE $0x40 // vmovss xmm0, dword [rsi + 64] LONG $0x422ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rdx + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0e244c88 // mov byte [rsp + 14], cl LONG $0x4610fac5; BYTE $0x44 // vmovss xmm0, dword [rsi + 68] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x422ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rdx + 68] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0a244c88 // mov byte [rsp + 10], cl LONG $0x4610fac5; BYTE $0x48 // vmovss xmm0, dword [rsi + 72] - LONG $0xd6940f41 // sete r14b LONG $0x422ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rdx + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x09244c88 // mov byte [rsp + 9], cl LONG $0x4610fac5; BYTE $0x4c // vmovss xmm0, dword [rsi + 76] - LONG $0xd4940f41 // sete r12b LONG $0x422ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rdx + 76] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al LONG $0x4610fac5; BYTE $0x50 // vmovss xmm0, dword [rsi + 80] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] LONG $0x422ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rdx + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x4610fac5; BYTE $0x54 // vmovss xmm0, dword [rsi + 84] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x422ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rdx + 84] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x07244c88 // mov byte [rsp + 7], cl LONG $0x4610fac5; BYTE $0x58 // vmovss xmm0, dword [rsi + 88] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] LONG $0x422ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rdx + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD 
$0xc120 // and cl, al + LONG $0x05244c88 // mov byte [rsp + 5], cl LONG $0x4610fac5; BYTE $0x5c // vmovss xmm0, dword [rsi + 92] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x422ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rdx + 92] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al LONG $0x4610fac5; BYTE $0x60 // vmovss xmm0, dword [rsi + 96] - LONG $0xd0940f41 // sete r8b LONG $0x422ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rdx + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x06244c88 // mov byte [rsp + 6], cl LONG $0x4610fac5; BYTE $0x64 // vmovss xmm0, dword [rsi + 100] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x422ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rdx + 100] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x4610fac5; BYTE $0x68 // vmovss xmm0, dword [rsi + 104] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x422ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rdx + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al LONG $0x4610fac5; BYTE $0x6c // vmovss xmm0, dword [rsi + 108] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x422ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rdx + 108] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al LONG $0x4610fac5; BYTE $0x70 // vmovss xmm0, dword [rsi + 112] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x422ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rdx + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl LONG $0x4610fac5; BYTE $0x74 // vmovss xmm0, dword [rsi + 116] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x422ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rdx + 116] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x4610fac5; BYTE $0x78 // vmovss xmm0, dword [rsi + 120] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x422ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rdx + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al LONG $0x4610fac5; BYTE $0x7c // vmovss xmm0, dword [rsi + 124] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 LONG $0x422ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rdx + 124] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x04244402 // add al, byte [rsp + 4] + LONG $0x15244402 // add al, byte [rsp + 21] + LONG $0x05e7c040 // shl dil, 5 LONG $0x06e5c041 // shl r13b, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0845; BYTE $0xef // or r15b, r13b - LONG $0x6cb60f44; WORD $0x0524 // movzx r13d, byte [rsp + 5] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0841; BYTE $0xc5 // or r13b, al - WORD $0x8944; BYTE $0xe8 // mov eax, r13d - WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD 
$0x8944; BYTE $0xef // mov edi, r13d + LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xc108 // or cl, al + WORD $0xd889 // mov eax, ebx + WORD $0xd800 // add al, bl + LONG $0x13244402 // add al, byte [rsp + 19] + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x03 // movzx eax, byte [rsp + 3] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x03244488 // mov byte [rsp + 3], al + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x17 // movzx eax, byte [rsp + 23] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + LONG $0x245cb60f; BYTE $0x11 // movzx ebx, byte [rsp + 17] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] - LONG $0x03e5c041 // shl r13b, 3 + LONG $0x04e5c041 // shl r13b, 4 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - WORD $0x8941; BYTE $0xcd // mov r13d, ecx - LONG $0x03e3c041 // shl r11b, 3 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xda // or r10b, r11b - LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd0 // or al, r10b - LONG $0x4cb60f44; WORD $0x0624 // movzx r9d, byte [rsp + 6] - LONG $0x06e1c041 // shl r9b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xcb // or bl, r9b - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al - WORD $0x0045; BYTE $0xf6 // add r14b, r14b - LONG $0x24740244; BYTE $0x0e // add r14b, byte [rsp + 14] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x0f // movzx ebx, byte [rsp + 15] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x0a // movzx ebx, byte [rsp + 10] + WORD $0xdb00 // add bl, bl + LONG $0x0e245c02 // add bl, byte [rsp + 14] + LONG $0x244cb60f; BYTE $0x09 // movzx ecx, byte [rsp + 9] + WORD $0xe1c0; BYTE $0x02 // 
shl cl, 2 + WORD $0xd908 // or cl, bl + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0841; BYTE $0xcb // or r11b, cl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x5cb60f44; WORD $0x0324 // movzx r11d, byte [rsp + 3] + WORD $0x0845; BYTE $0xeb // or r11b, r13b + LONG $0x6cb60f44; WORD $0x0724 // movzx r13d, byte [rsp + 7] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x05 // movzx ecx, byte [rsp + 5] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x07e2c041 // shl r10b, 7 + WORD $0x0841; BYTE $0xca // or r10b, cl + WORD $0x0841; BYTE $0xda // or r10b, bl + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xc900 // add cl, cl + LONG $0x06244c02 // add cl, byte [rsp + 6] + LONG $0x02e7c041 // shl r15b, 2 + WORD $0x0841; BYTE $0xcf // or r15b, cl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xf1 // or cl, r14b + LONG $0x241c8845 // mov byte [r12], r11b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448841; BYTE $0x01 // mov byte [r12 + 1], al LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x14244402 // add al, byte [rsp + 20] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x13 // movzx ecx, byte [rsp + 19] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xc7 // or dil, al - WORD $0x0840; BYTE $0xcf // or dil, cl - LONG $0x02468845 // mov byte [r14 + 2], r8b - LONG $0x037e8841 // mov byte [r14 + 3], dil + LONG $0x24548845; BYTE $0x02 // mov byte [r12 + 2], r10b + LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 JNE LBB0_118 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] LBB0_120: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE 
$0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_122: + LONG $0x01498d4c // lea r9, [rcx + 1] LONG $0x0410fac5; BYTE $0x8e // vmovss xmm0, dword [rsi + 4*rcx] LONG $0x042ef8c5; BYTE $0x8a // vucomiss xmm0, dword [rdx + 4*rcx] - LONG $0x01418d4c // lea r8, [rcx + 1] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0xdbf6 // neg bl + WORD $0x9b0f; BYTE $0xd3 // setnp bl + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xd820 // and al, bl + WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc0 // xor al, r8b WORD $0xe180; BYTE $0x07 // and cl, 7 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd820 // and al, bl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xc3 // xor bl, r8b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + WORD $0x894c; BYTE $0xc9 // mov rcx, r9 + WORD $0x394d; BYTE $0xce // cmp r14, r9 JNE LBB0_122 JMP LBB0_123 LBB0_57: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_61 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_59: WORD $0xb60f; BYTE $0x0e // movzx ecx, byte [rsi] @@ -2197,7 +2388,7 @@ LBB0_59: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -2206,49 +2397,49 @@ LBB0_59: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_59 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_61: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_65 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LBB0_63: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb60f; BYTE $0x06 // movzx eax, byte [rsi] LONG $0x014eb60f // movzx 
ecx, byte [rsi + 1] WORD $0x023a // cmp al, byte [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] WORD $0x4a3a; BYTE $0x01 // cmp cl, byte [rdx + 1] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x0246b60f // movzx eax, byte [rsi + 2] WORD $0x423a; BYTE $0x02 // cmp al, byte [rdx + 2] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x0346b60f // movzx eax, byte [rsi + 3] WORD $0x423a; BYTE $0x03 // cmp al, byte [rdx + 3] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x0446b60f // movzx eax, byte [rsi + 4] WORD $0x423a; BYTE $0x04 // cmp al, byte [rdx + 4] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x0546b60f // movzx eax, byte [rsi + 5] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x0646b60f // movzx eax, byte [rsi + 6] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0746b60f // movzx eax, byte [rsi + 7] WORD $0x423a; BYTE $0x07 // cmp al, byte [rdx + 7] LONG $0xd7940f41 // sete r15b LONG $0x0846b60f // movzx eax, byte [rsi + 8] WORD $0x423a; BYTE $0x08 // cmp al, byte [rdx + 8] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x0946b60f // movzx eax, byte [rsi + 9] WORD $0x423a; BYTE $0x09 // cmp al, byte [rdx + 9] LONG $0xd7940f40 // sete dil @@ -2263,16 +2454,16 @@ LBB0_63: LONG $0xd6940f41 // sete r14b LONG $0x0d46b60f // movzx eax, byte [rsi + 13] WORD $0x423a; BYTE $0x0d // cmp al, byte [rdx + 13] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x0e46b60f // movzx eax, byte [rsi + 14] WORD $0x423a; BYTE $0x0e // cmp al, byte [rdx + 14] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x0f46b60f // movzx eax, byte [rsi + 15] WORD $0x423a; BYTE $0x0f // cmp al, byte [rdx + 15] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd0940f41 // sete r8b LONG $0x1046b60f // movzx eax, byte [rsi + 16] WORD $0x423a; BYTE $0x10 // cmp al, byte [rdx + 16] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x1146b60f // movzx eax, byte [rsi + 17] WORD $0x423a; BYTE $0x11 // cmp al, byte [rdx + 17] LONG $0xd4940f41 // sete r12b @@ -2281,144 +2472,144 @@ LBB0_63: LONG $0xd5940f41 // sete r13b LONG $0x1346b60f // movzx eax, byte [rsi + 19] WORD $0x423a; BYTE $0x13 // cmp al, byte [rdx + 19] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] LONG $0x1446b60f // movzx eax, byte [rsi + 20] WORD $0x423a; BYTE $0x14 // cmp al, byte [rdx + 20] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x1546b60f // movzx eax, byte [rsi + 21] WORD $0x423a; BYTE $0x15 // cmp al, byte [rdx + 21] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] LONG $0x1646b60f // movzx eax, byte [rsi + 22] WORD $0x423a; BYTE $0x16 // cmp al, byte [rdx + 22] - 
LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x1746b60f // movzx eax, byte [rsi + 23] WORD $0x423a; BYTE $0x17 // cmp al, byte [rdx + 23] LONG $0xd1940f41 // sete r9b LONG $0x1846b60f // movzx eax, byte [rsi + 24] WORD $0x423a; BYTE $0x18 // cmp al, byte [rdx + 24] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x1946b60f // movzx eax, byte [rsi + 25] WORD $0x423a; BYTE $0x19 // cmp al, byte [rdx + 25] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x1a46b60f // movzx eax, byte [rsi + 26] WORD $0x423a; BYTE $0x1a // cmp al, byte [rdx + 26] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x1b46b60f // movzx eax, byte [rsi + 27] WORD $0x423a; BYTE $0x1b // cmp al, byte [rdx + 27] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x1c46b60f // movzx eax, byte [rsi + 28] WORD $0x423a; BYTE $0x1c // cmp al, byte [rdx + 28] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x1d46b60f // movzx eax, byte [rsi + 29] WORD $0x423a; BYTE $0x1d // cmp al, byte [rdx + 29] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x1e46b60f // movzx eax, byte [rsi + 30] WORD $0x423a; BYTE $0x1e // cmp al, byte [rdx + 30] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x1f46b60f // movzx eax, byte [rsi + 31] LONG $0x20c68348 // add rsi, 32 WORD $0x423a; BYTE $0x1f // cmp al, byte [rdx + 31] - LONG $0xd0940f41 // sete r8b - WORD $0xc900 // add cl, cl - LONG $0x28244c02 // add cl, byte [rsp + 40] - WORD $0xc889 // mov eax, ecx - LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xdb00 // add bl, bl + LONG $0x04245c02 // add bl, byte [rsp + 4] + WORD $0xd889 // mov eax, ebx + LONG $0x245cb60f; BYTE $0x05 // movzx ebx, byte [rsp + 5] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xcf // or r15b, cl - LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x245cb60f; BYTE $0x15 // movzx ebx, byte [rsp + 21] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x07 // add dil, byte [rsp + 7] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x247c0240; BYTE $0x08 // add dil, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e2c041 // shl r10b, 2 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0xcf89 // mov edi, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 
4 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx LONG $0x03e3c041 // shl r11b, 3 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xde // or r14b, r11b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x247cb60f; BYTE $0x06 // movzx edi, byte [rsp + 6] + LONG $0x247cb60f; BYTE $0x07 // movzx edi, byte [rsp + 7] LONG $0x06e7c040 // shl dil, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x0841; BYTE $0xc0 // or r8b, al WORD $0x0045; BYTE $0xe4 // add r12b, r12b - LONG $0x24640244; BYTE $0x0d // add r12b, byte [rsp + 13] + LONG $0x24640244; BYTE $0x0e // add r12b, byte [rsp + 14] LONG $0x02e5c041 // shl r13b, 2 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0b // movzx ecx, byte [rsp + 11] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x243c8845 // mov byte [r12], r15b + LONG $0x245cb60f; BYTE $0x0c // movzx ebx, byte [rsp + 12] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, 
byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x12 // movzx ecx, byte [rsp + 18] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x13 // movzx ebx, byte [rsp + 19] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x20c28348 // add rdx, 32 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 JNE LBB0_63 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] LBB0_65: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_67: @@ -2429,30 +2620,30 @@ LBB0_67: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_67 JMP LBB0_123 LBB0_90: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_94 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_92: WORD $0x0e8b // mov ecx, dword [rsi] @@ -2465,7 +2656,7 @@ LBB0_92: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte 
[r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -2474,49 +2665,49 @@ LBB0_92: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_92 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_94: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_98 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_96: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x068b // mov eax, dword [rsi] WORD $0x4e8b; BYTE $0x04 // mov ecx, dword [rsi + 4] WORD $0x023b // cmp eax, dword [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] WORD $0x4a3b; BYTE $0x04 // cmp ecx, dword [rdx + 4] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] WORD $0x468b; BYTE $0x08 // mov eax, dword [rsi + 8] WORD $0x423b; BYTE $0x08 // cmp eax, dword [rdx + 8] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] WORD $0x468b; BYTE $0x0c // mov eax, dword [rsi + 12] WORD $0x423b; BYTE $0x0c // cmp eax, dword [rdx + 12] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] WORD $0x468b; BYTE $0x10 // mov eax, dword [rsi + 16] WORD $0x423b; BYTE $0x10 // cmp eax, dword [rdx + 16] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] WORD $0x468b; BYTE $0x14 // mov eax, dword [rsi + 20] WORD $0x423b; BYTE $0x14 // cmp eax, dword [rdx + 20] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] WORD $0x468b; BYTE $0x18 // mov eax, dword [rsi + 24] WORD $0x423b; BYTE $0x18 // cmp eax, dword [rdx + 24] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] WORD $0x468b; BYTE $0x1c // mov eax, dword [rsi + 28] WORD $0x423b; BYTE $0x1c // cmp eax, dword [rdx + 28] LONG $0xd5940f41 // sete r13b WORD $0x468b; BYTE $0x20 // mov eax, dword [rsi + 32] WORD $0x423b; BYTE $0x20 // cmp eax, dword [rdx + 32] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] WORD $0x468b; BYTE $0x24 // mov eax, dword [rsi + 36] WORD $0x423b; BYTE $0x24 // cmp eax, dword [rdx + 36] LONG $0xd0940f41 // sete r8b @@ -2528,165 +2719,165 @@ LBB0_96: LONG $0xd7940f41 // sete r15b WORD $0x468b; BYTE $0x30 // mov eax, dword [rsi + 48] WORD $0x423b; BYTE $0x30 // cmp eax, dword [rdx + 48] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] WORD $0x468b; BYTE $0x34 // mov eax, dword [rsi + 52] WORD $0x423b; BYTE $0x34 // cmp eax, dword [rdx + 52] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; 
BYTE $0x07 // sete byte [rsp + 7] WORD $0x468b; BYTE $0x38 // mov eax, dword [rsi + 56] WORD $0x423b; BYTE $0x38 // cmp eax, dword [rdx + 56] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] WORD $0x468b; BYTE $0x3c // mov eax, dword [rsi + 60] WORD $0x423b; BYTE $0x3c // cmp eax, dword [rdx + 60] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil WORD $0x468b; BYTE $0x40 // mov eax, dword [rsi + 64] - WORD $0x4e8b; BYTE $0x44 // mov ecx, dword [rsi + 68] + WORD $0x5e8b; BYTE $0x44 // mov ebx, dword [rsi + 68] WORD $0x423b; BYTE $0x40 // cmp eax, dword [rdx + 64] WORD $0x468b; BYTE $0x48 // mov eax, dword [rsi + 72] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - WORD $0x4a3b; BYTE $0x44 // cmp ecx, dword [rdx + 68] - WORD $0x4e8b; BYTE $0x4c // mov ecx, dword [rsi + 76] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + WORD $0x5a3b; BYTE $0x44 // cmp ebx, dword [rdx + 68] + WORD $0x5e8b; BYTE $0x4c // mov ebx, dword [rsi + 76] LONG $0xd2940f41 // sete r10b WORD $0x423b; BYTE $0x48 // cmp eax, dword [rdx + 72] WORD $0x468b; BYTE $0x50 // mov eax, dword [rsi + 80] LONG $0xd6940f41 // sete r14b - WORD $0x4a3b; BYTE $0x4c // cmp ecx, dword [rdx + 76] - WORD $0x4e8b; BYTE $0x54 // mov ecx, dword [rsi + 84] + WORD $0x5a3b; BYTE $0x4c // cmp ebx, dword [rdx + 76] + WORD $0x5e8b; BYTE $0x54 // mov ebx, dword [rsi + 84] LONG $0xd4940f41 // sete r12b WORD $0x423b; BYTE $0x50 // cmp eax, dword [rdx + 80] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - WORD $0x4a3b; BYTE $0x54 // cmp ecx, dword [rdx + 84] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + WORD $0x5a3b; BYTE $0x54 // cmp ebx, dword [rdx + 84] WORD $0x468b; BYTE $0x58 // mov eax, dword [rsi + 88] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] WORD $0x423b; BYTE $0x58 // cmp eax, dword [rdx + 88] WORD $0x468b; BYTE $0x5c // mov eax, dword [rsi + 92] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] WORD $0x423b; BYTE $0x5c // cmp eax, dword [rdx + 92] WORD $0x468b; BYTE $0x60 // mov eax, dword [rsi + 96] LONG $0xd1940f41 // sete r9b WORD $0x423b; BYTE $0x60 // cmp eax, dword [rdx + 96] WORD $0x468b; BYTE $0x64 // mov eax, dword [rsi + 100] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] WORD $0x423b; BYTE $0x64 // cmp eax, dword [rdx + 100] WORD $0x468b; BYTE $0x68 // mov eax, dword [rsi + 104] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] WORD $0x423b; BYTE $0x68 // cmp eax, dword [rdx + 104] WORD $0x468b; BYTE $0x6c // mov eax, dword [rsi + 108] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] WORD $0x423b; BYTE $0x6c // cmp eax, dword [rdx + 108] WORD $0x468b; BYTE $0x70 // mov eax, dword [rsi + 112] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] WORD $0x423b; BYTE $0x70 // cmp eax, dword [rdx + 112] WORD $0x468b; BYTE $0x74 // mov eax, dword [rsi + 116] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] WORD $0x423b; BYTE $0x74 // cmp eax, dword [rdx + 116] WORD $0x468b; BYTE $0x78 // mov eax, dword [rsi + 120] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete 
byte [rsp + 19] WORD $0x423b; BYTE $0x78 // cmp eax, dword [rdx + 120] WORD $0x468b; BYTE $0x7c // mov eax, dword [rsi + 124] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 WORD $0x423b; BYTE $0x7c // cmp eax, dword [rdx + 124] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl 
r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_96 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; 
BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_98: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_100: @@ -2697,16 +2888,16 @@ LBB0_100: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_100 LBB0_123: @@ -2768,7 +2959,7 @@ TEXT ·_comparison_equal_arr_scalar_avx2(SB), $1320-48 WORD $0xff83; BYTE $0x05 // cmp edi, 5 JE LBB1_57 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB1_164 + JNE LBB1_165 WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -2810,15 +3001,15 @@ LBB1_9: LONG $0x20fa8349 // cmp r10, 32 JL LBB1_101 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 LBB1_11: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6940f41 // sete r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d @@ -2834,11 +3025,11 @@ LBB1_11: LONG $0x206e3944 // cmp dword [rsi + 32], r13d LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3940f41 // sete r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d @@ -2878,67 +3069,68 @@ LBB1_11: LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x68 // add dil, byte [rsp + 104] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl 
al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x48245402 // add dl, byte [rsp + 72] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x48244402 // add al, byte [rsp + 72] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // 
mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xc900 // add cl, cl @@ -2961,23 +3153,23 @@ LBB1_11: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB1_11 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB1_102 - JMP LBB1_164 + JMP LBB1_165 LBB1_13: WORD $0xff83; BYTE $0x08 // cmp edi, 8 @@ -2987,7 +3179,7 @@ LBB1_13: WORD $0xff83; BYTE $0x0b // cmp edi, 11 JE LBB1_73 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB1_164 + JNE LBB1_165 LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xfa490f4d // cmovns r15, r10 @@ -3003,7 +3195,9 @@ LBB1_13: LBB1_19: LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] LONG $0x08768d48 // lea rsi, [rsi + 8] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xca20 // and dl, cl WORD $0xdaf6 // neg dl LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax @@ -3030,163 +3224,248 @@ LBB1_21: JL LBB1_105 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 - QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 LBB1_23: LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x462ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rsi + 8] - LONG $0xd1940f41 // sete r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x462ef9c5; BYTE $0x10 // vucomisd xmm0, qword [rsi + 16] - LONG $0xd6940f41 // sete r14b - LONG $0x462ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rsi + 24] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al + LONG $0x462ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rsi + 24] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 320], cl LONG $0x462ef9c5; BYTE $0x20 // vucomisd xmm0, qword [rsi + 32] - QUAD $0x000000882494940f // sete byte [rsp 
+ 136] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], cl LONG $0x462ef9c5; BYTE $0x28 // vucomisd xmm0, qword [rsi + 40] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x462ef9c5; BYTE $0x30 // vucomisd xmm0, qword [rsi + 48] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x462ef9c5; BYTE $0x38 // vucomisd xmm0, qword [rsi + 56] - WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x1c244c88 // mov byte [rsp + 28], cl LONG $0x462ef9c5; BYTE $0x40 // vucomisd xmm0, qword [rsi + 64] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x462ef9c5; BYTE $0x48 // vucomisd xmm0, qword [rsi + 72] - WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x462ef9c5; BYTE $0x50 // vucomisd xmm0, qword [rsi + 80] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x462ef9c5; BYTE $0x58 // vucomisd xmm0, qword [rsi + 88] - LONG $0xd2940f41 // sete r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x462ef9c5; BYTE $0x60 // vucomisd xmm0, qword [rsi + 96] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x462ef9c5; BYTE $0x68 // vucomisd xmm0, qword [rsi + 104] - LONG $0xd4940f41 // sete r12b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x462ef9c5; BYTE $0x70 // vucomisd xmm0, qword [rsi + 112] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al LONG $0x462ef9c5; BYTE $0x78 // vucomisd xmm0, qword [rsi + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl QUAD $0x00000080862ef9c5 // vucomisd xmm0, qword [rsi + 128] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x90248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], cl QUAD $0x00000088862ef9c5 // vucomisd xmm0, qword [rsi + 136] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl QUAD $0x00000090862ef9c5 // vucomisd xmm0, qword [rsi + 144] - QUAD 
$0x000000802494940f // sete byte [rsp + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al QUAD $0x00000098862ef9c5 // vucomisd xmm0, qword [rsi + 152] - QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x80248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], al QUAD $0x000000a0862ef9c5 // vucomisd xmm0, qword [rsi + 160] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x78244488 // mov byte [rsp + 120], al QUAD $0x000000a8862ef9c5 // vucomisd xmm0, qword [rsi + 168] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x68244488 // mov byte [rsp + 104], al QUAD $0x000000b0862ef9c5 // vucomisd xmm0, qword [rsi + 176] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd4940f41 // sete r12b + WORD $0x2041; BYTE $0xc4 // and r12b, al QUAD $0x000000b8862ef9c5 // vucomisd xmm0, qword [rsi + 184] - LONG $0xd7940f41 // sete r15b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al QUAD $0x000000c0862ef9c5 // vucomisd xmm0, qword [rsi + 192] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x70244488 // mov byte [rsp + 112], al QUAD $0x000000c8862ef9c5 // vucomisd xmm0, qword [rsi + 200] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al QUAD $0x000000d0862ef9c5 // vucomisd xmm0, qword [rsi + 208] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al QUAD $0x000000d8862ef9c5 // vucomisd xmm0, qword [rsi + 216] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al QUAD $0x000000e0862ef9c5 // vucomisd xmm0, qword [rsi + 224] - QUAD $0x000001402494940f // sete byte [rsp + 320] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0xa0248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], al QUAD $0x000000e8862ef9c5 // vucomisd xmm0, qword [rsi + 232] - QUAD $0x000001202494940f // sete byte [rsp + 288] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x98248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], al QUAD $0x000000f0862ef9c5 // vucomisd xmm0, qword [rsi + 240] - LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al QUAD $0x000000f8862ef9c5 // vucomisd xmm0, qword [rsi + 248] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd0940f41 // sete r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x000000a0248c0244 // add r9b, byte [rsp + 160] + WORD $0x2041; BYTE $0xc0 // and r8b, al + WORD $0xdb00 // add bl, bl + LONG $0x20245c02 // add bl, 
byte [rsp + 32] + LONG $0x05e7c040 // shl dil, 5 + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x48244402 // add al, byte [rsp + 72] + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x1c244488 // mov byte [rsp + 28], al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xfb // or bl, dil + LONG $0x247cb60f; BYTE $0x50 // movzx edi, byte [rsp + 80] + LONG $0x05e7c040 // shl dil, 5 + WORD $0xe2c0; BYTE $0x06 // shl dl, 6 + WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x6cb60f44; WORD $0x4824 // movzx r13d, byte [rsp + 72] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xd5 // or r13b, dl + QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] + WORD $0xd200 // add dl, dl + LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd108 // or cl, dl QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil 
- WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + WORD $0xca08 // or dl, cl + WORD $0xd189 // mov ecx, edx + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + WORD $0xca08 // or dl, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xc108 // or cl, al + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + LONG $0x06e4c041 // shl r12b, 6 + WORD $0x0841; BYTE $0xc4 // or r12b, al + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x07e2c041 // shl r10b, 7 + WORD $0x0845; BYTE $0xe2 // or r10b, r12b + WORD $0x0841; BYTE $0xd2 // or r10b, dl + WORD $0x0045; BYTE $0xff // add r15b, r15b + LONG $0x247c0244; BYTE $0x70 // add r15b, byte [rsp + 112] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xf3 // or r11b, r14b + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd8 // or al, r11b + WORD $0xc389 // mov ebx, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x0888 // mov byte [rax], cl + QUAD $0x000000982494b60f // movzx edx, byte [rsp + 152] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd1 // or r9b, dl + LONG $0x01688844 // mov byte [rax + 1], r13b LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + LONG $0x02508844 // mov byte [rax + 2], r10b + LONG $0x03408844 // mov byte [rax + 3], r8b LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD 
$0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB1_23 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] @@ -3194,13 +3473,13 @@ LBB1_23: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB1_106 - JMP LBB1_164 + JMP LBB1_165 LBB1_25: WORD $0xff83; BYTE $0x02 // cmp edi, 2 JE LBB1_81 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB1_164 + JNE LBB1_165 WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] LONG $0x1f6a8d4d // lea r13, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -3251,10 +3530,10 @@ LBB1_31: LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc3 // cmp r11, rax - JAE LBB1_165 + JAE LBB1_166 LONG $0xab048d4b // lea rax, [r11 + 4*r13] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB1_165 + JAE LBB1_166 LBB1_35: WORD $0xc031 // xor eax, eax @@ -3438,7 +3717,7 @@ LBB1_39: WORD $0xff83; BYTE $0x07 // cmp edi, 7 JE LBB1_93 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB1_164 + JNE LBB1_165 WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -3480,15 +3759,15 @@ LBB1_45: LONG $0x20fa8349 // cmp r10, 32 JL LBB1_112 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 LBB1_47: QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6940f41 // sete r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 @@ -3504,11 +3783,11 @@ LBB1_47: LONG $0x406e394c // cmp qword [rsi + 64], r13 LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3940f41 // sete r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 @@ -3548,32 +3827,33 @@ LBB1_47: LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x68 // add dil, byte [rsp + 104] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - 
WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] @@ -3581,71 +3861,71 @@ LBB1_47: LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x48245402 // add dl, byte [rsp + 72] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x48244402 // add al, byte [rsp + 72] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1b // mov byte [r11], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x38 // 
movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x1c // movzx edx, byte [rsp + 28] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xc000 // add al, al + LONG $0x20244402 // add al, byte [rsp + 32] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x03438845 // mov byte [r11 + 3], r8b + LONG $0x03538841 // mov byte [r11 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c38349 // add r11, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB1_47 WORD $0x894d; BYTE $0xde // mov r14, r11 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB1_113 - JMP LBB1_164 + JMP LBB1_165 LBB1_49: LONG $0x2ab70f44 // movzx r13d, word [rdx] @@ -3689,15 +3969,15 @@ LBB1_53: LONG $0x20fa8349 // cmp r10, 32 JL LBB1_116 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 LBB1_55: LONG $0x2e394466 // cmp word [rsi], r13w WORD $0x940f; BYTE $0xd0 // sete al LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w LONG $0xd6940f41 // sete r14b LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w @@ -3713,11 +3993,11 @@ LBB1_55: LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x6e394466; BYTE $0x14 // 
cmp word [rsi + 20], r13w - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w LONG $0xd3940f41 // sete r11b LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w @@ -3757,68 +4037,69 @@ LBB1_55: LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - WORD $0x0840; BYTE $0xc7 // or dil, al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x70 // add dil, byte [rsp + 112] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x68 // movzx edi, byte [rsp + 104] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x48245402 // add dl, byte [rsp + 72] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD 
$0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x48244402 // add al, byte [rsp + 72] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xc900 // add cl, cl @@ -3841,23 +4122,23 @@ LBB1_55: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x40c68348 // add rsi, 64 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB1_55 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB1_117 - JMP LBB1_164 + JMP LBB1_165 LBB1_57: LONG $0x2ab70f44 // movzx r13d, word [rdx] @@ -3901,15 +4182,15 @@ LBB1_61: LONG $0x20fa8349 // cmp r10, 32 JL LBB1_120 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 LBB1_63: LONG $0x2e394466 // cmp word [rsi], r13w QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w LONG $0xd6940f41 // sete r14b LONG $0x6e394466; BYTE $0x06 // 
cmp word [rsi + 6], r13w @@ -3925,11 +4206,11 @@ LBB1_63: LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w LONG $0xd3940f41 // sete r11b LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w @@ -3969,67 +4250,68 @@ LBB1_63: LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x68 // add dil, byte [rsp + 104] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x48245402 // add dl, byte [rsp + 72] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD 
$0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x48244402 // add al, byte [rsp + 72] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xc900 // add cl, cl @@ -4052,23 +4334,23 @@ LBB1_63: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x40c68348 // add rsi, 64 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB1_63 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB1_121 - JMP LBB1_164 + JMP LBB1_165 LBB1_65: WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] @@ -4112,15 +4394,15 @@ LBB1_69: LONG $0x20fa8349 // cmp r10, 32 JL LBB1_123 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 LBB1_71: WORD $0x394c; BYTE 
$0x2e // cmp qword [rsi], r13 QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6940f41 // sete r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 @@ -4136,11 +4418,11 @@ LBB1_71: LONG $0x406e394c // cmp qword [rsi + 64], r13 LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3940f41 // sete r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 @@ -4180,67 +4462,68 @@ LBB1_71: LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x68 // add dil, byte [rsp + 104] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x48245402 // add dl, byte [rsp + 72] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx 
edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x48244402 // add al, byte [rsp + 72] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xc900 // add cl, cl @@ -4263,23 +4546,23 @@ LBB1_71: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB1_71 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB1_124 - JMP LBB1_164 + JMP LBB1_165 LBB1_73: LONG $0x1f7a8d4d // lea r15, [r10 + 31] @@ -4297,7 +4580,9 @@ LBB1_73: LBB1_75: LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] LONG $0x04768d48 // lea rsi, [rsi + 4] 
+ WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xca20 // and dl, cl WORD $0xdaf6 // neg dl LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax @@ -4324,163 +4609,248 @@ LBB1_77: JL LBB1_126 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 - QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 LBB1_79: LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x462ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rsi + 4] - LONG $0xd1940f41 // sete r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x462ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rsi + 8] - LONG $0xd6940f41 // sete r14b - LONG $0x462ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rsi + 12] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al + LONG $0x462ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rsi + 12] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 320], cl LONG $0x462ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rsi + 16] - QUAD $0x000000882494940f // sete byte [rsp + 136] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], cl LONG $0x462ef8c5; BYTE $0x14 // vucomiss xmm0, dword [rsi + 20] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x462ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rsi + 24] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x462ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rsi + 28] - WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x1c244c88 // mov byte [rsp + 28], cl LONG $0x462ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rsi + 32] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x462ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rsi + 36] - WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x462ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rsi + 40] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x462ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rsi + 44] - LONG $0xd2940f41 // sete r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x60244c88 // mov byte 
[rsp + 96], cl LONG $0x462ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rsi + 48] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x462ef8c5; BYTE $0x34 // vucomiss xmm0, dword [rsi + 52] - LONG $0xd4940f41 // sete r12b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x462ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rsi + 56] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al LONG $0x462ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rsi + 60] + WORD $0x9b0f; BYTE $0xd0 // setnp al WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x462ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rsi + 64] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x90248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], cl LONG $0x462ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rsi + 68] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl LONG $0x462ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rsi + 72] - QUAD $0x000000802494940f // sete byte [rsp + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al LONG $0x462ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rsi + 76] - QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x80248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], al LONG $0x462ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rsi + 80] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x78244488 // mov byte [rsp + 120], al LONG $0x462ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rsi + 84] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x68244488 // mov byte [rsp + 104], al LONG $0x462ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rsi + 88] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd4940f41 // sete r12b + WORD $0x2041; BYTE $0xc4 // and r12b, al LONG $0x462ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rsi + 92] - LONG $0xd7940f41 // sete r15b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al LONG $0x462ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rsi + 96] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x70244488 // mov byte [rsp + 112], al LONG $0x462ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rsi + 100] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f41 // sete r15b + WORD $0x2041; 
BYTE $0xc7 // and r15b, al LONG $0x462ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rsi + 104] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al LONG $0x462ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rsi + 108] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al LONG $0x462ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rsi + 112] - QUAD $0x000001402494940f // sete byte [rsp + 320] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0xa0248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], al LONG $0x462ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rsi + 116] - QUAD $0x000001202494940f // sete byte [rsp + 288] + LONG $0xd09b0f41 // setnp r8b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xc0 // and al, r8b + LONG $0x98248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], al LONG $0x462ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rsi + 120] - LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al LONG $0x462ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rsi + 124] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd0940f41 // sete r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x000000a0248c0244 // add r9b, byte [rsp + 160] + WORD $0x2041; BYTE $0xc0 // and r8b, al + WORD $0xdb00 // add bl, bl + LONG $0x20245c02 // add bl, byte [rsp + 32] + LONG $0x05e7c040 // shl dil, 5 + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x48244402 // add al, byte [rsp + 72] + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + 
LONG $0x1c244488 // mov byte [rsp + 28], al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xfb // or bl, dil + LONG $0x247cb60f; BYTE $0x50 // movzx edi, byte [rsp + 80] + LONG $0x05e7c040 // shl dil, 5 + WORD $0xe2c0; BYTE $0x06 // shl dl, 6 + WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x6cb60f44; WORD $0x4824 // movzx r13d, byte [rsp + 72] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xd5 // or r13b, dl + QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] + WORD $0xd200 // add dl, dl + LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd108 // or cl, dl QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + WORD $0xca08 // or dl, cl + WORD $0xd189 // mov ecx, edx + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + WORD $0xca08 // or dl, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xc108 // or cl, al + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + LONG $0x06e4c041 // shl r12b, 6 + WORD $0x0841; BYTE $0xc4 // or r12b, al + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x07e2c041 // shl r10b, 7 + WORD $0x0845; BYTE $0xe2 // or r10b, r12b + WORD $0x0841; BYTE $0xd2 // or r10b, dl + WORD $0x0045; BYTE $0xff // add r15b, r15b + LONG $0x247c0244; BYTE $0x70 // add r15b, byte [rsp + 112] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xf3 // or r11b, r14b + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd8 // or al, r11b + WORD $0xc389 // mov ebx, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x0888 // mov byte [rax], cl + QUAD $0x000000982494b60f // movzx edx, byte [rsp + 152] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // 
or r15b, dil - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd1 // or r9b, dl + LONG $0x01688844 // mov byte [rax + 1], r13b LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + LONG $0x02508844 // mov byte [rax + 2], r10b + LONG $0x03408844 // mov byte [rax + 3], r8b LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB1_79 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] @@ -4488,7 +4858,7 @@ LBB1_79: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB1_127 - JMP LBB1_164 + JMP LBB1_165 LBB1_81: WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] @@ -4540,10 +4910,10 @@ LBB1_85: LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc3 // cmp r11, rax - JAE LBB1_168 + JAE LBB1_169 LONG $0xbb048d4b // lea rax, [r11 + 4*r15] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB1_168 + JAE LBB1_169 LBB1_89: WORD $0xc031 // xor eax, eax @@ -4765,15 +5135,15 @@ LBB1_97: LONG $0x20fa8349 // cmp r10, 32 JL LBB1_133 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 LBB1_99: QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6940f41 // sete r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d @@ -4789,11 +5159,11 @@ LBB1_99: LONG $0x206e3944 // cmp dword [rsi + 32], r13d LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x306e3944 
// cmp dword [rsi + 48], r13d LONG $0xd3940f41 // sete r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d @@ -4833,32 +5203,33 @@ LBB1_99: LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x68 // add dil, byte [rsp + 104] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] @@ -4866,77 +5237,77 @@ LBB1_99: LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x48245402 // add dl, byte [rsp + 72] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x48244402 // add al, byte [rsp + 72] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f 
// movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1b // mov byte [r11], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x1c // movzx edx, byte [rsp + 28] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xc000 // add al, al + LONG $0x20244402 // add al, byte [rsp + 32] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x03438845 // mov byte [r11 + 3], r8b + LONG $0x03538841 // mov byte [r11 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c38349 // add r11, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB1_99 WORD $0x894d; BYTE $0xde // mov r14, r11 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; 
BYTE $0xd7 // cmp r15, r10 JL LBB1_134 - JMP LBB1_164 + JMP LBB1_165 LBB1_101: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 LBB1_102: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -4978,13 +5349,13 @@ LBB1_104: LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 JNE LBB1_104 - JMP LBB1_161 + JMP LBB1_162 LBB1_105: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 LBB1_106: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -5002,7 +5373,7 @@ LBB1_108: LBB1_109: LONG $0x05e5c149 // shl r13, 5 WORD $0x394d; BYTE $0xfd // cmp r13, r15 - JGE LBB1_164 + JGE LBB1_165 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xe8 // sub r8, r13 WORD $0xf749; BYTE $0xd5 // not r13 @@ -5041,13 +5412,13 @@ LBB1_141: LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi JNE LBB1_141 - JMP LBB1_156 + JMP LBB1_157 LBB1_112: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 LBB1_113: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -5095,7 +5466,7 @@ LBB1_116: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 LBB1_117: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -5143,7 +5514,7 @@ LBB1_120: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 LBB1_121: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -5160,7 +5531,7 @@ LBB1_123: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 LBB1_124: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -5177,7 +5548,7 @@ LBB1_126: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 LBB1_127: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -5195,59 +5566,63 @@ LBB1_129: LBB1_130: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB1_154 + JNE LBB1_155 LBB1_132: WORD $0xf631 // xor esi, esi - JMP LBB1_157 + JMP LBB1_158 LBB1_133: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_164 + JGE LBB1_165 LBB1_134: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB1_159 + JNE LBB1_160 LBB1_135: WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB1_161 + JMP LBB1_162 LBB1_136: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 LBB1_137: LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xc820 // and al, cl WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3f // movzx r9d, byte [r15 
+ rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + LONG $0x14b60f45; BYTE $0x3f // movzx r10d, byte [r15 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b + WORD $0x3044; BYTE $0xd3 // xor bl, r10b LONG $0x3f1c8841 // mov byte [r15 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x462ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rsi + 8] LONG $0x10768d48 // lea rsi, [rsi + 16] + LONG $0xd29b0f41 // setnp r10b WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xd0 // and al, r10b WORD $0xd8f6 // neg al WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 @@ -5256,14 +5631,14 @@ LBB1_137: WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 + WORD $0x394d; BYTE $0xd9 // cmp r9, r11 JNE LBB1_137 LBB1_138: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_164 + JE LBB1_165 LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - JMP LBB1_163 + JMP LBB1_154 LBB1_142: WORD $0x894d; BYTE $0xc2 // mov r10, r8 @@ -5303,9 +5678,9 @@ LBB1_143: LBB1_144: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_164 + JE LBB1_165 LONG $0x2e394466 // cmp word [rsi], r13w - JMP LBB1_163 + JMP LBB1_164 LBB1_146: WORD $0x894d; BYTE $0xc2 // mov r10, r8 @@ -5345,35 +5720,39 @@ LBB1_147: LBB1_148: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_164 + JE LBB1_165 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - JMP LBB1_163 + JMP LBB1_164 LBB1_150: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 LBB1_151: LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xc820 // and al, cl WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3f // movzx r9d, byte [r15 + rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + LONG $0x14b60f45; BYTE $0x3f // movzx r10d, byte [r15 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b + WORD $0x3044; BYTE $0xd3 // xor bl, r10b LONG $0x3f1c8841 // mov byte [r15 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x462ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rsi + 4] LONG $0x08768d48 // lea rsi, [rsi + 8] + LONG $0xd29b0f41 // setnp r10b WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xd0 // and al, r10b WORD $0xd8f6 // neg al WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 @@ -5382,22 +5761,39 @@ LBB1_151: WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 + WORD $0x394d; BYTE $0xd9 // cmp r9, r11 JNE LBB1_151 LBB1_152: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_164 + JE LBB1_165 LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - JMP LBB1_163 LBB1_154: + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al + WORD $0xdaf6 // neg dl + WORD $0x894c; BYTE $0xd8 // mov rax, r11 + LONG $0x03e8c148 // shr rax, 3 + LONG $0x06348a41 // mov sil, 
byte [r14 + rax] + LONG $0x07e38041 // and r11b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf2 // xor dl, sil + WORD $0xd320 // and bl, dl + WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x061c8841 // mov byte [r14 + rax], bl + JMP LBB1_165 + +LBB1_155: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi QUAD $0x00000178249c8b4c // mov r11, qword [rsp + 376] -LBB1_155: +LBB1_156: LONG $0x34343845 // cmp byte [r12 + rsi], r14b WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl @@ -5424,14 +5820,14 @@ LBB1_155: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB1_155 + JNE LBB1_156 -LBB1_156: +LBB1_157: WORD $0x0149; BYTE $0xf4 // add r12, rsi -LBB1_157: +LBB1_158: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_164 + JE LBB1_165 LONG $0x24343845 // cmp byte [r12], r14b WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al @@ -5447,15 +5843,15 @@ LBB1_157: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB1_164 + JMP LBB1_165 -LBB1_159: +LBB1_160: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB1_160: +LBB1_161: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al @@ -5483,14 +5879,14 @@ LBB1_160: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB1_160 + JNE LBB1_161 -LBB1_161: +LBB1_162: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_164 + JE LBB1_165 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d -LBB1_163: +LBB1_164: WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xda // mov rdx, r11 @@ -5505,12 +5901,12 @@ LBB1_163: WORD $0x3040; BYTE $0xf3 // xor bl, sil LONG $0x161c8841 // mov byte [r14 + rdx], bl -LBB1_164: +LBB1_165: MOVQ 1280(SP), SP VZEROUPPER RET -LBB1_165: +LBB1_166: LONG $0xe0e58349 // and r13, -32 WORD $0x894c; BYTE $0xe8 // mov rax, r13 LONG $0x05e0c148 // shl rax, 5 @@ -5525,7 +5921,7 @@ LBB1_165: WORD $0xc031 // xor eax, eax QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB1_166: +LBB1_167: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000019824848948 // mov qword [rsp + 408], rax LONG $0x05e3c148 // shl rbx, 5 @@ -5626,13 +6022,13 @@ LBB1_166: LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax WORD $0x8949; BYTE $0xdf // mov r15, rbx LONG $0x80cf8149; WORD $0x0002; BYTE $0x00 // or r15, 640 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 + QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 WORD $0x8949; BYTE $0xdb // mov r11, rbx LONG $0xa0cb8149; WORD $0x0002; BYTE $0x00 // or r11, 672 QUAD $0x000000c8249c894c // mov qword [rsp + 200], r11 WORD $0x8949; BYTE $0xd8 // mov r8, rbx LONG $0xc0c88149; WORD $0x0002; BYTE $0x00 // or r8, 704 - QUAD $0x000000a82484894c // mov qword [rsp + 168], r8 + QUAD $0x000000b02484894c // mov qword [rsp + 176], r8 WORD $0x8948; BYTE $0xda // mov rdx, rbx LONG $0xe0ca8148; WORD $0x0002; BYTE $0x00 // or rdx, 736 QUAD $0x000000c024948948 // mov qword [rsp + 192], rdx @@ -5715,11 +6111,11 @@ LBB1_166: QUAD $0x020116642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rdx + 1], 2 LONG $0x24548b48; BYTE $0x68 // mov rdx, qword [rsp + 104] QUAD $0x030116642059e3c4 // vpinsrb 
xmm4, xmm4, byte [rsi + rdx + 1], 3 - QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] QUAD $0x040116642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rdx + 1], 4 QUAD $0x000000c824948b48 // mov rdx, qword [rsp + 200] QUAD $0x050116642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rdx + 1], 5 - QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] QUAD $0x060116642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rdx + 1], 6 QUAD $0x000000c024948b48 // mov rdx, qword [rsp + 192] QUAD $0x070116642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rdx + 1], 7 @@ -5774,11 +6170,11 @@ LBB1_166: QUAD $0x02020e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rcx + 2], 2 LONG $0x24548b4c; BYTE $0x68 // mov r10, qword [rsp + 104] QUAD $0x030216442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r10 + 2], 3 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x040206442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 2], 4 QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x050206442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 2], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x060206442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 2], 6 QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] QUAD $0x070206442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 2], 7 @@ -5831,11 +6227,11 @@ LBB1_166: QUAD $0x010316642021e3c4 // vpinsrb xmm4, xmm11, byte [rsi + rdx + 3], 1 QUAD $0x02030e642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rcx + 3], 2 QUAD $0x030316642059a3c4 // vpinsrb xmm4, xmm4, byte [rsi + r10 + 3], 3 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] QUAD $0x04030e642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rcx + 3], 4 QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x05030e642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rcx + 3], 5 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] QUAD $0x06030e642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rcx + 3], 6 QUAD $0x000000c024848b4c // mov r8, qword [rsp + 192] QUAD $0x070306642059a3c4 // vpinsrb xmm4, xmm4, byte [rsi + r8 + 3], 7 @@ -5895,11 +6291,11 @@ LBB1_166: QUAD $0x020406442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 4], 2 LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x030406442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 4], 3 - QUAD $0x000000b024ac8b4c // mov r13, qword [rsp + 176] + QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] QUAD $0x04042e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r13 + 4], 4 QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x05040e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rcx + 4], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x060406442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 4], 6 QUAD $0x070406442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r8 + 4], 7 QUAD $0x080426442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r12 + 4], 8 @@ -5949,7 +6345,7 @@ LBB1_166: QUAD $0x03050e642059a3c4 // vpinsrb xmm4, xmm4, byte [rsi + r9 + 5], 3 QUAD $0x04052e642059a3c4 // vpinsrb xmm4, xmm4, byte [rsi + r13 + 5], 4 QUAD $0x05050e642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rcx + 5], 5 - QUAD $0x000000a8248c8b48 
// mov rcx, qword [rsp + 168] + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] QUAD $0x06050e642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rcx + 5], 6 QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] QUAD $0x07050e642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rcx + 5], 7 @@ -6007,11 +6403,11 @@ LBB1_166: QUAD $0x02063e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r15 + 6], 2 LONG $0x244c8b4c; BYTE $0x68 // mov r9, qword [rsp + 104] QUAD $0x03060e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r9 + 6], 3 - QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] + QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] QUAD $0x04061e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rbx + 6], 4 QUAD $0x000000c824948b48 // mov rdx, qword [rsp + 200] QUAD $0x050616442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 6], 5 - QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] + QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] QUAD $0x06061e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r11 + 6], 6 QUAD $0x07062e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r13 + 6], 7 QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] @@ -6126,7 +6522,7 @@ LBB1_166: QUAD $0x04081e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rbx + 8], 4 WORD $0x894c; BYTE $0xf3 // mov rbx, r14 QUAD $0x050836442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r14 + 8], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x060806442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 8], 6 QUAD $0x000000c024948b48 // mov rdx, qword [rsp + 192] QUAD $0x070816442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 8], 7 @@ -6175,7 +6571,7 @@ LBB1_166: QUAD $0x01092e742039a3c4 // vpinsrb xmm6, xmm8, byte [rsi + r13 + 9], 1 QUAD $0x020926742049a3c4 // vpinsrb xmm6, xmm6, byte [rsi + r12 + 9], 2 QUAD $0x03090e742049e3c4 // vpinsrb xmm6, xmm6, byte [rsi + rcx + 9], 3 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] QUAD $0x04090e742049e3c4 // vpinsrb xmm6, xmm6, byte [rsi + rcx + 9], 4 QUAD $0x05091e742049e3c4 // vpinsrb xmm6, xmm6, byte [rsi + rbx + 9], 5 QUAD $0x060906742049e3c4 // vpinsrb xmm6, xmm6, byte [rsi + rax + 9], 6 @@ -6236,11 +6632,11 @@ LBB1_166: QUAD $0x020a1e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rbx + 10], 2 LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x030a065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 10], 3 - QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] + QUAD $0x000000a8248c8b4c // mov r9, qword [rsp + 168] QUAD $0x040a0e5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r9 + 10], 4 QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x050a065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 10], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x060a065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 10], 6 QUAD $0x070a165c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdx + 10], 7 QUAD $0x000000d824848b4c // mov r8, qword [rsp + 216] @@ -6291,7 +6687,7 @@ LBB1_166: QUAD $0x040b0e4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r9 + 11], 4 QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] QUAD $0x050b1e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rbx + 11], 5 - QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] + QUAD $0x000000b024ac8b4c // mov r13, qword [rsp + 176] QUAD $0x060b2e4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r13 + 11], 6 QUAD 
$0x070b164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 11], 7 WORD $0x8949; BYTE $0xd1 // mov r9, rdx @@ -6350,7 +6746,7 @@ LBB1_166: QUAD $0x020c0e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rcx + 12], 2 LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x030c0e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rcx + 12], 3 - QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] QUAD $0x040c16442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 12], 4 QUAD $0x050c1e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rbx + 12], 5 WORD $0x894c; BYTE $0xeb // mov rbx, r13 @@ -6463,11 +6859,11 @@ LBB1_166: QUAD $0x020e164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 14], 2 LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] QUAD $0x030e1e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rbx + 14], 3 - QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] + QUAD $0x000000a824848b4c // mov r8, qword [rsp + 168] QUAD $0x040e064c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r8 + 14], 4 QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x050e064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 14], 5 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] QUAD $0x060e0e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rcx + 14], 6 QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] QUAD $0x070e064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 14], 7 @@ -6582,10 +6978,10 @@ LBB1_166: QUAD $0x021006442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 16], 2 LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] QUAD $0x03101e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r11 + 16], 3 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x041006442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 16], 4 QUAD $0x05101e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rbx + 16], 5 - QUAD $0x000000a8248c8b4c // mov r9, qword [rsp + 168] + QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] QUAD $0x06100e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r9 + 16], 6 QUAD $0x071016442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 16], 7 QUAD $0x000000d824948b48 // mov rdx, qword [rsp + 216] @@ -6638,7 +7034,7 @@ LBB1_166: QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] QUAD $0x021106542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r8 + 17], 2 QUAD $0x03111e542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r11 + 17], 3 - QUAD $0x000000b024948b4c // mov r10, qword [rsp + 176] + QUAD $0x000000a824948b4c // mov r10, qword [rsp + 168] QUAD $0x041116542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r10 + 17], 4 QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] QUAD $0x05113e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 17], 5 @@ -6703,7 +7099,7 @@ LBB1_166: QUAD $0x041216442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r10 + 18], 4 QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x051206442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 18], 5 - QUAD $0x000000a824848b4c // mov r8, qword [rsp + 168] + QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] QUAD $0x061206442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r8 + 18], 6 QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] QUAD $0x071206442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 18], 7 @@ -6757,7 +7153,7 @@ LBB1_166: QUAD $0x02133e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 19], 2 LONG $0x247c8b48; BYTE $0x68 // mov 
rdi, qword [rsp + 104] QUAD $0x03133e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 19], 3 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x04133e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 19], 4 QUAD $0x000000c824ac8b4c // mov r13, qword [rsp + 200] QUAD $0x05132e542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r13 + 19], 5 @@ -6817,10 +7213,10 @@ LBB1_166: QUAD $0x021416442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 20], 2 LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x03143e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 20], 3 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x04143e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 20], 4 QUAD $0x05142e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r13 + 20], 5 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x06143e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 20], 6 QUAD $0x000000c024ac8b4c // mov r13, qword [rsp + 192] QUAD $0x07142e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r13 + 20], 7 @@ -6876,11 +7272,11 @@ LBB1_166: QUAD $0x021516542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdx + 21], 2 LONG $0x24548b48; BYTE $0x68 // mov rdx, qword [rsp + 104] QUAD $0x031516542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdx + 21], 3 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x041506542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rax + 21], 4 QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x051506542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rax + 21], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x061506542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rax + 21], 6 QUAD $0x07152e542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r13 + 21], 7 QUAD $0x08153e542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r15 + 21], 8 @@ -6936,11 +7332,11 @@ LBB1_166: QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] QUAD $0x02163e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 22], 2 QUAD $0x031616442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 22], 3 - QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] QUAD $0x041616442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 22], 4 QUAD $0x000000c824948b48 // mov rdx, qword [rsp + 200] QUAD $0x051616442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 22], 5 - QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] QUAD $0x061616442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 22], 6 QUAD $0x000000c024948b48 // mov rdx, qword [rsp + 192] QUAD $0x071616442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 22], 7 @@ -6993,11 +7389,11 @@ LBB1_166: QUAD $0x021706542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r8 + 23], 2 LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x03173e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 23], 3 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x04173e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 23], 4 QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] QUAD $0x05173e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 23], 5 - QUAD 
$0x000000a824ac8b4c // mov r13, qword [rsp + 168] + QUAD $0x000000b024ac8b4c // mov r13, qword [rsp + 176] QUAD $0x06172e542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r13 + 23], 6 QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] QUAD $0x07173e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 23], 7 @@ -7052,7 +7448,7 @@ LBB1_166: QUAD $0x021806442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r8 + 24], 2 LONG $0x24548b4c; BYTE $0x68 // mov r10, qword [rsp + 104] QUAD $0x031816442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r10 + 24], 3 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x04183e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 24], 4 QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] QUAD $0x05183e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 24], 5 @@ -7107,11 +7503,11 @@ LBB1_166: QUAD $0x000000e8249c8b48 // mov rbx, qword [rsp + 232] QUAD $0x02191e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rbx + 25], 2 QUAD $0x031916542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r10 + 25], 3 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x041906542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rax + 25], 4 QUAD $0x000000c824b48b4c // mov r14, qword [rsp + 200] QUAD $0x051936542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r14 + 25], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x061906542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rax + 25], 6 QUAD $0x071906542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r8 + 25], 7 QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] @@ -7169,10 +7565,10 @@ LBB1_166: QUAD $0x021a1e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rbx + 26], 2 LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] QUAD $0x031a1e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rbx + 26], 3 - QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] QUAD $0x041a16442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 26], 4 QUAD $0x051a36442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r14 + 26], 5 - QUAD $0x000000a8248c8b4c // mov r9, qword [rsp + 168] + QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] QUAD $0x061a0e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r9 + 26], 6 QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] QUAD $0x071a3e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 26], 7 @@ -7286,10 +7682,10 @@ LBB1_166: QUAD $0x021c0e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rcx + 28], 2 LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x031c3e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 28], 3 - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] QUAD $0x041c1e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r11 + 28], 4 QUAD $0x051c06442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r8 + 28], 5 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x061c3e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 28], 6 QUAD $0x071c06442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 28], 7 QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] @@ -7346,7 +7742,7 @@ LBB1_166: QUAD $0x041d1e542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r11 + 29], 4 QUAD $0x000000c8249c8b4c // mov r11, qword [rsp + 200] QUAD $0x051d1e542069a3c4 // vpinsrb xmm2, xmm2, 
byte [rsi + r11 + 29], 5 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x061d3e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 29], 6 QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] QUAD $0x071d3e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 29], 7 @@ -7407,12 +7803,12 @@ LBB1_166: QUAD $0x021f064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 31], 2 QUAD $0x031e0e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rcx + 30], 3 QUAD $0x031f0e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rcx + 31], 3 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x041e06442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 30], 4 QUAD $0x041f064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 31], 4 QUAD $0x051e1e442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r11 + 30], 5 QUAD $0x051f1e4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r11 + 31], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x061e06442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rax + 30], 6 QUAD $0x061f064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 31], 6 QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] @@ -7624,7 +8020,7 @@ LBB1_166: LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000180248c3b48 // cmp rcx, qword [rsp + 384] - JNE LBB1_166 + JNE LBB1_167 QUAD $0x0000018824ac8b4c // mov r13, qword [rsp + 392] QUAD $0x0000018024ac3b4c // cmp r13, qword [rsp + 384] QUAD $0x0000011824bc8b4c // mov r15, qword [rsp + 280] @@ -7633,7 +8029,7 @@ LBB1_166: JNE LBB1_36 JMP LBB1_109 -LBB1_168: +LBB1_169: LONG $0xe0e78349 // and r15, -32 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 @@ -7648,7 +8044,7 @@ LBB1_168: WORD $0xc031 // xor eax, eax QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB1_169: +LBB1_170: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000019824848948 // mov qword [rsp + 408], rax LONG $0x05e3c148 // shl rbx, 5 @@ -7660,13 +8056,13 @@ LBB1_169: QUAD $0x0000009824848948 // mov qword [rsp + 152], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x60c88348 // or rax, 96 - QUAD $0x000000b024848948 // mov qword [rsp + 176], rax + QUAD $0x000000a824848948 // mov qword [rsp + 168], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00800d48; WORD $0x0000 // or rax, 128 LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00a00d48; WORD $0x0000 // or rax, 160 - QUAD $0x000000a824848948 // mov qword [rsp + 168], rax + QUAD $0x000000b024848948 // mov qword [rsp + 176], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00c00d48; WORD $0x0000 // or rax, 192 QUAD $0x000000e824848948 // mov qword [rsp + 232], rax @@ -7805,11 +8201,11 @@ LBB1_169: LONG $0x2061a3c4; WORD $0x361c; BYTE $0x01 // vpinsrb xmm3, xmm3, byte [rsi + r14], 1 QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] LONG $0x2061a3c4; WORD $0x161c; BYTE $0x02 // vpinsrb xmm3, xmm3, byte [rsi + r10], 2 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] + QUAD $0x000000a824a48b4c // mov r12, qword [rsp + 168] LONG $0x2061a3c4; WORD $0x261c; BYTE $0x03 // vpinsrb xmm3, xmm3, byte [rsi + r12], 3 LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] LONG $0x2061a3c4; WORD $0x061c; BYTE $0x04 // vpinsrb xmm3, xmm3, byte [rsi + r8], 4 - QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] + QUAD 
$0x000000b0249c8b4c // mov r11, qword [rsp + 176] LONG $0x2061a3c4; WORD $0x1e1c; BYTE $0x05 // vpinsrb xmm3, xmm3, byte [rsi + r11], 5 QUAD $0x000000e8248c8b4c // mov r9, qword [rsp + 232] LONG $0x2061a3c4; WORD $0x0e1c; BYTE $0x06 // vpinsrb xmm3, xmm3, byte [rsi + r9], 6 @@ -7924,11 +8320,11 @@ LBB1_169: QUAD $0x0102065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 2], 1 QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x02023e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 2], 2 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x03023e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 2], 3 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] QUAD $0x04023e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 2], 4 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x05023e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 2], 5 QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] QUAD $0x06023e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 2], 6 @@ -7973,11 +8369,11 @@ LBB1_169: QUAD $0x0103066c2039e3c4 // vpinsrb xmm5, xmm8, byte [rsi + rax + 3], 1 QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] QUAD $0x02031e6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rbx + 3], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0303066c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rax + 3], 3 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x0403066c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rax + 3], 4 - QUAD $0x000000a824948b4c // mov r10, qword [rsp + 168] + QUAD $0x000000b024948b4c // mov r10, qword [rsp + 176] QUAD $0x0503166c2051a3c4 // vpinsrb xmm5, xmm5, byte [rsi + r10 + 3], 5 QUAD $0x000000e824b48b4c // mov r14, qword [rsp + 232] QUAD $0x0603366c2051a3c4 // vpinsrb xmm5, xmm5, byte [rsi + r14 + 3], 6 @@ -8037,7 +8433,7 @@ LBB1_169: LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x0104065c2011e3c4 // vpinsrb xmm3, xmm13, byte [rsi + rax + 4], 1 QUAD $0x02041e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rbx + 4], 2 - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] QUAD $0x03041e5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r11 + 4], 3 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x0404065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 4], 4 @@ -8092,7 +8488,7 @@ LBB1_169: QUAD $0x0205166c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rdx + 5], 2 QUAD $0x03051e6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rsi + r11 + 5], 3 QUAD $0x0405066c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rax + 5], 4 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0505066c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rax + 5], 5 QUAD $0x06053e6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rdi + 5], 6 QUAD $0x0705166c2051a3c4 // vpinsrb xmm5, xmm5, byte [rsi + r10 + 5], 7 @@ -8147,11 +8543,11 @@ LBB1_169: QUAD $0x0106266c2041a3c4 // vpinsrb xmm5, xmm7, byte [rsi + r12 + 6], 1 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] QUAD $0x02060e6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rcx + 6], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] QUAD $0x03060e6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rcx + 6], 3 
LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] QUAD $0x04060e6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rcx + 6], 4 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x05063e6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rdi + 6], 5 QUAD $0x000000e8249c8b48 // mov rbx, qword [rsp + 232] QUAD $0x06061e6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rbx + 6], 6 @@ -8200,7 +8596,7 @@ LBB1_169: QUAD $0x01070e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rcx + 7], 1 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] QUAD $0x02070e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rcx + 7], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] QUAD $0x03070e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rcx + 7], 3 LONG $0x24548b48; BYTE $0x78 // mov rdx, qword [rsp + 120] QUAD $0x0407164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 7], 4 @@ -8262,11 +8658,11 @@ LBB1_169: QUAD $0x0108066c2029e3c4 // vpinsrb xmm5, xmm10, byte [rsi + rax + 8], 1 QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] QUAD $0x02080e6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rsi + r9 + 8], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0308066c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rax + 8], 3 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] QUAD $0x04083e6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rdi + 8], 4 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0508066c2051e3c4 // vpinsrb xmm5, xmm5, byte [rsi + rax + 8], 5 QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] QUAD $0x06083e6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rsi + r15 + 8], 6 @@ -8313,10 +8709,10 @@ LBB1_169: LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x01090e7c2021e3c4 // vpinsrb xmm7, xmm11, byte [rsi + rcx + 9], 1 QUAD $0x02090e7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rsi + r9 + 9], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] QUAD $0x03090e7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rsi + rcx + 9], 3 QUAD $0x04093e7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rsi + rdi + 9], 4 - QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] + QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] QUAD $0x05091e7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rsi + r11 + 9], 5 QUAD $0x06093e7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rsi + r15 + 9], 6 QUAD $0x000000d8249c8b48 // mov rbx, qword [rsp + 216] @@ -8376,7 +8772,7 @@ LBB1_169: QUAD $0x010a06642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rax + 10], 1 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x020a06642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rax + 10], 2 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x030a3e642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rdi + 10], 3 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x040a06642059e3c4 // vpinsrb xmm4, xmm4, byte [rsi + rax + 10], 4 @@ -8431,7 +8827,7 @@ LBB1_169: QUAD $0x030b3e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 11], 3 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] QUAD $0x040b3e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 11], 4 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword 
[rsp + 176] QUAD $0x050b3e542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdi + 11], 5 QUAD $0x060b1e542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r11 + 11], 6 QUAD $0x000000d8248c8b4c // mov r9, qword [rsp + 216] @@ -8489,11 +8885,11 @@ LBB1_169: QUAD $0x010c16542051e3c4 // vpinsrb xmm2, xmm5, byte [rsi + rdx + 12], 1 WORD $0x894c; BYTE $0xf7 // mov rdi, r14 QUAD $0x020c36542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r14 + 12], 2 - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] QUAD $0x030c1e542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r11 + 12], 3 LONG $0x24548b48; BYTE $0x78 // mov rdx, qword [rsp + 120] QUAD $0x040c16542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rdx + 12], 4 - QUAD $0x000000a824b48b4c // mov r14, qword [rsp + 168] + QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] QUAD $0x050c36542069a3c4 // vpinsrb xmm2, xmm2, byte [rsi + r14 + 12], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x060c06542069e3c4 // vpinsrb xmm2, xmm2, byte [rsi + rax + 12], 6 @@ -8603,7 +8999,7 @@ LBB1_169: QUAD $0x010e16442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdx + 14], 1 QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x020e3e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 14], 2 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] + QUAD $0x000000a824a48b4c // mov r12, qword [rsp + 168] QUAD $0x030e26442079a3c4 // vpinsrb xmm0, xmm0, byte [rsi + r12 + 14], 3 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] QUAD $0x040e3e442079e3c4 // vpinsrb xmm0, xmm0, byte [rsi + rdi + 14], 4 @@ -8662,7 +9058,7 @@ LBB1_169: QUAD $0x030f265c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r12 + 15], 3 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x040f065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 15], 4 - QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] QUAD $0x050f165c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdx + 15], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x060f065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 15], 6 @@ -8722,7 +9118,7 @@ LBB1_169: QUAD $0x01103e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdi + 16], 1 QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x02103e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdi + 16], 2 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x03103e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdi + 16], 3 LONG $0x246c8b4c; BYTE $0x78 // mov r13, qword [rsp + 120] QUAD $0x04102e4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r13 + 16], 4 @@ -8778,10 +9174,10 @@ LBB1_169: QUAD $0x01110e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rcx + 17], 1 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] QUAD $0x02110e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rcx + 17], 2 - QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] + QUAD $0x000000a824848b4c // mov r8, qword [rsp + 168] QUAD $0x0311065c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r8 + 17], 3 QUAD $0x04112e5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r13 + 17], 4 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x05113e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 17], 5 QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] QUAD $0x06113e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 17], 6 @@ -8841,7 +9237,7 @@ 
LBB1_169: QUAD $0x0312064c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r8 + 18], 3 LONG $0x24548b4c; BYTE $0x78 // mov r10, qword [rsp + 120] QUAD $0x0412164c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r10 + 18], 4 - QUAD $0x000000a824b48b4c // mov r14, qword [rsp + 168] + QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] QUAD $0x0512364c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r14 + 18], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x0612064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 18], 6 @@ -8898,7 +9294,7 @@ LBB1_169: QUAD $0x01132e5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r13 + 19], 1 QUAD $0x0000009824948b48 // mov rdx, qword [rsp + 152] QUAD $0x0213165c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdx + 19], 2 - QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] QUAD $0x0313165c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdx + 19], 3 QUAD $0x0413165c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r10 + 19], 4 QUAD $0x0513365c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r14 + 19], 5 @@ -8963,7 +9359,7 @@ LBB1_169: QUAD $0x0314164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 20], 3 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x0414064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 20], 4 - QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] QUAD $0x0514164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 20], 5 QUAD $0x06141e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rbx + 20], 6 QUAD $0x000000d824948b48 // mov rdx, qword [rsp + 216] @@ -9016,10 +9412,10 @@ LBB1_169: LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x01153e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 21], 1 QUAD $0x0215065c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r8 + 21], 2 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x03153e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 21], 3 QUAD $0x0415065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 21], 4 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0515065c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rax + 21], 5 QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] QUAD $0x0615065c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r8 + 21], 6 @@ -9076,11 +9472,11 @@ LBB1_169: QUAD $0x0116164c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r10 + 22], 1 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] QUAD $0x02160e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rcx + 22], 2 - QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] QUAD $0x0316164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 22], 3 LONG $0x24548b48; BYTE $0x78 // mov rdx, qword [rsp + 120] QUAD $0x0416164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 22], 4 - QUAD $0x000000a824948b48 // mov rdx, qword [rsp + 168] + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] QUAD $0x0516164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 22], 5 QUAD $0x0616064c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r8 + 22], 6 QUAD $0x07163e4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r15 + 22], 7 @@ -9133,11 +9529,11 @@ LBB1_169: LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x0117165c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r10 + 23], 1 QUAD $0x02170e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rcx + 
23], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] QUAD $0x03170e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rcx + 23], 3 LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] QUAD $0x04171e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rbx + 23], 4 - QUAD $0x000000a824948b4c // mov r10, qword [rsp + 168] + QUAD $0x000000b024948b4c // mov r10, qword [rsp + 176] QUAD $0x0517165c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r10 + 23], 5 QUAD $0x0617065c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r8 + 23], 6 QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] @@ -9251,11 +9647,11 @@ LBB1_169: QUAD $0x0119165c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdx + 25], 1 QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x02193e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 25], 2 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x03193e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 25], 3 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] QUAD $0x04193e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 25], 4 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x05193e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 25], 5 QUAD $0x0619165c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r10 + 25], 6 QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] @@ -9308,11 +9704,11 @@ LBB1_169: QUAD $0x011a164c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdx + 26], 1 QUAD $0x0000009824b48b4c // mov r14, qword [rsp + 152] QUAD $0x021a364c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r14 + 26], 2 - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] QUAD $0x031a3e4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r15 + 26], 3 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x041a064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 26], 4 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x051a064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 26], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x061a064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 26], 6 @@ -9370,7 +9766,7 @@ LBB1_169: QUAD $0x031b3e5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r15 + 27], 3 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] QUAD $0x041b3e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 27], 4 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x051b3e5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rsi + rdi + 27], 5 QUAD $0x000000e824b48b4c // mov r14, qword [rsp + 232] QUAD $0x061b365c2061a3c4 // vpinsrb xmm3, xmm3, byte [rsi + r14 + 27], 6 @@ -9428,11 +9824,11 @@ LBB1_169: QUAD $0x011c064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 28], 1 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x021c064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 28], 2 - QUAD $0x000000b024ac8b4c // mov r13, qword [rsp + 176] + QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] QUAD $0x031c2e4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r13 + 28], 3 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] QUAD $0x041c3e4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rdi + 28], 4 - QUAD $0x000000a824948b4c // mov r10, qword [rsp + 168] + QUAD 
$0x000000b024948b4c // mov r10, qword [rsp + 176] QUAD $0x051c164c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r10 + 28], 5 QUAD $0x061c364c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r14 + 28], 6 WORD $0x894d; BYTE $0xfe // mov r14, r15 @@ -9569,12 +9965,12 @@ LBB1_169: QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x021e064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 30], 2 QUAD $0x021f067c2041e3c4 // vpinsrb xmm7, xmm7, byte [rsi + rax + 31], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x031e064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 30], 3 QUAD $0x031f067c2041e3c4 // vpinsrb xmm7, xmm7, byte [rsi + rax + 31], 3 QUAD $0x041e2e4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r13 + 30], 4 QUAD $0x041f2e7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rsi + r13 + 31], 4 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x051e064c2071e3c4 // vpinsrb xmm1, xmm1, byte [rsi + rax + 30], 5 QUAD $0x051f067c2041e3c4 // vpinsrb xmm7, xmm7, byte [rsi + rax + 31], 5 QUAD $0x061e164c2071a3c4 // vpinsrb xmm1, xmm1, byte [rsi + r10 + 30], 6 @@ -9739,7 +10135,7 @@ LBB1_169: LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000180248c3b48 // cmp rcx, qword [rsp + 384] - JNE LBB1_169 + JNE LBB1_170 QUAD $0x0000018824bc8b4c // mov r15, qword [rsp + 392] QUAD $0x0000018024bc3b4c // cmp r15, qword [rsp + 384] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] @@ -9793,7 +10189,7 @@ TEXT ·_comparison_equal_scalar_arr_avx2(SB), $1320-48 LEAQ LCDATA2<>(SB), BP WORD $0x894d; BYTE $0xc2 // mov r10, r8 - WORD $0x8949; BYTE $0xcb // mov r11, rcx + WORD $0x8949; BYTE $0xce // mov r14, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 JG LBB2_17 WORD $0xff83; BYTE $0x03 // cmp edi, 3 @@ -9803,11 +10199,11 @@ TEXT ·_comparison_equal_scalar_arr_avx2(SB), $1320-48 WORD $0xff83; BYTE $0x05 // cmp edi, 5 JE LBB2_72 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB2_157 + JNE LBB2_165 WORD $0x8b44; BYTE $0x2e // mov r13d, dword [rsi] - LONG $0x1f728d4d // lea r14, [r10 + 31] + LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xda490f4d // cmovns r11, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -9825,8 +10221,8 @@ LBB2_7: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xd9 // mov r9, r11 - LONG $0x04b60f45; BYTE $0x33 // movzx r8d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf1 // mov r9, r14 + LONG $0x04b60f45; BYTE $0x36 // movzx r8d, byte [r14 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -9835,40 +10231,40 @@ LBB2_7: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_7 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_9: - LONG $0x05fec149 // sar r14, 5 + LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_13 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 - QUAD 
$0x000000a824b4894c // mov qword [rsp + 168], r14 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 + QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 LBB2_11: - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 WORD $0x3b44; BYTE $0x2a // cmp r13d, dword [rdx] - QUAD $0x000000982494940f // sete byte [rsp + 152] + QUAD $0x000000a02494940f // sete byte [rsp + 160] LONG $0x046a3b44 // cmp r13d, dword [rdx + 4] - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x086a3b44 // cmp r13d, dword [rdx + 8] LONG $0xd6940f41 // sete r14b LONG $0x0c6a3b44 // cmp r13d, dword [rdx + 12] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + QUAD $0x000000882494940f // sete byte [rsp + 136] LONG $0x106a3b44 // cmp r13d, dword [rdx + 16] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] LONG $0x146a3b44 // cmp r13d, dword [rdx + 20] - QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x186a3b44 // cmp r13d, dword [rdx + 24] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x1c6a3b44 // cmp r13d, dword [rdx + 28] - LONG $0xd3940f41 // sete r11b + LONG $0xd7940f40 // sete dil LONG $0x206a3b44 // cmp r13d, dword [rdx + 32] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + QUAD $0x000000902494940f // sete byte [rsp + 144] LONG $0x246a3b44 // cmp r13d, dword [rdx + 36] LONG $0xd6940f40 // sete sil LONG $0x286a3b44 // cmp r13d, dword [rdx + 40] @@ -9876,37 +10272,37 @@ LBB2_11: LONG $0x2c6a3b44 // cmp r13d, dword [rdx + 44] LONG $0xd1940f41 // sete r9b LONG $0x306a3b44 // cmp r13d, dword [rdx + 48] - LONG $0xd2940f41 // sete r10b + LONG $0xd3940f41 // sete r11b LONG $0x346a3b44 // cmp r13d, dword [rdx + 52] LONG $0xd4940f41 // sete r12b LONG $0x386a3b44 // cmp r13d, dword [rdx + 56] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x3c6a3b44 // cmp r13d, dword [rdx + 60] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd0 // sete al LONG $0x406a3b44 // cmp r13d, dword [rdx + 64] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x446a3b44 // cmp r13d, dword [rdx + 68] LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x446a3b44 // cmp r13d, dword [rdx + 68] + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] LONG $0x486a3b44 // cmp r13d, dword [rdx + 72] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x4c6a3b44 // cmp r13d, dword [rdx + 76] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0x506a3b44 // cmp r13d, dword [rdx + 80] - QUAD $0x000000882494940f // sete byte [rsp + 136] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x546a3b44 // cmp r13d, dword [rdx + 84] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] LONG $0x586a3b44 // cmp r13d, dword [rdx + 88] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0x5c6a3b44 // cmp r13d, dword [rdx + 92] LONG $0xd7940f41 // sete r15b LONG $0x606a3b44 // cmp r13d, dword [rdx + 96] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x646a3b44 // cmp r13d, dword [rdx + 100] - LONG 
$0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] LONG $0x686a3b44 // cmp r13d, dword [rdx + 104] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x6c6a3b44 // cmp r13d, dword [rdx + 108] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0x706a3b44 // cmp r13d, dword [rdx + 112] QUAD $0x000001402494940f // sete byte [rsp + 320] LONG $0x746a3b44 // cmp r13d, dword [rdx + 116] @@ -9914,111 +10310,109 @@ LBB2_11: LONG $0x786a3b44 // cmp r13d, dword [rdx + 120] LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0x7c6a3b44 // cmp r13d, dword [rdx + 124] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000a024940244 // add r10b, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + WORD $0x0845; BYTE $0xd6 // or r14b, r10b WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x50 // add sil, byte [rsp + 80] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax + QUAD $0x0000009024b40240 // add sil, byte [rsp + 144] + QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] LONG $0x02e0c041 // shl r8b, 2 WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x58 // movzx esi, byte [rsp + 88] + WORD $0x0845; BYTE $0xdc // or r12b, r11b + QUAD $0x0000009824b4b60f // movzx esi, byte [rsp + 152] LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xc000 // add al, al - LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp 
+ 104] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b - QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xdb00 // add bl, bl + LONG $0x60245c02 // add bl, byte [rsp + 96] + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0x8841; BYTE $0x3e // mov byte [r14], dil + QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] + LONG $0x06e6c040 // shl sil, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x014b8841 // mov byte [r11 + 1], cl WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + LONG $0x01468841 // mov byte [r14 + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; 
BYTE $0x07 // shl bl, 7 - WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x035b8841 // mov byte [r11 + 3], bl + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x027e8845 // mov byte [r14 + 2], r15b + LONG $0x034e8841 // mov byte [r14 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c38349 // add r11, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c68349 // add r14, 4 + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB2_11 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] LBB2_13: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_157 + LONG $0x05e3c149 // shl r11, 5 + WORD $0x394d; BYTE $0xd3 // cmp r11, r10 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 + WORD $0x294d; BYTE $0xd8 // sub r8, r11 + WORD $0xf749; BYTE $0xd3 // not r11 + WORD $0x014d; BYTE $0xd3 // add r11, r10 JE LBB2_127 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 @@ -10030,8 +10424,8 @@ LBB2_16: WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf3 // mov r11, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -10039,7 +10433,7 @@ LBB2_16: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + LONG $0x361c8841 // mov byte [r14 + rsi], bl LONG $0x02c78348 // add rdi, 2 LONG $0x046a3b44 // cmp r13d, dword [rdx + 4] LONG $0x08528d48 // lea rdx, [rdx + 8] @@ -10051,10 +10445,10 @@ LBB2_16: WORD $0xe0d2 // shl al, cl WORD $0x2044; BYTE $0xc8 // and al, r9b WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al + LONG $0x36048841 // mov byte [r14 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB2_16 - JMP LBB2_154 + JMP LBB2_155 LBB2_17: WORD $0xff83; BYTE $0x08 // cmp edi, 8 @@ -10064,10 +10458,10 @@ LBB2_17: WORD $0xff83; BYTE $0x0b // cmp edi, 11 JE LBB2_94 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB2_157 - LONG $0x1f728d4d // lea r14, [r10 + 31] + JNE LBB2_165 + LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xda490f4d // cmovns r11, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -10080,14 +10474,16 @@ LBB2_17: LBB2_23: LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] LONG $0x08528d48 // lea rdx, [rdx + 8] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xcb20 // and bl, cl WORD $0xdbf6 // neg bl LONG $0x07708d48 // lea rsi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xdf // mov r15, r11 - 
LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf7 // mov r15, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0x3044; BYTE $0xcb // xor bl, r9b QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -10096,197 +10492,286 @@ LBB2_23: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xcf // xor dil, r9b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_23 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_25: - LONG $0x05fec149 // sar r14, 5 + LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_29 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000a824b4894c // mov qword [rsp + 168], r14 - QUAD $0x0000009824b4894c // mov qword [rsp + 152], r14 + QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 + QUAD $0x000000b8249c894c // mov qword [rsp + 184], r11 LBB2_27: - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x422ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rdx + 8] - LONG $0xd0940f41 // sete r8b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al LONG $0x422ef9c5; BYTE $0x10 // vucomisd xmm0, qword [rdx + 16] - LONG $0xd6940f41 // sete r14b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x422ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rdx + 24] - LONG $0xd5940f41 // sete r13b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 320], cl LONG $0x422ef9c5; BYTE $0x20 // vucomisd xmm0, qword [rdx + 32] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x422ef9c5; BYTE $0x28 // vucomisd xmm0, qword [rdx + 40] - QUAD $0x000000902494940f // sete byte [rsp + 144] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd4940f41 // sete r12b + WORD $0x2041; BYTE $0xc4 // and r12b, al LONG $0x422ef9c5; BYTE $0x30 // vucomisd xmm0, qword [rdx + 48] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl LONG $0x422ef9c5; BYTE $0x38 // vucomisd xmm0, qword [rdx + 56] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x1c244c88 // mov byte [rsp + 28], cl LONG $0x422ef9c5; BYTE $0x40 // vucomisd xmm0, qword [rdx + 64] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x422ef9c5; BYTE $0x48 // vucomisd xmm0, qword [rdx + 72] - LONG $0xd6940f40 // 
sete sil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl LONG $0x422ef9c5; BYTE $0x50 // vucomisd xmm0, qword [rdx + 80] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl LONG $0x422ef9c5; BYTE $0x58 // vucomisd xmm0, qword [rdx + 88] - LONG $0xd1940f41 // sete r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x422ef9c5; BYTE $0x60 // vucomisd xmm0, qword [rdx + 96] - LONG $0xd2940f41 // sete r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], cl LONG $0x422ef9c5; BYTE $0x68 // vucomisd xmm0, qword [rdx + 104] - LONG $0xd4940f41 // sete r12b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x422ef9c5; BYTE $0x70 // vucomisd xmm0, qword [rdx + 112] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x422ef9c5; BYTE $0x78 // vucomisd xmm0, qword [rdx + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl QUAD $0x00000080822ef9c5 // vucomisd xmm0, qword [rdx + 128] - QUAD $0x000000802494940f // sete byte [rsp + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl QUAD $0x00000088822ef9c5 // vucomisd xmm0, qword [rdx + 136] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl QUAD $0x00000090822ef9c5 // vucomisd xmm0, qword [rdx + 144] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl QUAD $0x00000098822ef9c5 // vucomisd xmm0, qword [rdx + 152] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x98248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], cl QUAD $0x000000a0822ef9c5 // vucomisd xmm0, qword [rdx + 160] - QUAD $0x000000882494940f // sete byte [rsp + 136] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x90248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], cl QUAD $0x000000a8822ef9c5 // vucomisd xmm0, qword [rdx + 168] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl QUAD $0x000000b0822ef9c5 // vucomisd xmm0, qword [rdx + 176] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - QUAD $0x000000b8822ef9c5 // vucomisd xmm0, qword [rdx + 184] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd7940f41 // sete 
r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al + QUAD $0x000000b8822ef9c5 // vucomisd xmm0, qword [rdx + 184] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al QUAD $0x000000c0822ef9c5 // vucomisd xmm0, qword [rdx + 192] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl QUAD $0x000000c8822ef9c5 // vucomisd xmm0, qword [rdx + 200] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al QUAD $0x000000d0822ef9c5 // vucomisd xmm0, qword [rdx + 208] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al QUAD $0x000000d8822ef9c5 // vucomisd xmm0, qword [rdx + 216] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al QUAD $0x000000e0822ef9c5 // vucomisd xmm0, qword [rdx + 224] - QUAD $0x000001402494940f // sete byte [rsp + 320] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xa8248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 168], cl QUAD $0x000000e8822ef9c5 // vucomisd xmm0, qword [rdx + 232] - QUAD $0x000001202494940f // sete byte [rsp + 288] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl QUAD $0x000000f0822ef9c5 // vucomisd xmm0, qword [rdx + 240] - LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al QUAD $0x000000f8822ef9c5 // vucomisd xmm0, qword [rdx + 248] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000a024840244 // add r8b, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xc6 // or r14b, r8b - WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x58 // add sil, byte [rsp + 88] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0841; BYTE $0xf9 // or r9b, dil - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f40 // sete sil + WORD $0x2040; BYTE $0xc6 // and sil, al + WORD $0x0045; BYTE $0xed // add r13b, r13b + LONG $0x246c0244; BYTE $0x38 // add r13b, byte [rsp + 56] + WORD $0x8944; BYTE $0xe8 // mov eax, r13d LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG 
$0x2474b60f; BYTE $0x60 // movzx esi, byte [rsp + 96] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] - WORD $0xc000 // add al, al - LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0x8845; BYTE $0x1e // mov byte [r14], r11b - LONG $0x2474b60f; BYTE $0x38 // movzx esi, byte [rsp + 56] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + QUAD $0x00008024acb60f44; BYTE $0x00 // movzx r13d, byte [rsp + 128] + LONG $0x06e5c041 // shl r13b, 6 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x64b60f44; WORD $0x2024 // movzx r12d, byte [rsp + 32] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + WORD $0xc900 // add cl, cl + LONG $0x30244c02 // add cl, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x1c244488 // mov byte [rsp + 28], al + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xcb08 // or bl, cl + LONG $0x6cb60f44; WORD $0x2824 // movzx r13d, byte [rsp + 40] + LONG $0x04e5c041 // shl r13b, 4 + WORD $0x0841; BYTE $0xc5 // or r13b, al + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + LONG $0x20248488; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], al + LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp 
+ 28] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + LONG $0x74b60f44; WORD $0x5024 // movzx r14d, byte [rsp + 80] + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xc6 // or r14b, al + LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] + WORD $0xc900 // add cl, cl + LONG $0x68244c02 // add cl, byte [rsp + 104] + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x035e8841 // mov byte [r14 + 3], bl - LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - WORD $0x894d; BYTE $0xf3 // mov r11, r14 - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB2_27 - QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000a824b48b4c // mov r14, qword [rsp + 168] - -LBB2_29: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_157 - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 - JNE LBB2_136 - WORD $0xff31 // xor edi, edi - JMP LBB2_138 - + QUAD $0x00000098248cb60f // movzx ecx, byte [rsp + 152] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + QUAD $0x00000090248cb60f // movzx ecx, byte [rsp + 144] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0xd908 // or cl, bl + WORD $0x8941; BYTE $0xcc // mov r12d, ecx + QUAD $0x00000110248c8b48 // mov rcx, qword [rsp + 272] + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0x0844; BYTE $0xe8 // or al, r13b + QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c041 // shl r15b, 6 + WORD $0x0841; BYTE $0xdf // or r15b, bl + QUAD $0x0000012024b40a44 // or r14b, byte [rsp + 288] + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xf8 // or r8b, r15b + WORD $0x0845; BYTE $0xe0 // or r8b, r12b + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x000000a0249c0244 // add r11b, byte [rsp + 160] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + QUAD $0x000000a8249cb60f // movzx ebx, byte [rsp + 168] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xcb // or bl, r9b + WORD $0x8941; BYTE $0xd9 // mov r9d, ebx + WORD $0x0188 // mov byte [rcx], al + QUAD $0x000000b0249cb60f // movzx ebx, byte [rsp + 176] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x01718844 // mov byte [rcx + 1], r14b + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0844; BYTE $0xce // or sil, r9b + LONG $0x02418844 // mov byte [rcx + 2], r8b + LONG $0x03718840 // mov byte [rcx + 3], sil + LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 + LONG $0x04c18348 // add rcx, 4 + WORD $0x8949; BYTE $0xce // mov r14, rcx + QUAD $0x000000b824848348; BYTE $0xff // add qword [rsp + 184], -1 + JNE LBB2_27 + QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] + QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] + +LBB2_29: + LONG $0x05e3c149 // shl r11, 5 + WORD $0x394d; BYTE $0xd3 // cmp r11, r10 + JGE LBB2_165 + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0x294d; BYTE $0xd8 // sub r8, r11 + WORD $0xf749; BYTE 
$0xd3 // not r11 + WORD $0x014d; BYTE $0xd3 // add r11, r10 + JNE LBB2_136 + WORD $0xff31 // xor edi, edi + JMP LBB2_138 + LBB2_32: WORD $0xff83; BYTE $0x02 // cmp edi, 2 JE LBB2_105 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB2_157 - WORD $0x8a44; BYTE $0x36 // mov r14b, byte [rsi] + JNE LBB2_165 + WORD $0x8a44; BYTE $0x1e // mov r11b, byte [rsi] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xfa490f4d // cmovns r15, r10 @@ -10299,7 +10784,7 @@ LBB2_32: WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LBB2_36: - WORD $0x3a44; BYTE $0x32 // cmp r14b, byte [rdx] + WORD $0x3a44; BYTE $0x1a // cmp r11b, byte [rdx] LONG $0x01528d48 // lea rdx, [rdx + 1] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl @@ -10307,8 +10792,8 @@ LBB2_36: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xdc // mov r12, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf4 // mov r12, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0x3044; BYTE $0xcb // xor bl, r9b QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -10317,189 +10802,212 @@ LBB2_36: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xcf // xor dil, r9b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_36 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_38: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_128 LONG $0x20ff8349 // cmp r15, 32 - LONG $0x24748944; BYTE $0x1c // mov dword [rsp + 28], r14d + LONG $0x245c8944; BYTE $0x1c // mov dword [rsp + 28], r11d QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x0000018824bc894c // mov qword [rsp + 392], r15 JB LBB2_42 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx - WORD $0x3949; BYTE $0xc3 // cmp r11, rax - JAE LBB2_165 - LONG $0xbb048d4b // lea rax, [r11 + 4*r15] + WORD $0x3949; BYTE $0xc6 // cmp r14, rax + JAE LBB2_166 + LONG $0xbe048d4b // lea rax, [r14 + 4*r15] WORD $0x3948; BYTE $0xc2 // cmp rdx, rax - JAE LBB2_165 + JAE LBB2_166 LBB2_42: WORD $0xc031 // xor eax, eax QUAD $0x0000018024848948 // mov qword [rsp + 384], rax WORD $0x8949; BYTE $0xd4 // mov r12, rdx - QUAD $0x00000178249c894c // mov qword [rsp + 376], r11 + QUAD $0x0000017824b4894c // mov qword [rsp + 376], r14 LBB2_43: QUAD $0x0000018024bc2b4c // sub r15, qword [rsp + 384] - QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 LBB2_44: WORD $0x894c; BYTE $0xe1 // mov rcx, r12 - LONG $0x24343a45 // cmp r14b, byte [r12] + LONG $0x241c3a45 // cmp r11b, byte [r12] QUAD $0x000001402494940f // sete byte [rsp + 320] - LONG $0x24743a45; BYTE $0x01 // cmp r14b, byte [r12 + 1] - LONG $0xd2940f41 // sete r10b - LONG $0x24743a45; BYTE $0x02 // cmp r14b, byte [r12 + 2] - WORD $0x940f; BYTE $0xd3 // sete bl - LONG $0x24743a45; BYTE $0x03 // cmp r14b, byte [r12 + 3] + LONG $0x245c3a45; BYTE $0x01 // cmp r11b, byte [r12 + 1] + LONG $0xd1940f41 // sete r9b + LONG $0x245c3a45; BYTE $0x02 // cmp r11b, byte [r12 + 2] + LONG $0xd3940f41 // sete r11b + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x03 // cmp al, byte [r12 + 3] LONG $0xd5940f41 // sete r13b - LONG $0x24743a45; BYTE 
$0x04 // cmp r14b, byte [r12 + 4] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x24743a45; BYTE $0x05 // cmp r14b, byte [r12 + 5] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x24743a45; BYTE $0x06 // cmp r14b, byte [r12 + 6] - WORD $0x940f; BYTE $0xd0 // sete al - LONG $0x24743a45; BYTE $0x07 // cmp r14b, byte [r12 + 7] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x04 // cmp al, byte [r12 + 4] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x05 // cmp al, byte [r12 + 5] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x06 // cmp al, byte [r12 + 6] + WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x07 // cmp al, byte [r12 + 7] LONG $0xd4940f41 // sete r12b - LONG $0x08713a44 // cmp r14b, byte [rcx + 8] - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x09713a44 // cmp r14b, byte [rcx + 9] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x08 // cmp al, byte [rcx + 8] + QUAD $0x000000882494940f // sete byte [rsp + 136] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x09 // cmp al, byte [rcx + 9] LONG $0xd6940f40 // sete sil - LONG $0x0a713a44 // cmp r14b, byte [rcx + 10] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0a // cmp al, byte [rcx + 10] LONG $0xd7940f40 // sete dil - LONG $0x0b713a44 // cmp r14b, byte [rcx + 11] - LONG $0xd1940f41 // sete r9b - LONG $0x0c713a44 // cmp r14b, byte [rcx + 12] - LONG $0xd3940f41 // sete r11b - LONG $0x0d713a44 // cmp r14b, byte [rcx + 13] - LONG $0xd7940f41 // sete r15b - LONG $0x0e713a44 // cmp r14b, byte [rcx + 14] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x0f713a44 // cmp r14b, byte [rcx + 15] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0b // cmp al, byte [rcx + 11] LONG $0xd0940f41 // sete r8b - LONG $0x10713a44 // cmp r14b, byte [rcx + 16] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0c // cmp al, byte [rcx + 12] + LONG $0xd2940f41 // sete r10b + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0d // cmp al, byte [rcx + 13] + LONG $0xd7940f41 // sete r15b + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0e // cmp al, byte [rcx + 14] + QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0f // cmp al, byte [rcx + 15] + WORD $0x940f; BYTE $0xd0 // sete al + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x10 // cmp dl, byte [rcx + 16] QUAD $0x000001202494940f // sete byte [rsp + 288] - LONG $0x11713a44 // cmp r14b, byte [rcx + 17] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x11 // cmp dl, byte [rcx + 17] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x12 // cmp dl, byte [rcx + 18] + QUAD $0x000000982494940f // sete byte [rsp + 152] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x13 // cmp dl, byte [rcx + 19] + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x14 // cmp dl, byte [rcx + 20] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x1c24548b // mov edx, 
dword [rsp + 28] + WORD $0x513a; BYTE $0x15 // cmp dl, byte [rcx + 21] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x16 // cmp dl, byte [rcx + 22] LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x12713a44 // cmp r14b, byte [rcx + 18] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x13713a44 // cmp r14b, byte [rcx + 19] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x14713a44 // cmp r14b, byte [rcx + 20] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x15713a44 // cmp r14b, byte [rcx + 21] - QUAD $0x000000882494940f // sete byte [rsp + 136] - LONG $0x16713a44 // cmp r14b, byte [rcx + 22] - QUAD $0x000000902494940f // sete byte [rsp + 144] - LONG $0x17713a44 // cmp r14b, byte [rcx + 23] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x17 // cmp dl, byte [rcx + 23] LONG $0xd6940f41 // sete r14b LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x18 // cmp dl, byte [rcx + 24] QUAD $0x000001102494940f // sete byte [rsp + 272] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x19 // cmp dl, byte [rcx + 25] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1a // cmp dl, byte [rcx + 26] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1b // cmp dl, byte [rcx + 27] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1c // cmp dl, byte [rcx + 28] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1d // cmp dl, byte [rcx + 29] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1e // cmp dl, byte [rcx + 30] LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1f // cmp dl, byte [rcx + 31] WORD $0x940f; BYTE $0xd2 // sete dl - WORD $0x0045; BYTE $0xd2 // add r10b, r10b - QUAD $0x0000014024940244 // add r10b, byte [rsp + 320] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0x0045; BYTE $0xc9 // add r9b, r9b + QUAD $0x00000140248c0244 // add r9b, byte [rsp + 320] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e4c041 // shl r12b, 7 - WORD $0x0841; BYTE $0xc4 // or r12b, al - WORD $0xe3c0; BYTE $0x02 // shl bl, 2 - WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0x0841; BYTE $0xdc // or r12b, bl + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0845; BYTE $0xcb // or r11b, r9b WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000a024b40240 // add sil, byte [rsp + 160] + QUAD $0x0000008824b40240 // add sil, byte [rsp + 136] LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0845; BYTE $0xdd // or r13b, r11b + LONG $0x245c8b44; BYTE $0x1c // mov r11d, dword [rsp + 28] LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + LONG $0x245cb60f; BYTE $0x30 // movzx ebx, byte [rsp + 48] WORD $0xe3c0; BYTE $0x04 // shl 
bl, 4 WORD $0x0844; BYTE $0xeb // or bl, r13b WORD $0xde89 // mov esi, ebx - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0841; BYTE $0xf9 // or r9b, dil - LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] WORD $0xe3c0; BYTE $0x05 // shl bl, 5 WORD $0x0840; BYTE $0xf3 // or bl, sil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xcb // or r11b, r9b + LONG $0x04e2c041 // shl r10b, 4 + WORD $0x0845; BYTE $0xc2 // or r10b, r8b LONG $0x05e7c041 // shl r15b, 5 - WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x2474b60f; BYTE $0x58 // movzx esi, byte [rsp + 88] + WORD $0x0845; BYTE $0xd7 // or r15b, r10b + QUAD $0x0000009024b4b60f // movzx esi, byte [rsp + 144] LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xf0 // or r8b, sil + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0x0841; BYTE $0xdc // or r12b, bl - WORD $0x0845; BYTE $0xf8 // or r8b, r15b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xc000 // add al, al - LONG $0x20248402; WORD $0x0001; BYTE $0x00 // add al, byte [rsp + 288] - LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0x0844; BYTE $0xf8 // or al, r15b + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xdb00 // add bl, bl + LONG $0x20249c02; WORD $0x0001; BYTE $0x00 // add bl, byte [rsp + 288] + WORD $0xde89 // mov esi, ebx + QUAD $0x00000098249cb60f // movzx ebx, byte [rsp + 152] WORD $0xe3c0; BYTE $0x02 // shl bl, 2 - WORD $0xc308 // or bl, al + WORD $0x0840; BYTE $0xf3 // or bl, sil WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x03 // shl bl, 3 WORD $0x0840; BYTE $0xf3 // or bl, sil WORD $0xde89 // mov esi, ebx - QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] WORD $0xe3c0; BYTE $0x04 // shl bl, 4 WORD $0x0840; BYTE $0xf3 // or bl, sil WORD $0xde89 // mov esi, ebx - QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] WORD $0xe3c0; BYTE $0x05 // shl bl, 5 WORD $0x0840; BYTE $0xf3 // or bl, sil QUAD $0x0000017824b48b48 // mov rsi, qword [rsp + 376] WORD $0x8844; BYTE $0x26 // mov byte [rsi], r12b - QUAD $0x0000009024bcb60f // movzx edi, byte [rsp + 144] + LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] LONG $0x06e7c040 // shl dil, 6 LONG $0x07e6c041 // shl r14b, 7 WORD $0x0841; BYTE $0xfe // or r14b, dil - LONG $0x01468844 // mov byte [rsi + 1], r8b + WORD $0x4688; BYTE $0x01 // mov byte [rsi + 1], al WORD $0x0841; BYTE $0xde // or r14b, bl - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] WORD $0xc000 // add al, al LONG $0x10248402; WORD $0x0001; BYTE $0x00 // add al, byte [rsp + 272] WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG 
$0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xd808 // or al, bl LONG $0x245cb60f; BYTE $0x20 // movzx ebx, byte [rsp + 32] @@ -10508,12 +11016,11 @@ LBB2_44: WORD $0xda08 // or dl, bl WORD $0xc208 // or dl, al LONG $0x02768844 // mov byte [rsi + 2], r14b - LONG $0x24748b44; BYTE $0x1c // mov r14d, dword [rsp + 28] WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl LONG $0x20618d4c // lea r12, [rcx + 32] LONG $0x04c68348 // add rsi, 4 QUAD $0x0000017824b48948 // mov qword [rsp + 376], rsi - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 JNE LBB2_44 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x0000018824bc8b4c // mov r15, qword [rsp + 392] @@ -10523,11 +11030,11 @@ LBB2_46: WORD $0xff83; BYTE $0x07 // cmp edi, 7 JE LBB2_117 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB2_157 + JNE LBB2_165 WORD $0x8b4c; BYTE $0x2e // mov r13, qword [rsi] - LONG $0x1f728d4d // lea r14, [r10 + 31] + LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xda490f4d // cmovns r11, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -10545,8 +11052,8 @@ LBB2_50: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xd9 // mov r9, r11 - LONG $0x04b60f45; BYTE $0x33 // movzx r8d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf1 // mov r9, r14 + LONG $0x04b60f45; BYTE $0x36 // movzx r8d, byte [r14 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -10555,40 +11062,40 @@ LBB2_50: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_50 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_52: - LONG $0x05fec149 // sar r14, 5 + LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_56 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 - QUAD $0x000000a824b4894c // mov qword [rsp + 168], r14 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 + QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 LBB2_54: - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 WORD $0x3b4c; BYTE $0x2a // cmp r13, qword [rdx] - QUAD $0x000000982494940f // sete byte [rsp + 152] + QUAD $0x000000a02494940f // sete byte [rsp + 160] LONG $0x086a3b4c // cmp r13, qword [rdx + 8] - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x106a3b4c // cmp r13, qword [rdx + 16] LONG $0xd6940f41 // sete r14b LONG $0x186a3b4c // cmp r13, qword [rdx + 24] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + QUAD $0x000000882494940f // sete byte [rsp + 136] LONG $0x206a3b4c // cmp r13, qword [rdx + 32] - LONG $0x2454940f; BYTE $0x70 // 
sete byte [rsp + 112] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] LONG $0x286a3b4c // cmp r13, qword [rdx + 40] - QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x306a3b4c // cmp r13, qword [rdx + 48] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x386a3b4c // cmp r13, qword [rdx + 56] - LONG $0xd3940f41 // sete r11b + LONG $0xd7940f40 // sete dil LONG $0x406a3b4c // cmp r13, qword [rdx + 64] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + QUAD $0x000000902494940f // sete byte [rsp + 144] LONG $0x486a3b4c // cmp r13, qword [rdx + 72] LONG $0xd6940f40 // sete sil LONG $0x506a3b4c // cmp r13, qword [rdx + 80] @@ -10596,37 +11103,37 @@ LBB2_54: LONG $0x586a3b4c // cmp r13, qword [rdx + 88] LONG $0xd1940f41 // sete r9b LONG $0x606a3b4c // cmp r13, qword [rdx + 96] - LONG $0xd2940f41 // sete r10b + LONG $0xd3940f41 // sete r11b LONG $0x686a3b4c // cmp r13, qword [rdx + 104] LONG $0xd4940f41 // sete r12b LONG $0x706a3b4c // cmp r13, qword [rdx + 112] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x786a3b4c // cmp r13, qword [rdx + 120] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd0 // sete al LONG $0x80aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 128] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x88aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 136] LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x88aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 136] + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] LONG $0x90aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 144] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x98aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 152] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0xa0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 160] - QUAD $0x000000882494940f // sete byte [rsp + 136] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0xa8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 168] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] LONG $0xb0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 176] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0xb8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 184] LONG $0xd7940f41 // sete r15b LONG $0xc0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 192] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0xc8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 200] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] LONG $0xd0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 208] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0xd8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 216] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0xe0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 224] QUAD $0x000001402494940f // sete byte 
[rsp + 320] LONG $0xe8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 232] @@ -10634,111 +11141,109 @@ LBB2_54: LONG $0xf0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 240] LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0xf8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 248] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000a024940244 // add r10b, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + WORD $0x0845; BYTE $0xd6 // or r14b, r10b WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x50 // add sil, byte [rsp + 80] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax + QUAD $0x0000009024b40240 // add sil, byte [rsp + 144] + QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] LONG $0x02e0c041 // shl r8b, 2 WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x58 // movzx esi, byte [rsp + 88] + WORD $0x0845; BYTE $0xdc // or r12b, r11b + QUAD $0x0000009824b4b60f // movzx esi, byte [rsp + 152] LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xc000 // add al, al - LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, 
sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b - QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xdb00 // add bl, bl + LONG $0x60245c02 // add bl, byte [rsp + 96] + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0x8841; BYTE $0x3e // mov byte [r14], dil + QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] + LONG $0x06e6c040 // shl sil, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x014b8841 // mov byte [r11 + 1], cl WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + LONG $0x01468841 // mov byte [r14 + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x035b8841 // mov byte [r11 + 3], bl + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x027e8845 // mov byte 
[r14 + 2], r15b + LONG $0x034e8841 // mov byte [r14 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c38349 // add r11, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c68349 // add r14, 4 + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB2_54 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] LBB2_56: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_157 + LONG $0x05e3c149 // shl r11, 5 + WORD $0x394d; BYTE $0xd3 // cmp r11, r10 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 + WORD $0x294d; BYTE $0xd8 // sub r8, r11 + WORD $0xf749; BYTE $0xd3 // not r11 + WORD $0x014d; BYTE $0xd3 // add r11, r10 JE LBB2_93 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 @@ -10750,8 +11255,8 @@ LBB2_59: WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf3 // mov r11, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -10759,7 +11264,7 @@ LBB2_59: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + LONG $0x361c8841 // mov byte [r14 + rsi], bl LONG $0x02c78348 // add rdi, 2 LONG $0x086a3b4c // cmp r13, qword [rdx + 8] LONG $0x10528d48 // lea rdx, [rdx + 16] @@ -10771,16 +11276,16 @@ LBB2_59: WORD $0xe0d2 // shl al, cl WORD $0x2044; BYTE $0xc8 // and al, r9b WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al + LONG $0x36048841 // mov byte [r14 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB2_59 JMP LBB2_146 LBB2_60: LONG $0x2eb70f44 // movzx r13d, word [rsi] - LONG $0x1f728d4d // lea r14, [r10 + 31] + LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xda490f4d // cmovns r11, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -10798,8 +11303,8 @@ LBB2_62: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xd9 // mov r9, r11 - LONG $0x04b60f45; BYTE $0x33 // movzx r8d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf1 // mov r9, r14 + LONG $0x04b60f45; BYTE $0x36 // movzx r8d, byte [r14 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -10808,191 +11313,188 @@ LBB2_62: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_62 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_64: - LONG $0x05fec149 // sar r14, 5 + LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_68 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD 
$0x000000b024b4894c // mov qword [rsp + 176], r14 - QUAD $0x000000a824b4894c // mov qword [rsp + 168], r14 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 + QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 LBB2_66: - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 - LONG $0x2a3b4466 // cmp r13w, word [rdx] - WORD $0x940f; BYTE $0xd0 // sete al - LONG $0x6a3b4466; BYTE $0x02 // cmp r13w, word [rdx + 2] - LONG $0xd7940f40 // sete dil - LONG $0x6a3b4466; BYTE $0x04 // cmp r13w, word [rdx + 4] - LONG $0xd6940f41 // sete r14b - LONG $0x6a3b4466; BYTE $0x06 // cmp r13w, word [rdx + 6] - QUAD $0x000000982494940f // sete byte [rsp + 152] - LONG $0x6a3b4466; BYTE $0x08 // cmp r13w, word [rdx + 8] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x6a3b4466; BYTE $0x0a // cmp r13w, word [rdx + 10] - QUAD $0x000000902494940f // sete byte [rsp + 144] - LONG $0x6a3b4466; BYTE $0x0c // cmp r13w, word [rdx + 12] - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x6a3b4466; BYTE $0x0e // cmp r13w, word [rdx + 14] - LONG $0xd3940f41 // sete r11b - LONG $0x6a3b4466; BYTE $0x10 // cmp r13w, word [rdx + 16] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x6a3b4466; BYTE $0x12 // cmp r13w, word [rdx + 18] - LONG $0xd6940f40 // sete sil - LONG $0x6a3b4466; BYTE $0x14 // cmp r13w, word [rdx + 20] - LONG $0xd0940f41 // sete r8b - LONG $0x6a3b4466; BYTE $0x16 // cmp r13w, word [rdx + 22] - LONG $0xd1940f41 // sete r9b - LONG $0x6a3b4466; BYTE $0x18 // cmp r13w, word [rdx + 24] - LONG $0xd2940f41 // sete r10b - LONG $0x6a3b4466; BYTE $0x1a // cmp r13w, word [rdx + 26] - LONG $0xd4940f41 // sete r12b - LONG $0x6a3b4466; BYTE $0x1c // cmp r13w, word [rdx + 28] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x6a3b4466; BYTE $0x1e // cmp r13w, word [rdx + 30] - WORD $0x940f; BYTE $0xd1 // sete cl - LONG $0x6a3b4466; BYTE $0x20 // cmp r13w, word [rdx + 32] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x6a3b4466; BYTE $0x22 // cmp r13w, word [rdx + 34] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x6a3b4466; BYTE $0x24 // cmp r13w, word [rdx + 36] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x6a3b4466; BYTE $0x26 // cmp r13w, word [rdx + 38] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x6a3b4466; BYTE $0x28 // cmp r13w, word [rdx + 40] - QUAD $0x000000882494940f // sete byte [rsp + 136] - LONG $0x6a3b4466; BYTE $0x2a // cmp r13w, word [rdx + 42] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x6a3b4466; BYTE $0x2c // cmp r13w, word [rdx + 44] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x6a3b4466; BYTE $0x2e // cmp r13w, word [rdx + 46] - LONG $0xd7940f41 // sete r15b - LONG $0x6a3b4466; BYTE $0x30 // cmp r13w, word [rdx + 48] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] - LONG $0x6a3b4466; BYTE $0x32 // cmp r13w, word [rdx + 50] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - LONG $0x6a3b4466; BYTE $0x34 // cmp r13w, word [rdx + 52] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] - LONG $0x6a3b4466; BYTE $0x36 // cmp r13w, word [rdx + 54] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x6a3b4466; BYTE $0x38 // cmp r13w, word [rdx + 56] - QUAD $0x000001402494940f // sete byte [rsp + 320] - LONG $0x6a3b4466; BYTE $0x3a // cmp r13w, word [rdx + 58] - QUAD $0x000001202494940f // sete byte [rsp + 288] - LONG $0x6a3b4466; BYTE $0x3c // cmp r13w, word [rdx + 60] - LONG $0x2454940f; BYTE $0x1c // sete 
byte [rsp + 28] - LONG $0x6a3b4466; BYTE $0x3e // cmp r13w, word [rdx + 62] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - WORD $0x0840; BYTE $0xc7 // or dil, al - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x58 // add sil, byte [rsp + 88] - QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax - LONG $0x02e0c041 // shl r8b, 2 - WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x50 // movzx esi, byte [rsp + 80] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xc000 // add al, al - LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b - QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, 
eax - QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x035b8841 // mov byte [r11 + 3], bl - LONG $0x40c28348 // add rdx, 64 - LONG $0x04c38349 // add r11, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 + LONG $0x2a3b4466 // cmp r13w, word [rdx] + QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0x6a3b4466; BYTE $0x02 // cmp r13w, word [rdx + 2] + LONG $0xd2940f41 // sete r10b + LONG $0x6a3b4466; BYTE $0x04 // cmp r13w, word [rdx + 4] + LONG $0xd6940f41 // sete r14b + LONG $0x6a3b4466; BYTE $0x06 // cmp r13w, word [rdx + 6] + QUAD $0x000000a02494940f // sete byte [rsp + 160] + LONG $0x6a3b4466; BYTE $0x08 // cmp r13w, word [rdx + 8] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x6a3b4466; BYTE $0x0a // cmp r13w, word [rdx + 10] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x6a3b4466; BYTE $0x0c // cmp r13w, word [rdx + 12] + WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0x6a3b4466; BYTE $0x0e // cmp r13w, word [rdx + 14] + LONG $0xd7940f40 // sete dil + LONG $0x6a3b4466; BYTE $0x10 // cmp r13w, word [rdx + 16] + QUAD $0x000000982494940f // sete byte [rsp + 152] + LONG $0x6a3b4466; BYTE $0x12 // cmp r13w, word [rdx + 18] + LONG $0xd6940f40 // sete sil + LONG $0x6a3b4466; BYTE $0x14 // cmp r13w, word [rdx + 20] + LONG $0xd0940f41 // sete r8b + LONG $0x6a3b4466; BYTE $0x16 // cmp r13w, word [rdx + 22] + LONG $0xd1940f41 // sete r9b + LONG $0x6a3b4466; BYTE $0x18 // cmp r13w, word [rdx + 24] + LONG $0xd3940f41 // sete r11b + LONG $0x6a3b4466; BYTE $0x1a // cmp r13w, word [rdx + 26] + LONG $0xd4940f41 // sete r12b + LONG $0x6a3b4466; BYTE $0x1c // cmp r13w, word [rdx + 28] + QUAD $0x000000882494940f // sete byte [rsp + 136] + LONG $0x6a3b4466; BYTE $0x1e // cmp r13w, word [rdx + 30] + WORD $0x940f; BYTE $0xd0 // sete al + LONG $0x6a3b4466; BYTE $0x20 // cmp r13w, word [rdx + 32] + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x6a3b4466; BYTE $0x22 // cmp r13w, word [rdx + 34] + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x6a3b4466; BYTE $0x24 // cmp r13w, word [rdx + 36] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0x6a3b4466; BYTE $0x26 // cmp r13w, word [rdx + 38] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x6a3b4466; BYTE $0x28 // cmp r13w, word [rdx + 40] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x6a3b4466; BYTE $0x2a // cmp r13w, word [rdx + 42] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x6a3b4466; BYTE $0x2c // cmp r13w, word [rdx + 44] + QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x6a3b4466; BYTE $0x2e // cmp r13w, word [rdx + 46] + LONG $0xd7940f41 // sete r15b + LONG $0x6a3b4466; BYTE $0x30 // cmp r13w, word [rdx + 48] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x6a3b4466; BYTE $0x32 // cmp r13w, word [rdx + 50] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x6a3b4466; BYTE $0x34 // cmp r13w, word [rdx + 52] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x6a3b4466; BYTE $0x36 // cmp r13w, word [rdx + 54] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + 
LONG $0x6a3b4466; BYTE $0x38 // cmp r13w, word [rdx + 56] + QUAD $0x000001402494940f // sete byte [rsp + 320] + LONG $0x6a3b4466; BYTE $0x3a // cmp r13w, word [rdx + 58] + QUAD $0x000001202494940f // sete byte [rsp + 288] + LONG $0x6a3b4466; BYTE $0x3c // cmp r13w, word [rdx + 60] + LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] + LONG $0x6a3b4466; BYTE $0x3e // cmp r13w, word [rdx + 62] + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009024940244 // add r10b, byte [rsp + 144] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xf6 // add sil, sil + QUAD $0x0000009824b40240 // add sil, byte [rsp + 152] + QUAD $0x000000a0249cb60f // movzx ebx, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf0 // or r8b, sil + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b + LONG $0x05e4c041 // shl r12b, 5 + WORD $0x0845; BYTE $0xdc // or r12b, r11b + QUAD $0x0000008824b4b60f // movzx esi, byte [rsp + 136] + LONG $0x06e6c040 // shl sil, 6 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf0 // or al, sil + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xdb00 // add bl, bl + LONG $0x60245c02 // add bl, byte [rsp + 96] + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0x8841; BYTE $0x3e // mov byte [r14], dil + QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] + LONG $0x06e6c040 // shl sil, 6 + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xf7 // or r15b, sil + LONG $0x01468841 // mov byte [r14 + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xc000 // add al, al + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // 
mov ebx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x027e8845 // mov byte [r14 + 2], r15b + LONG $0x034e8841 // mov byte [r14 + 3], cl + LONG $0x40c28348 // add rdx, 64 + LONG $0x04c68349 // add r14, 4 + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB2_66 - QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] + QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] LBB2_68: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_157 + LONG $0x05e3c149 // shl r11, 5 + WORD $0x394d; BYTE $0xd3 // cmp r11, r10 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 + WORD $0x294d; BYTE $0xd8 // sub r8, r11 + WORD $0xf749; BYTE $0xd3 // not r11 + WORD $0x014d; BYTE $0xd3 // add r11, r10 JE LBB2_82 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 @@ -11004,8 +11506,8 @@ LBB2_71: WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf3 // mov r11, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -11013,7 +11515,7 @@ LBB2_71: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + LONG $0x361c8841 // mov byte [r14 + rsi], bl LONG $0x02c78348 // add rdi, 2 LONG $0x6a3b4466; BYTE $0x02 // cmp r13w, word [rdx + 2] LONG $0x04528d48 // lea rdx, [rdx + 4] @@ -11025,16 +11527,16 @@ LBB2_71: WORD $0xe0d2 // shl al, cl WORD $0x2044; BYTE $0xc8 // and al, r9b WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al + LONG $0x36048841 // mov byte [r14 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB2_71 JMP LBB2_142 LBB2_72: LONG $0x2eb70f44 // movzx r13d, word [rsi] - LONG $0x1f728d4d // lea r14, [r10 + 31] + LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xda490f4d // cmovns r11, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -11052,8 +11554,8 @@ LBB2_74: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xd9 // mov r9, r11 - LONG $0x04b60f45; BYTE $0x33 // movzx r8d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf1 // mov r9, r14 + LONG $0x04b60f45; BYTE $0x36 // movzx r8d, byte [r14 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -11062,190 +11564,188 @@ LBB2_74: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, 
bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_74 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_76: - LONG $0x05fec149 // sar r14, 5 + LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_80 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 - QUAD $0x000000a824b4894c // mov qword [rsp + 168], r14 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 + QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 LBB2_78: - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 - LONG $0x2a3b4466 // cmp r13w, word [rdx] - QUAD $0x000000982494940f // sete byte [rsp + 152] - LONG $0x6a3b4466; BYTE $0x02 // cmp r13w, word [rdx + 2] - LONG $0xd7940f40 // sete dil - LONG $0x6a3b4466; BYTE $0x04 // cmp r13w, word [rdx + 4] - LONG $0xd6940f41 // sete r14b - LONG $0x6a3b4466; BYTE $0x06 // cmp r13w, word [rdx + 6] - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x6a3b4466; BYTE $0x08 // cmp r13w, word [rdx + 8] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x6a3b4466; BYTE $0x0a // cmp r13w, word [rdx + 10] - QUAD $0x000000902494940f // sete byte [rsp + 144] - LONG $0x6a3b4466; BYTE $0x0c // cmp r13w, word [rdx + 12] - WORD $0x940f; BYTE $0xd0 // sete al - LONG $0x6a3b4466; BYTE $0x0e // cmp r13w, word [rdx + 14] - LONG $0xd3940f41 // sete r11b - LONG $0x6a3b4466; BYTE $0x10 // cmp r13w, word [rdx + 16] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x6a3b4466; BYTE $0x12 // cmp r13w, word [rdx + 18] - LONG $0xd6940f40 // sete sil - LONG $0x6a3b4466; BYTE $0x14 // cmp r13w, word [rdx + 20] - LONG $0xd0940f41 // sete r8b - LONG $0x6a3b4466; BYTE $0x16 // cmp r13w, word [rdx + 22] - LONG $0xd1940f41 // sete r9b - LONG $0x6a3b4466; BYTE $0x18 // cmp r13w, word [rdx + 24] - LONG $0xd2940f41 // sete r10b - LONG $0x6a3b4466; BYTE $0x1a // cmp r13w, word [rdx + 26] - LONG $0xd4940f41 // sete r12b - LONG $0x6a3b4466; BYTE $0x1c // cmp r13w, word [rdx + 28] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x6a3b4466; BYTE $0x1e // cmp r13w, word [rdx + 30] - WORD $0x940f; BYTE $0xd1 // sete cl - LONG $0x6a3b4466; BYTE $0x20 // cmp r13w, word [rdx + 32] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x6a3b4466; BYTE $0x22 // cmp r13w, word [rdx + 34] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x6a3b4466; BYTE $0x24 // cmp r13w, word [rdx + 36] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x6a3b4466; BYTE $0x26 // cmp r13w, word [rdx + 38] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x6a3b4466; BYTE $0x28 // cmp r13w, word [rdx + 40] - QUAD $0x000000882494940f // sete byte [rsp + 136] - LONG $0x6a3b4466; BYTE $0x2a // cmp r13w, word [rdx + 42] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x6a3b4466; BYTE $0x2c // cmp r13w, word [rdx + 44] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x6a3b4466; BYTE $0x2e // cmp r13w, word [rdx + 46] - LONG $0xd7940f41 // sete r15b - LONG $0x6a3b4466; BYTE $0x30 // cmp r13w, word [rdx + 48] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] - LONG $0x6a3b4466; BYTE $0x32 // cmp r13w, word [rdx + 50] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - LONG $0x6a3b4466; BYTE $0x34 // cmp r13w, word [rdx + 52] - LONG $0x2454940f; BYTE $0x48 // 
sete byte [rsp + 72] - LONG $0x6a3b4466; BYTE $0x36 // cmp r13w, word [rdx + 54] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x6a3b4466; BYTE $0x38 // cmp r13w, word [rdx + 56] - QUAD $0x000001402494940f // sete byte [rsp + 320] - LONG $0x6a3b4466; BYTE $0x3a // cmp r13w, word [rdx + 58] - QUAD $0x000001202494940f // sete byte [rsp + 288] - LONG $0x6a3b4466; BYTE $0x3c // cmp r13w, word [rdx + 60] - LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] - LONG $0x6a3b4466; BYTE $0x3e // cmp r13w, word [rdx + 62] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x50 // add sil, byte [rsp + 80] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax - LONG $0x02e0c041 // shl r8b, 2 - WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x58 // movzx esi, byte [rsp + 88] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xc000 // add al, al - LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b - QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 
72] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x035b8841 // mov byte [r11 + 3], bl - LONG $0x40c28348 // add rdx, 64 - LONG $0x04c38349 // add r11, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 + LONG $0x2a3b4466 // cmp r13w, word [rdx] + QUAD $0x000000a02494940f // sete byte [rsp + 160] + LONG $0x6a3b4466; BYTE $0x02 // cmp r13w, word [rdx + 2] + LONG $0xd2940f41 // sete r10b + LONG $0x6a3b4466; BYTE $0x04 // cmp r13w, word [rdx + 4] + LONG $0xd6940f41 // sete r14b + LONG $0x6a3b4466; BYTE $0x06 // cmp r13w, word [rdx + 6] + QUAD $0x000000882494940f // sete byte [rsp + 136] + LONG $0x6a3b4466; BYTE $0x08 // cmp r13w, word [rdx + 8] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0x6a3b4466; BYTE $0x0a // cmp r13w, word [rdx + 10] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x6a3b4466; BYTE $0x0c // cmp r13w, word [rdx + 12] + WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0x6a3b4466; BYTE $0x0e // cmp r13w, word [rdx + 14] + LONG $0xd7940f40 // sete dil + LONG $0x6a3b4466; BYTE $0x10 // cmp r13w, word [rdx + 16] + QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0x6a3b4466; BYTE $0x12 // cmp r13w, word [rdx + 18] + LONG $0xd6940f40 // sete sil + LONG $0x6a3b4466; BYTE $0x14 // cmp r13w, word [rdx + 20] + LONG $0xd0940f41 // sete r8b + LONG $0x6a3b4466; BYTE $0x16 // cmp r13w, word [rdx + 22] + LONG $0xd1940f41 // sete r9b + LONG $0x6a3b4466; BYTE $0x18 // cmp r13w, word [rdx + 24] + LONG $0xd3940f41 // sete r11b + LONG $0x6a3b4466; BYTE $0x1a // cmp r13w, word [rdx + 26] + LONG $0xd4940f41 // sete r12b + LONG $0x6a3b4466; BYTE $0x1c // cmp r13w, word [rdx + 28] + QUAD $0x000000982494940f // sete byte [rsp + 152] + LONG $0x6a3b4466; BYTE $0x1e // cmp r13w, word [rdx + 30] + WORD $0x940f; BYTE $0xd0 // sete al + LONG $0x6a3b4466; BYTE $0x20 // cmp r13w, word [rdx + 32] + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x6a3b4466; BYTE $0x22 // cmp r13w, word [rdx + 34] + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x6a3b4466; BYTE $0x24 // cmp r13w, word [rdx + 36] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x6a3b4466; BYTE $0x26 // cmp r13w, word [rdx + 38] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x6a3b4466; BYTE $0x28 // cmp r13w, word [rdx + 40] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x6a3b4466; BYTE $0x2a // cmp r13w, word [rdx + 42] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x6a3b4466; BYTE $0x2c // cmp r13w, word [rdx + 44] + QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x6a3b4466; BYTE $0x2e // cmp r13w, word [rdx + 46] + LONG $0xd7940f41 // sete r15b + LONG $0x6a3b4466; BYTE $0x30 // cmp r13w, 
word [rdx + 48] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x6a3b4466; BYTE $0x32 // cmp r13w, word [rdx + 50] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x6a3b4466; BYTE $0x34 // cmp r13w, word [rdx + 52] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x6a3b4466; BYTE $0x36 // cmp r13w, word [rdx + 54] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x6a3b4466; BYTE $0x38 // cmp r13w, word [rdx + 56] + QUAD $0x000001402494940f // sete byte [rsp + 320] + LONG $0x6a3b4466; BYTE $0x3a // cmp r13w, word [rdx + 58] + QUAD $0x000001202494940f // sete byte [rsp + 288] + LONG $0x6a3b4466; BYTE $0x3c // cmp r13w, word [rdx + 60] + LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] + LONG $0x6a3b4466; BYTE $0x3e // cmp r13w, word [rdx + 62] + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000a024940244 // add r10b, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xf6 // add sil, sil + QUAD $0x0000009024b40240 // add sil, byte [rsp + 144] + QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf0 // or r8b, sil + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b + LONG $0x05e4c041 // shl r12b, 5 + WORD $0x0845; BYTE $0xdc // or r12b, r11b + QUAD $0x0000009824b4b60f // movzx esi, byte [rsp + 152] + LONG $0x06e6c040 // shl sil, 6 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf0 // or al, sil + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xdb00 // add bl, bl + LONG $0x60245c02 // add bl, byte [rsp + 96] + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0x8841; BYTE $0x3e // mov byte [r14], dil + QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] + LONG $0x06e6c040 // shl sil, 6 + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xf7 // or r15b, sil + LONG $0x01468841 // mov byte [r14 + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp 
+ 48] + WORD $0xc000 // add al, al + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x027e8845 // mov byte [r14 + 2], r15b + LONG $0x034e8841 // mov byte [r14 + 3], cl + LONG $0x40c28348 // add rdx, 64 + LONG $0x04c68349 // add r14, 4 + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB2_78 - QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] + QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] LBB2_80: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_157 + LONG $0x05e3c149 // shl r11, 5 + WORD $0x394d; BYTE $0xd3 // cmp r11, r10 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 + WORD $0x294d; BYTE $0xd8 // sub r8, r11 + WORD $0xf749; BYTE $0xd3 // not r11 + WORD $0x014d; BYTE $0xd3 // add r11, r10 JNE LBB2_140 LBB2_82: @@ -11254,9 +11754,9 @@ LBB2_82: LBB2_83: WORD $0x8b4c; BYTE $0x2e // mov r13, qword [rsi] - LONG $0x1f728d4d // lea r14, [r10 + 31] + LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xda490f4d // cmovns r11, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -11274,8 +11774,8 @@ LBB2_85: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xd9 // mov r9, r11 - LONG $0x04b60f45; BYTE $0x33 // movzx r8d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf1 // mov r9, r14 + LONG $0x04b60f45; BYTE $0x36 // movzx r8d, byte [r14 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -11284,40 +11784,40 @@ LBB2_85: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_85 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_87: - LONG $0x05fec149 // sar r14, 5 + LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_91 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 - QUAD $0x000000a824b4894c // mov qword [rsp + 168], r14 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 + QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 
LBB2_89: - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 WORD $0x3b4c; BYTE $0x2a // cmp r13, qword [rdx] - QUAD $0x000000982494940f // sete byte [rsp + 152] + QUAD $0x000000a02494940f // sete byte [rsp + 160] LONG $0x086a3b4c // cmp r13, qword [rdx + 8] - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x106a3b4c // cmp r13, qword [rdx + 16] LONG $0xd6940f41 // sete r14b LONG $0x186a3b4c // cmp r13, qword [rdx + 24] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + QUAD $0x000000882494940f // sete byte [rsp + 136] LONG $0x206a3b4c // cmp r13, qword [rdx + 32] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] LONG $0x286a3b4c // cmp r13, qword [rdx + 40] - QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x306a3b4c // cmp r13, qword [rdx + 48] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x386a3b4c // cmp r13, qword [rdx + 56] - LONG $0xd3940f41 // sete r11b + LONG $0xd7940f40 // sete dil LONG $0x406a3b4c // cmp r13, qword [rdx + 64] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + QUAD $0x000000902494940f // sete byte [rsp + 144] LONG $0x486a3b4c // cmp r13, qword [rdx + 72] LONG $0xd6940f40 // sete sil LONG $0x506a3b4c // cmp r13, qword [rdx + 80] @@ -11325,37 +11825,37 @@ LBB2_89: LONG $0x586a3b4c // cmp r13, qword [rdx + 88] LONG $0xd1940f41 // sete r9b LONG $0x606a3b4c // cmp r13, qword [rdx + 96] - LONG $0xd2940f41 // sete r10b + LONG $0xd3940f41 // sete r11b LONG $0x686a3b4c // cmp r13, qword [rdx + 104] LONG $0xd4940f41 // sete r12b LONG $0x706a3b4c // cmp r13, qword [rdx + 112] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x786a3b4c // cmp r13, qword [rdx + 120] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd0 // sete al LONG $0x80aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 128] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x88aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 136] LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x88aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 136] + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] LONG $0x90aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 144] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x98aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 152] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0xa0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 160] - QUAD $0x000000882494940f // sete byte [rsp + 136] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0xa8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 168] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] LONG $0xb0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 176] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0xb8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 184] LONG $0xd7940f41 // sete r15b LONG $0xc0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 192] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE 
$0x28 // sete byte [rsp + 40] LONG $0xc8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 200] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] LONG $0xd0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 208] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0xd8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 216] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0xe0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 224] QUAD $0x000001402494940f // sete byte [rsp + 320] LONG $0xe8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 232] @@ -11363,111 +11863,109 @@ LBB2_89: LONG $0xf0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 240] LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0xf8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 248] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000a024940244 // add r10b, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + WORD $0x0845; BYTE $0xd6 // or r14b, r10b WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x50 // add sil, byte [rsp + 80] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax + QUAD $0x0000009024b40240 // add sil, byte [rsp + 144] + QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] LONG $0x02e0c041 // shl r8b, 2 WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x58 // movzx esi, byte [rsp + 88] + WORD $0x0845; BYTE $0xdc // or r12b, r11b + QUAD $0x0000009824b4b60f // movzx esi, byte [rsp + 152] LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // 
or cl, r12b - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf0 // or al, sil + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xdb00 // add bl, bl + LONG $0x60245c02 // add bl, byte [rsp + 96] + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0x8841; BYTE $0x3e // mov byte [r14], dil + QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] + LONG $0x06e6c040 // shl sil, 6 + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xf7 // or r15b, sil + LONG $0x01468841 // mov byte [r14 + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b - QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] WORD $0xe0c0; 
BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x035b8841 // mov byte [r11 + 3], bl + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x027e8845 // mov byte [r14 + 2], r15b + LONG $0x034e8841 // mov byte [r14 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c38349 // add r11, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c68349 // add r14, 4 + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB2_89 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] LBB2_91: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_157 + LONG $0x05e3c149 // shl r11, 5 + WORD $0x394d; BYTE $0xd3 // cmp r11, r10 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 + WORD $0x294d; BYTE $0xd8 // sub r8, r11 + WORD $0xf749; BYTE $0xd3 // not r11 + WORD $0x014d; BYTE $0xd3 // add r11, r10 JNE LBB2_144 LBB2_93: @@ -11475,9 +11973,9 @@ LBB2_93: JMP LBB2_146 LBB2_94: - LONG $0x1f728d4d // lea r14, [r10 + 31] + LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xda490f4d // cmovns r11, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -11490,14 +11988,16 @@ LBB2_94: LBB2_96: LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] LONG $0x04528d48 // lea rdx, [rdx + 4] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xcb20 // and bl, cl WORD $0xdbf6 // neg bl LONG $0x07708d48 // lea rsi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xdf // mov r15, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf7 // mov r15, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0x3044; BYTE $0xcb // xor bl, r9b QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -11506,192 +12006,276 @@ LBB2_96: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xcf // xor dil, r9b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_96 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_98: - LONG $0x05fec149 // sar r14, 5 + LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_102 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000a824b4894c // mov qword [rsp + 168], r14 - QUAD $0x0000009824b4894c 
// mov qword [rsp + 152], r14 + QUAD $0x000000b8249c894c // mov qword [rsp + 184], r11 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 LBB2_100: - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x422ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rdx + 4] - LONG $0xd0940f41 // sete r8b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al LONG $0x422ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rdx + 8] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x422ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rdx + 12] - LONG $0xd5940f41 // sete r13b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 320], cl LONG $0x422ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rdx + 16] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x422ef8c5; BYTE $0x14 // vucomiss xmm0, dword [rdx + 20] - QUAD $0x000000902494940f // sete byte [rsp + 144] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd4940f41 // sete r12b + WORD $0x2041; BYTE $0xc4 // and r12b, al LONG $0x422ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rdx + 24] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl LONG $0x422ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rdx + 28] - LONG $0xd6940f41 // sete r14b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x1c244c88 // mov byte [rsp + 28], cl LONG $0x422ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rdx + 32] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x422ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rdx + 36] - LONG $0xd6940f40 // sete sil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl LONG $0x422ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rdx + 40] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl LONG $0x422ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rdx + 44] - LONG $0xd1940f41 // sete r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x422ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rdx + 48] - LONG $0xd2940f41 // sete r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], cl LONG $0x422ef8c5; BYTE $0x34 // 
vucomiss xmm0, dword [rdx + 52] - LONG $0xd4940f41 // sete r12b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x422ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rdx + 56] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x422ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rdx + 60] + WORD $0x9b0f; BYTE $0xd0 // setnp al WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x422ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rdx + 64] - QUAD $0x000000802494940f // sete byte [rsp + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x422ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rdx + 68] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x422ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rdx + 72] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x422ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rdx + 76] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al LONG $0x422ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rdx + 80] - QUAD $0x000000882494940f // sete byte [rsp + 136] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x98248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], cl LONG $0x422ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rdx + 84] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x90248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], cl LONG $0x422ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rdx + 88] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x422ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rdx + 92] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al + LONG $0x422ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rdx + 92] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al LONG $0x422ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rdx + 96] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl LONG $0x422ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rdx + 100] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al LONG $0x422ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rdx + 104] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; 
BYTE $0xc2 // and r10b, al LONG $0x422ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rdx + 108] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al LONG $0x422ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rdx + 112] - QUAD $0x000001402494940f // sete byte [rsp + 320] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl LONG $0x422ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rdx + 116] - QUAD $0x000001202494940f // sete byte [rsp + 288] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl LONG $0x422ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rdx + 120] - LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x422ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rdx + 124] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000a024840244 // add r8b, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e6c041 // shl r14b, 7 - WORD $0x0841; BYTE $0xc6 // or r14b, al - LONG $0x02e3c041 // shl r11b, 2 - WORD $0x0845; BYTE $0xc3 // or r11b, r8b - WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x58 // add sil, byte [rsp + 88] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xdd // or r13b, r11b - QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0841; BYTE $0xf9 // or r9b, dil - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f40 // sete sil + WORD $0x2040; BYTE $0xc6 // and sil, al + WORD $0x0045; BYTE $0xed // add r13b, r13b + LONG $0x246c0244; BYTE $0x38 // add r13b, byte [rsp + 56] + WORD $0x8944; BYTE $0xe8 // mov eax, r13d LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x60 // movzx esi, byte [rsp + 96] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc6 // or r14b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] - WORD $0xc000 // add al, al - LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD 
$0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0x8845; BYTE $0x33 // mov byte [r11], r14b - LONG $0x2474b60f; BYTE $0x38 // movzx esi, byte [rsp + 56] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + QUAD $0x00008024acb60f44; BYTE $0x00 // movzx r13d, byte [rsp + 128] + LONG $0x06e5c041 // shl r13b, 6 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x64b60f44; WORD $0x2024 // movzx r12d, byte [rsp + 32] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + WORD $0xc900 // add cl, cl + LONG $0x30244c02 // add cl, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x1c244488 // mov byte [rsp + 28], al + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xcb08 // or bl, cl + LONG $0x6cb60f44; WORD $0x2824 // movzx r13d, byte [rsp + 40] + LONG $0x04e5c041 // shl r13b, 4 + WORD $0x0841; BYTE $0xc5 // or r13b, al + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + LONG $0x20248488; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], al + LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] + WORD $0xc900 // add cl, cl + LONG $0x68244c02 // add cl, byte [rsp + 104] + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x035b8841 // mov byte [r11 + 3], bl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0841; BYTE $0xde // or r14b, bl + QUAD $0x00000098248cb60f // movzx ecx, byte [rsp + 152] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xf1 // or cl, r14b + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] + LONG $0x64b60f44; WORD $0x1c24 // movzx r12d, byte [rsp + 28] + WORD $0x0845; BYTE $0xec // or 
r12b, r13b + QUAD $0x00000090249cb60f // movzx ebx, byte [rsp + 144] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c041 // shl r15b, 6 + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2024840a; WORD $0x0001; BYTE $0x00 // or al, byte [rsp + 288] + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xf8 // or r8b, r15b + WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x00000088249c0244 // add r11b, byte [rsp + 136] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + QUAD $0x000000a0248cb60f // movzx ecx, byte [rsp + 160] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xc9 // or cl, r9b + WORD $0x8845; BYTE $0x26 // mov byte [r14], r12b + QUAD $0x000000b0249cb60f // movzx ebx, byte [rsp + 176] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x01468841 // mov byte [r14 + 1], al + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0840; BYTE $0xce // or sil, cl + LONG $0x02468845 // mov byte [r14 + 2], r8b + LONG $0x03768841 // mov byte [r14 + 3], sil LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c38349 // add r11, 4 - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + LONG $0x04c68349 // add r14, 4 + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 JNE LBB2_100 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000a824b48b4c // mov r14, qword [rsp + 168] + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] LBB2_102: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_157 + LONG $0x05e3c149 // shl r11, 5 + WORD $0x394d; BYTE $0xd3 // cmp r11, r10 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 + WORD $0x294d; BYTE $0xd8 // sub r8, r11 + WORD $0xf749; BYTE $0xd3 // not r11 + WORD $0x014d; BYTE $0xd3 // add r11, r10 JNE LBB2_148 WORD $0xff31 // xor edi, edi JMP LBB2_150 LBB2_105: - WORD $0x8a44; BYTE $0x36 // mov r14b, byte [rsi] + WORD $0x8a44; BYTE $0x1e // mov r11b, byte [rsi] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xfa490f4d // cmovns r15, r10 @@ -11704,7 +12288,7 @@ LBB2_105: WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LBB2_107: - WORD $0x3a44; BYTE $0x32 // cmp r14b, byte [rdx] + WORD $0x3a44; BYTE $0x1a // cmp r11b, byte [rdx] LONG $0x01528d48 // lea rdx, [rdx + 1] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl @@ -11712,8 +12296,8 @@ LBB2_107: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xdc // mov r12, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf4 // mov r12, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0x3044; BYTE $0xcb // xor bl, r9b QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -11722,189 +12306,212 @@ LBB2_107: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xcf // xor dil, r9b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_107 - LONG $0x01c38349 // add r11, 
1 + LONG $0x01c68349 // add r14, 1 LBB2_109: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_132 LONG $0x20ff8349 // cmp r15, 32 - LONG $0x24748944; BYTE $0x1c // mov dword [rsp + 28], r14d + LONG $0x245c8944; BYTE $0x1c // mov dword [rsp + 28], r11d QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x0000018824bc894c // mov qword [rsp + 392], r15 JB LBB2_113 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx - WORD $0x3949; BYTE $0xc3 // cmp r11, rax - JAE LBB2_168 - LONG $0xbb048d4b // lea rax, [r11 + 4*r15] + WORD $0x3949; BYTE $0xc6 // cmp r14, rax + JAE LBB2_169 + LONG $0xbe048d4b // lea rax, [r14 + 4*r15] WORD $0x3948; BYTE $0xc2 // cmp rdx, rax - JAE LBB2_168 + JAE LBB2_169 LBB2_113: WORD $0xc031 // xor eax, eax QUAD $0x0000018024848948 // mov qword [rsp + 384], rax WORD $0x8949; BYTE $0xd4 // mov r12, rdx - QUAD $0x00000178249c894c // mov qword [rsp + 376], r11 + QUAD $0x0000017824b4894c // mov qword [rsp + 376], r14 LBB2_114: QUAD $0x0000018024bc2b4c // sub r15, qword [rsp + 384] - QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 LBB2_115: WORD $0x894c; BYTE $0xe1 // mov rcx, r12 - LONG $0x24343a45 // cmp r14b, byte [r12] + LONG $0x241c3a45 // cmp r11b, byte [r12] QUAD $0x000001402494940f // sete byte [rsp + 320] - LONG $0x24743a45; BYTE $0x01 // cmp r14b, byte [r12 + 1] - LONG $0xd2940f41 // sete r10b - LONG $0x24743a45; BYTE $0x02 // cmp r14b, byte [r12 + 2] - WORD $0x940f; BYTE $0xd3 // sete bl - LONG $0x24743a45; BYTE $0x03 // cmp r14b, byte [r12 + 3] + LONG $0x245c3a45; BYTE $0x01 // cmp r11b, byte [r12 + 1] + LONG $0xd1940f41 // sete r9b + LONG $0x245c3a45; BYTE $0x02 // cmp r11b, byte [r12 + 2] + LONG $0xd3940f41 // sete r11b + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x03 // cmp al, byte [r12 + 3] LONG $0xd5940f41 // sete r13b - LONG $0x24743a45; BYTE $0x04 // cmp r14b, byte [r12 + 4] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x24743a45; BYTE $0x05 // cmp r14b, byte [r12 + 5] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x24743a45; BYTE $0x06 // cmp r14b, byte [r12 + 6] - WORD $0x940f; BYTE $0xd0 // sete al - LONG $0x24743a45; BYTE $0x07 // cmp r14b, byte [r12 + 7] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x04 // cmp al, byte [r12 + 4] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x05 // cmp al, byte [r12 + 5] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x06 // cmp al, byte [r12 + 6] + WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0x1c24448b // mov eax, dword [rsp + 28] + LONG $0x24443a41; BYTE $0x07 // cmp al, byte [r12 + 7] LONG $0xd4940f41 // sete r12b - LONG $0x08713a44 // cmp r14b, byte [rcx + 8] - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x09713a44 // cmp r14b, byte [rcx + 9] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x08 // cmp al, byte [rcx + 8] + QUAD $0x000000882494940f // sete byte [rsp + 136] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x09 // cmp al, byte [rcx + 9] LONG $0xd6940f40 // sete sil - LONG $0x0a713a44 // cmp r14b, byte [rcx + 10] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0a // cmp al, byte [rcx + 10] LONG $0xd7940f40 // sete dil 
- LONG $0x0b713a44 // cmp r14b, byte [rcx + 11] - LONG $0xd1940f41 // sete r9b - LONG $0x0c713a44 // cmp r14b, byte [rcx + 12] - LONG $0xd3940f41 // sete r11b - LONG $0x0d713a44 // cmp r14b, byte [rcx + 13] - LONG $0xd7940f41 // sete r15b - LONG $0x0e713a44 // cmp r14b, byte [rcx + 14] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x0f713a44 // cmp r14b, byte [rcx + 15] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0b // cmp al, byte [rcx + 11] LONG $0xd0940f41 // sete r8b - LONG $0x10713a44 // cmp r14b, byte [rcx + 16] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0c // cmp al, byte [rcx + 12] + LONG $0xd2940f41 // sete r10b + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0d // cmp al, byte [rcx + 13] + LONG $0xd7940f41 // sete r15b + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0e // cmp al, byte [rcx + 14] + QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0x1c24448b // mov eax, dword [rsp + 28] + WORD $0x413a; BYTE $0x0f // cmp al, byte [rcx + 15] + WORD $0x940f; BYTE $0xd0 // sete al + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x10 // cmp dl, byte [rcx + 16] QUAD $0x000001202494940f // sete byte [rsp + 288] - LONG $0x11713a44 // cmp r14b, byte [rcx + 17] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x11 // cmp dl, byte [rcx + 17] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x12 // cmp dl, byte [rcx + 18] + QUAD $0x000000982494940f // sete byte [rsp + 152] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x13 // cmp dl, byte [rcx + 19] + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x14 // cmp dl, byte [rcx + 20] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x15 // cmp dl, byte [rcx + 21] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x16 // cmp dl, byte [rcx + 22] LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x12713a44 // cmp r14b, byte [rcx + 18] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x13713a44 // cmp r14b, byte [rcx + 19] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x14713a44 // cmp r14b, byte [rcx + 20] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x15713a44 // cmp r14b, byte [rcx + 21] - QUAD $0x000000882494940f // sete byte [rsp + 136] - LONG $0x16713a44 // cmp r14b, byte [rcx + 22] - QUAD $0x000000902494940f // sete byte [rsp + 144] - LONG $0x17713a44 // cmp r14b, byte [rcx + 23] + LONG $0x1c24548b // mov edx, dword [rsp + 28] + WORD $0x513a; BYTE $0x17 // cmp dl, byte [rcx + 23] LONG $0xd6940f41 // sete r14b LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x18 // cmp dl, byte [rcx + 24] QUAD $0x000001102494940f // sete byte [rsp + 272] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x19 // cmp dl, byte [rcx + 25] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1a // cmp dl, byte [rcx + 26] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] LONG $0x1c24548b // mov edx, 
dword [rsp + 28] WORD $0x513a; BYTE $0x1b // cmp dl, byte [rcx + 27] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1c // cmp dl, byte [rcx + 28] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1d // cmp dl, byte [rcx + 29] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1e // cmp dl, byte [rcx + 30] LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0x1c24548b // mov edx, dword [rsp + 28] WORD $0x513a; BYTE $0x1f // cmp dl, byte [rcx + 31] WORD $0x940f; BYTE $0xd2 // sete dl - WORD $0x0045; BYTE $0xd2 // add r10b, r10b - QUAD $0x0000014024940244 // add r10b, byte [rsp + 320] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0x0045; BYTE $0xc9 // add r9b, r9b + QUAD $0x00000140248c0244 // add r9b, byte [rsp + 320] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e4c041 // shl r12b, 7 - WORD $0x0841; BYTE $0xc4 // or r12b, al - WORD $0xe3c0; BYTE $0x02 // shl bl, 2 - WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0x0841; BYTE $0xdc // or r12b, bl + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0845; BYTE $0xcb // or r11b, r9b WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000a024b40240 // add sil, byte [rsp + 160] + QUAD $0x0000008824b40240 // add sil, byte [rsp + 136] LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0845; BYTE $0xdd // or r13b, r11b + LONG $0x245c8b44; BYTE $0x1c // mov r11d, dword [rsp + 28] LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + LONG $0x245cb60f; BYTE $0x30 // movzx ebx, byte [rsp + 48] WORD $0xe3c0; BYTE $0x04 // shl bl, 4 WORD $0x0844; BYTE $0xeb // or bl, r13b WORD $0xde89 // mov esi, ebx - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0841; BYTE $0xf9 // or r9b, dil - LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] WORD $0xe3c0; BYTE $0x05 // shl bl, 5 WORD $0x0840; BYTE $0xf3 // or bl, sil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xcb // or r11b, r9b + LONG $0x04e2c041 // shl r10b, 4 + WORD $0x0845; BYTE $0xc2 // or r10b, r8b LONG $0x05e7c041 // shl r15b, 5 - WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x2474b60f; BYTE $0x58 // movzx esi, byte [rsp + 88] + WORD $0x0845; BYTE $0xd7 // or r15b, r10b + QUAD $0x0000009024b4b60f // movzx esi, byte [rsp + 144] LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xf0 // or r8b, sil + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0x0841; BYTE $0xdc // or r12b, bl - WORD $0x0845; BYTE $0xf8 // or r8b, r15b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xc000 // add al, al - LONG $0x20248402; WORD $0x0001; BYTE $0x00 // add al, byte [rsp + 288] - LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0x0844; BYTE $0xf8 // or al, r15b + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xdb00 // add bl, bl + LONG $0x20249c02; WORD $0x0001; BYTE $0x00 // add bl, byte [rsp + 288] + WORD $0xde89 // mov esi, ebx + QUAD 
$0x00000098249cb60f // movzx ebx, byte [rsp + 152] WORD $0xe3c0; BYTE $0x02 // shl bl, 2 - WORD $0xc308 // or bl, al + WORD $0x0840; BYTE $0xf3 // or bl, sil WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x03 // shl bl, 3 WORD $0x0840; BYTE $0xf3 // or bl, sil WORD $0xde89 // mov esi, ebx - QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] WORD $0xe3c0; BYTE $0x04 // shl bl, 4 WORD $0x0840; BYTE $0xf3 // or bl, sil WORD $0xde89 // mov esi, ebx - QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] WORD $0xe3c0; BYTE $0x05 // shl bl, 5 WORD $0x0840; BYTE $0xf3 // or bl, sil QUAD $0x0000017824b48b48 // mov rsi, qword [rsp + 376] WORD $0x8844; BYTE $0x26 // mov byte [rsi], r12b - QUAD $0x0000009024bcb60f // movzx edi, byte [rsp + 144] + LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] LONG $0x06e7c040 // shl dil, 6 LONG $0x07e6c041 // shl r14b, 7 WORD $0x0841; BYTE $0xfe // or r14b, dil - LONG $0x01468844 // mov byte [rsi + 1], r8b + WORD $0x4688; BYTE $0x01 // mov byte [rsi + 1], al WORD $0x0841; BYTE $0xde // or r14b, bl - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] WORD $0xc000 // add al, al LONG $0x10248402; WORD $0x0001; BYTE $0x00 // add al, byte [rsp + 272] WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xd808 // or al, bl LONG $0x245cb60f; BYTE $0x20 // movzx ebx, byte [rsp + 32] @@ -11913,12 +12520,11 @@ LBB2_115: WORD $0xda08 // or dl, bl WORD $0xc208 // or dl, al LONG $0x02768844 // mov byte [rsi + 2], r14b - LONG $0x24748b44; BYTE $0x1c // mov r14d, dword [rsp + 28] WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl LONG $0x20618d4c // lea r12, [rcx + 32] LONG $0x04c68348 // add rsi, 4 QUAD $0x0000017824b48948 // mov qword [rsp + 376], rsi - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 JNE LBB2_115 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x0000018824bc8b4c // mov r15, qword [rsp + 392] @@ -11926,9 +12532,9 @@ LBB2_115: LBB2_117: WORD $0x8b44; BYTE $0x2e // mov r13d, dword [rsi] - LONG $0x1f728d4d // lea r14, [r10 + 31] + LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xda490f4d // cmovns r11, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -11946,8 +12552,8 @@ LBB2_119: WORD $0x8548; BYTE $0xc0 // test 
rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - WORD $0x894d; BYTE $0xd9 // mov r9, r11 - LONG $0x04b60f45; BYTE $0x33 // movzx r8d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf1 // mov r9, r14 + LONG $0x04b60f45; BYTE $0x36 // movzx r8d, byte [r14 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -11956,40 +12562,40 @@ LBB2_119: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x333c8841 // mov byte [r11 + rsi], dil + LONG $0x363c8841 // mov byte [r14 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB2_119 - LONG $0x01c38349 // add r11, 1 + LONG $0x01c68349 // add r14, 1 LBB2_121: - LONG $0x05fec149 // sar r14, 5 + LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB2_125 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 - QUAD $0x000000a824b4894c // mov qword [rsp + 168], r14 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 + QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 LBB2_123: - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 WORD $0x3b44; BYTE $0x2a // cmp r13d, dword [rdx] - QUAD $0x000000982494940f // sete byte [rsp + 152] + QUAD $0x000000a02494940f // sete byte [rsp + 160] LONG $0x046a3b44 // cmp r13d, dword [rdx + 4] - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x086a3b44 // cmp r13d, dword [rdx + 8] LONG $0xd6940f41 // sete r14b LONG $0x0c6a3b44 // cmp r13d, dword [rdx + 12] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + QUAD $0x000000882494940f // sete byte [rsp + 136] LONG $0x106a3b44 // cmp r13d, dword [rdx + 16] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] LONG $0x146a3b44 // cmp r13d, dword [rdx + 20] - QUAD $0x000000902494940f // sete byte [rsp + 144] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x186a3b44 // cmp r13d, dword [rdx + 24] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x1c6a3b44 // cmp r13d, dword [rdx + 28] - LONG $0xd3940f41 // sete r11b + LONG $0xd7940f40 // sete dil LONG $0x206a3b44 // cmp r13d, dword [rdx + 32] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + QUAD $0x000000902494940f // sete byte [rsp + 144] LONG $0x246a3b44 // cmp r13d, dword [rdx + 36] LONG $0xd6940f40 // sete sil LONG $0x286a3b44 // cmp r13d, dword [rdx + 40] @@ -11997,37 +12603,37 @@ LBB2_123: LONG $0x2c6a3b44 // cmp r13d, dword [rdx + 44] LONG $0xd1940f41 // sete r9b LONG $0x306a3b44 // cmp r13d, dword [rdx + 48] - LONG $0xd2940f41 // sete r10b + LONG $0xd3940f41 // sete r11b LONG $0x346a3b44 // cmp r13d, dword [rdx + 52] LONG $0xd4940f41 // sete r12b LONG $0x386a3b44 // cmp r13d, dword [rdx + 56] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + QUAD $0x000000982494940f // sete byte [rsp + 152] LONG $0x3c6a3b44 // cmp r13d, dword [rdx + 60] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd0 // sete al LONG $0x406a3b44 // cmp r13d, dword [rdx + 64] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x446a3b44 // cmp r13d, dword [rdx + 68] LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x446a3b44 // cmp r13d, dword [rdx + 68] + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] LONG $0x486a3b44 // cmp r13d, 
dword [rdx + 72] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x4c6a3b44 // cmp r13d, dword [rdx + 76] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0x506a3b44 // cmp r13d, dword [rdx + 80] - QUAD $0x000000882494940f // sete byte [rsp + 136] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x546a3b44 // cmp r13d, dword [rdx + 84] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] LONG $0x586a3b44 // cmp r13d, dword [rdx + 88] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0x5c6a3b44 // cmp r13d, dword [rdx + 92] LONG $0xd7940f41 // sete r15b LONG $0x606a3b44 // cmp r13d, dword [rdx + 96] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x646a3b44 // cmp r13d, dword [rdx + 100] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] LONG $0x686a3b44 // cmp r13d, dword [rdx + 104] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x6c6a3b44 // cmp r13d, dword [rdx + 108] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0x706a3b44 // cmp r13d, dword [rdx + 112] QUAD $0x000001402494940f // sete byte [rsp + 320] LONG $0x746a3b44 // cmp r13d, dword [rdx + 116] @@ -12035,125 +12641,123 @@ LBB2_123: LONG $0x786a3b44 // cmp r13d, dword [rdx + 120] LONG $0x2454940f; BYTE $0x1c // sete byte [rsp + 28] LONG $0x7c6a3b44 // cmp r13d, dword [rdx + 124] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000a024940244 // add r10b, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + WORD $0x0845; BYTE $0xd6 // or r14b, r10b WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x50 // add sil, byte [rsp + 80] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax + QUAD $0x0000009024b40240 // add sil, byte [rsp + 144] + QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] LONG $0x02e0c041 // shl r8b, 2 WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; 
BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x58 // movzx esi, byte [rsp + 88] + WORD $0x0845; BYTE $0xdc // or r12b, r11b + QUAD $0x0000009824b4b60f // movzx esi, byte [rsp + 152] LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xc000 // add al, al - LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b - QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xdb00 // add bl, bl + LONG $0x60245c02 // add bl, byte [rsp + 96] + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0xde89 // mov esi, ebx + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0x8841; BYTE $0x3e // mov byte [r14], dil + QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] + LONG $0x06e6c040 // shl sil, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x014b8841 // mov byte [r11 + 1], cl WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + LONG $0x01468841 // mov byte [r14 + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx 
eax, byte [rsp + 72] + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x035b8841 // mov byte [r11 + 3], bl + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x027e8845 // mov byte [r14 + 2], r15b + LONG $0x034e8841 // mov byte [r14 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c38349 // add r11, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c68349 // add r14, 4 + QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 JNE LBB2_123 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] LBB2_125: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_157 + LONG $0x05e3c149 // shl r11, 5 + WORD $0x394d; BYTE $0xd3 // cmp r11, r10 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 - JNE LBB2_152 + WORD $0x294d; BYTE $0xd8 // sub r8, r11 + WORD $0xf749; BYTE $0xd3 // not r11 + WORD $0x014d; BYTE $0xd3 // add r11, r10 + JNE LBB2_153 LBB2_127: WORD $0xff31 // xor edi, edi - JMP LBB2_154 + JMP LBB2_155 LBB2_128: - QUAD $0x00000178249c894c // mov qword [rsp + 376], r11 + QUAD $0x0000017824b4894c // mov qword [rsp + 376], r14 WORD $0x8949; BYTE $0xd4 // mov r12, rdx LBB2_129: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB2_157 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 @@ -12162,10 +12766,10 @@ LBB2_129: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi - QUAD $0x00000178249c8b4c // mov r11, qword [rsp + 376] + QUAD $0x0000017824b48b4c // mov r14, qword [rsp + 376] LBB2_159: - LONG $0x34343a45 // cmp r14b, byte [r12 + rsi] + LONG $0x341c3a45 // cmp r11b, byte [r12 + rsi] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xf7 // mov rdi, rsi @@ -12174,12 +12778,12 @@ LBB2_159: WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b2 // mov dl, 1 WORD $0xe2d2 // shl dl, cl - LONG $0x0cb60f45; 
BYTE $0x3b // movzx r9d, byte [r11 + rdi] + LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xda20 // and dl, bl WORD $0x3044; BYTE $0xca // xor dl, r9b - LONG $0x3b148841 // mov byte [r11 + rdi], dl - LONG $0x34743a45; BYTE $0x01 // cmp r14b, byte [r12 + rsi + 1] + LONG $0x3e148841 // mov byte [r14 + rdi], dl + LONG $0x345c3a45; BYTE $0x01 // cmp r11b, byte [r12 + rsi + 1] LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl @@ -12189,19 +12793,19 @@ LBB2_159: WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0xd030 // xor al, dl - LONG $0x3b048841 // mov byte [r11 + rdi], al + LONG $0x3e048841 // mov byte [r14 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi JNE LBB2_159 JMP LBB2_162 LBB2_132: - QUAD $0x00000178249c894c // mov qword [rsp + 376], r11 + QUAD $0x0000017824b4894c // mov qword [rsp + 376], r14 WORD $0x8949; BYTE $0xd4 // mov r12, rdx LBB2_133: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB2_157 + JGE LBB2_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 @@ -12213,46 +12817,50 @@ LBB2_135: JMP LBB2_163 LBB2_136: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0xff31 // xor edi, edi LBB2_137: LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xc820 // and al, cl WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0x894d; BYTE $0xf7 // mov r15, r14 + LONG $0x14b60f45; BYTE $0x36 // movzx r10d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b + WORD $0x2041; BYTE $0xc3 // and r11b, al + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + LONG $0x361c8845 // mov byte [r14 + rsi], r11b LONG $0x02c78348 // add rdi, 2 LONG $0x422ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rdx + 8] LONG $0x10528d48 // lea rdx, [rdx + 16] - LONG $0xd1940f41 // sete r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl + LONG $0xd29b0f41 // setnp r10b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xd0 // and al, r10b + WORD $0xd8f6 // neg al + WORD $0x3044; BYTE $0xd8 // xor al, r11b WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xdb // xor bl, r11b + LONG $0x361c8841 // mov byte [r14 + rsi], bl + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi JNE LBB2_137 LBB2_138: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_157 + JE LBB2_165 LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] - JMP LBB2_156 + JMP LBB2_152 LBB2_140: WORD $0x894d; BYTE $0xc2 
// mov r10, r8 @@ -12265,8 +12873,8 @@ LBB2_141: WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf3 // mov r11, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -12274,7 +12882,7 @@ LBB2_141: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + LONG $0x361c8841 // mov byte [r14 + rsi], bl LONG $0x02c78348 // add rdi, 2 LONG $0x6a3b4466; BYTE $0x02 // cmp r13w, word [rdx + 2] LONG $0x04528d48 // lea rdx, [rdx + 4] @@ -12286,15 +12894,15 @@ LBB2_141: WORD $0xe0d2 // shl al, cl WORD $0x2044; BYTE $0xc8 // and al, r9b WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al + LONG $0x36048841 // mov byte [r14 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB2_141 LBB2_142: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_157 + JE LBB2_165 LONG $0x2a3b4466 // cmp r13w, word [rdx] - JMP LBB2_156 + JMP LBB2_157 LBB2_144: WORD $0x894d; BYTE $0xc2 // mov r10, r8 @@ -12307,8 +12915,8 @@ LBB2_145: WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf3 // mov r11, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -12316,7 +12924,7 @@ LBB2_145: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + LONG $0x361c8841 // mov byte [r14 + rsi], bl LONG $0x02c78348 // add rdi, 2 LONG $0x086a3b4c // cmp r13, qword [rdx + 8] LONG $0x10528d48 // lea rdx, [rdx + 16] @@ -12328,71 +12936,92 @@ LBB2_145: WORD $0xe0d2 // shl al, cl WORD $0x2044; BYTE $0xc8 // and al, r9b WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al + LONG $0x36048841 // mov byte [r14 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB2_145 LBB2_146: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_157 + JE LBB2_165 WORD $0x3b4c; BYTE $0x2a // cmp r13, qword [rdx] - JMP LBB2_156 + JMP LBB2_157 LBB2_148: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0xff31 // xor edi, edi LBB2_149: LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xc820 // and al, cl WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0x894d; BYTE $0xf7 // mov r15, r14 + LONG $0x14b60f45; BYTE $0x36 // movzx r10d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl + WORD $0x3044; BYTE $0xd0 // xor al, 
r10b + WORD $0x2041; BYTE $0xc3 // and r11b, al + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + LONG $0x361c8845 // mov byte [r14 + rsi], r11b LONG $0x02c78348 // add rdi, 2 LONG $0x422ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rdx + 4] LONG $0x08528d48 // lea rdx, [rdx + 8] - LONG $0xd1940f41 // sete r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl + LONG $0xd29b0f41 // setnp r10b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xd0 // and al, r10b + WORD $0xd8f6 // neg al + WORD $0x3044; BYTE $0xd8 // xor al, r11b WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xdb // xor bl, r11b + LONG $0x361c8841 // mov byte [r14 + rsi], bl + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi JNE LBB2_149 LBB2_150: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_157 + JE LBB2_165 LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] - JMP LBB2_156 LBB2_152: + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al + WORD $0xdaf6 // neg dl + WORD $0x8948; BYTE $0xf8 // mov rax, rdi + LONG $0x03e8c148 // shr rax, 3 + LONG $0x06348a41 // mov sil, byte [r14 + rax] + LONG $0x07e78040 // and dil, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0xf989 // mov ecx, edi + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf2 // xor dl, sil + WORD $0xd320 // and bl, dl + WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x061c8841 // mov byte [r14 + rax], bl + JMP LBB2_165 + +LBB2_153: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xff31 // xor edi, edi -LBB2_153: +LBB2_154: WORD $0x3b44; BYTE $0x2a // cmp r13d, dword [rdx] WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] + WORD $0x894d; BYTE $0xf3 // mov r11, r14 + LONG $0x0cb60f45; BYTE $0x36 // movzx r9d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -12400,7 +13029,7 @@ LBB2_153: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + LONG $0x361c8841 // mov byte [r14 + rsi], bl LONG $0x02c78348 // add rdi, 2 LONG $0x046a3b44 // cmp r13d, dword [rdx + 4] LONG $0x08528d48 // lea rdx, [rdx + 8] @@ -12412,21 +13041,21 @@ LBB2_153: WORD $0xe0d2 // shl al, cl WORD $0x2044; BYTE $0xc8 // and al, r9b WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al + LONG $0x36048841 // mov byte [r14 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi - JNE LBB2_153 + JNE LBB2_154 -LBB2_154: +LBB2_155: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_157 + JE LBB2_165 WORD $0x3b44; BYTE $0x2a // cmp r13d, dword [rdx] -LBB2_156: +LBB2_157: WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfa // mov rdx, rdi LONG $0x03eac148 // shr rdx, 3 - LONG $0x13348a41 // mov sil, byte [r11 + rdx] + LONG $0x16348a41 // mov sil, byte [r14 + rdx] LONG $0x07e78040 // and dil, 7 WORD $0x01b3 // mov bl, 1 WORD $0xf989 // mov ecx, edi @@ -12434,21 +13063,17 @@ LBB2_156: WORD $0x3040; BYTE $0xf0 // xor al, 
sil WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xf3 // xor bl, sil - LONG $0x131c8841 // mov byte [r11 + rdx], bl - -LBB2_157: - MOVQ 1280(SP), SP - VZEROUPPER - RET + LONG $0x161c8841 // mov byte [r14 + rdx], bl + JMP LBB2_165 LBB2_160: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi - QUAD $0x00000178249c8b4c // mov r11, qword [rsp + 376] + QUAD $0x0000017824b48b4c // mov r14, qword [rsp + 376] LBB2_161: - LONG $0x34343a45 // cmp r14b, byte [r12 + rsi] + LONG $0x341c3a45 // cmp r11b, byte [r12 + rsi] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xf7 // mov rdi, rsi @@ -12457,12 +13082,12 @@ LBB2_161: WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b2 // mov dl, 1 WORD $0xe2d2 // shl dl, cl - LONG $0x0cb60f45; BYTE $0x3b // movzx r9d, byte [r11 + rdi] + LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xda20 // and dl, bl WORD $0x3044; BYTE $0xca // xor dl, r9b - LONG $0x3b148841 // mov byte [r11 + rdi], dl - LONG $0x34743a45; BYTE $0x01 // cmp r14b, byte [r12 + rsi + 1] + LONG $0x3e148841 // mov byte [r14 + rdi], dl + LONG $0x345c3a45; BYTE $0x01 // cmp r11b, byte [r12 + rsi + 1] LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl @@ -12472,7 +13097,7 @@ LBB2_161: WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0xd030 // xor al, dl - LONG $0x3b048841 // mov byte [r11 + rdi], al + LONG $0x3e048841 // mov byte [r14 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi JNE LBB2_161 @@ -12481,8 +13106,8 @@ LBB2_162: LBB2_163: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_157 - LONG $0x24343a45 // cmp r14b, byte [r12] + JE LBB2_165 + LONG $0x241c3a45 // cmp r11b, byte [r12] WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xf2 // mov rdx, rsi @@ -12497,57 +13122,61 @@ LBB2_163: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB2_157 LBB2_165: + MOVQ 1280(SP), SP + VZEROUPPER + RET + +LBB2_166: LONG $0xe0e78349 // and r15, -32 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx QUAD $0x0000019024848948 // mov qword [rsp + 400], rax QUAD $0x0000018024bc894c // mov qword [rsp + 384], r15 - LONG $0xbb048d4b // lea rax, [r11 + 4*r15] + LONG $0xbe048d4b // lea rax, [r14 + 4*r15] QUAD $0x0000017824848948 // mov qword [rsp + 376], rax - LONG $0x6e79c1c4; BYTE $0xc6 // vmovd xmm0, r14d + LONG $0x6e79c1c4; BYTE $0xc3 // vmovd xmm0, r11d LONG $0x787de2c4; BYTE $0xc0 // vpbroadcastb ymm0, xmm0 QUAD $0x00020024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 512], ymm0 WORD $0xc031 // xor eax, eax - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 -LBB2_166: +LBB2_167: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000019824848948 // mov qword [rsp + 408], rax LONG $0x05e3c148 // shl rbx, 5 WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x20c88348 // or rax, 32 - LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax + QUAD $0x000000c024848948 // mov qword [rsp + 192], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x40c88348 // or rax, 64 - LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax + LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x60c88348 // or rax, 96 - QUAD $0x000000b024848948 // mov qword [rsp + 176], 
rax + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00800d48; WORD $0x0000 // or rax, 128 - LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax + QUAD $0x000000a824848948 // mov qword [rsp + 168], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00a00d48; WORD $0x0000 // or rax, 160 - LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax + QUAD $0x0000008824848948 // mov qword [rsp + 136], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00c00d48; WORD $0x0000 // or rax, 192 - QUAD $0x000000a024848948 // mov qword [rsp + 160], rax + QUAD $0x0000009824848948 // mov qword [rsp + 152], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00e00d48; WORD $0x0000 // or rax, 224 - QUAD $0x0000009024848948 // mov qword [rsp + 144], rax + QUAD $0x000000f824848948 // mov qword [rsp + 248], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01000d48; WORD $0x0000 // or rax, 256 - QUAD $0x0000008824848948 // mov qword [rsp + 136], rax + QUAD $0x000000a024848948 // mov qword [rsp + 160], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01200d48; WORD $0x0000 // or rax, 288 - QUAD $0x0000009824848948 // mov qword [rsp + 152], rax + QUAD $0x000000b024848948 // mov qword [rsp + 176], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01400d48; WORD $0x0000 // or rax, 320 - QUAD $0x0000014024848948 // mov qword [rsp + 320], rax + LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x02000d48; WORD $0x0000 // or rax, 512 WORD $0x8948; BYTE $0xc1 // mov rcx, rax @@ -12556,1918 +13185,1931 @@ LBB2_166: LONG $0x1a04b60f // movzx eax, byte [rdx + rbx] LONG $0xd86ef9c5 // vmovd xmm3, eax LONG $0x0a44b60f; BYTE $0x01 // movzx eax, byte [rdx + rcx + 1] + WORD $0x8948; BYTE $0xce // mov rsi, rcx LONG $0xe06ef9c5 // vmovd xmm4, eax + WORD $0x8948; BYTE $0xd9 // mov rcx, rbx LONG $0x1a44b60f; BYTE $0x01 // movzx eax, byte [rdx + rbx + 1] LONG $0xd06e79c5 // vmovd xmm10, eax - LONG $0x0a44b60f; BYTE $0x02 // movzx eax, byte [rdx + rcx + 2] - WORD $0x8948; BYTE $0xcf // mov rdi, rcx + LONG $0x3244b60f; BYTE $0x02 // movzx eax, byte [rdx + rsi + 2] LONG $0xc86ef9c5 // vmovd xmm1, eax QUAD $0x0001e0248c7ff9c5; BYTE $0x00 // vmovdqa oword [rsp + 480], xmm1 - WORD $0x8948; BYTE $0xd9 // mov rcx, rbx LONG $0x1a44b60f; BYTE $0x02 // movzx eax, byte [rdx + rbx + 2] LONG $0xc86ef9c5 // vmovd xmm1, eax QUAD $0x0001c0248c7ff9c5; BYTE $0x00 // vmovdqa oword [rsp + 448], xmm1 - LONG $0x3a44b60f; BYTE $0x03 // movzx eax, byte [rdx + rdi + 3] + LONG $0x3244b60f; BYTE $0x03 // movzx eax, byte [rdx + rsi + 3] LONG $0xd86e79c5 // vmovd xmm11, eax LONG $0x1a44b60f; BYTE $0x03 // movzx eax, byte [rdx + rbx + 3] LONG $0xc06e79c5 // vmovd xmm8, eax - LONG $0x3a44b60f; BYTE $0x04 // movzx eax, byte [rdx + rdi + 4] + LONG $0x3244b60f; BYTE $0x04 // movzx eax, byte [rdx + rsi + 4] LONG $0xc86ef9c5 // vmovd xmm1, eax QUAD $0x0001a0248c7ff9c5; BYTE $0x00 // vmovdqa oword [rsp + 416], xmm1 LONG $0x1a44b60f; BYTE $0x04 // movzx eax, byte [rdx + rbx + 4] LONG $0xe86e79c5 // vmovd xmm13, eax - LONG $0x3a44b60f; BYTE $0x05 // movzx eax, byte [rdx + rdi + 5] + LONG $0x3244b60f; BYTE $0x05 // movzx eax, byte [rdx + rsi + 5] LONG $0xf06e79c5 // vmovd xmm14, eax LONG $0x1a44b60f; BYTE $0x05 // movzx eax, byte [rdx + rbx + 5] LONG $0xf06ef9c5 // vmovd xmm6, eax - LONG $0x3a44b60f; BYTE $0x06 // movzx eax, byte [rdx + rdi + 6] - QUAD $0x0000010024bc8948 // mov qword [rsp + 256], rdi + LONG $0x3244b60f; BYTE $0x06 // 
movzx eax, byte [rdx + rsi + 6] + QUAD $0x000000f024b48948 // mov qword [rsp + 240], rsi LONG $0xe06e79c5 // vmovd xmm12, eax LONG $0x1a44b60f; BYTE $0x06 // movzx eax, byte [rdx + rbx + 6] LONG $0xf86ef9c5 // vmovd xmm7, eax - LONG $0x3a44b60f; BYTE $0x07 // movzx eax, byte [rdx + rdi + 7] + LONG $0x3244b60f; BYTE $0x07 // movzx eax, byte [rdx + rsi + 7] LONG $0xd06ef9c5 // vmovd xmm2, eax LONG $0x1a44b60f; BYTE $0x07 // movzx eax, byte [rdx + rbx + 7] LONG $0xc86ef9c5 // vmovd xmm1, eax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01600d48; WORD $0x0000 // or rax, 352 - QUAD $0x000000d824848948 // mov qword [rsp + 216], rax + QUAD $0x0000008024848948 // mov qword [rsp + 128], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01800d48; WORD $0x0000 // or rax, 384 - QUAD $0x0000012024848948 // mov qword [rsp + 288], rax + LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01a00d48; WORD $0x0000 // or rax, 416 LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax + QUAD $0x0000012024848948 // mov qword [rsp + 288], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01e00d48; WORD $0x0000 // or rax, 480 - LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + QUAD $0x0000014024848948 // mov qword [rsp + 320], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x02200d48; WORD $0x0000 // or rax, 544 - QUAD $0x000000e824848948 // mov qword [rsp + 232], rax - LONG $0x40cb8148; WORD $0x0002; BYTE $0x00 // or rbx, 576 - QUAD $0x000000a8249c8948 // mov qword [rsp + 168], rbx - WORD $0x8948; BYTE $0xc8 // mov rax, rcx + LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax + WORD $0x8948; BYTE $0xd8 // mov rax, rbx + LONG $0x02400d48; WORD $0x0000 // or rax, 576 + QUAD $0x0000010824848948 // mov qword [rsp + 264], rax + WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x02600d48; WORD $0x0000 // or rax, 608 - LONG $0x24448948; BYTE $0x70 // mov qword [rsp + 112], rax - WORD $0x8949; BYTE $0xcc // mov r12, rcx - LONG $0x80cc8149; WORD $0x0002; BYTE $0x00 // or r12, 640 - QUAD $0x000000f024a4894c // mov qword [rsp + 240], r12 - WORD $0x8949; BYTE $0xce // mov r14, rcx - LONG $0xa0ce8149; WORD $0x0002; BYTE $0x00 // or r14, 672 - QUAD $0x000000f824b4894c // mov qword [rsp + 248], r14 - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - LONG $0x02c00d48; WORD $0x0000 // or rax, 704 - LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - LONG $0x02e00d48; WORD $0x0000 // or rax, 736 - WORD $0x8948; BYTE $0xc7 // mov rdi, rax - WORD $0x8949; BYTE $0xc9 // mov r9, rcx - LONG $0x00c98149; WORD $0x0003; BYTE $0x00 // or r9, 768 - QUAD $0x000000c0248c894c // mov qword [rsp + 192], r9 - WORD $0x8949; BYTE $0xcf // mov r15, rcx - LONG $0x20cf8149; WORD $0x0003; BYTE $0x00 // or r15, 800 - QUAD $0x000000b824bc894c // mov qword [rsp + 184], r15 - WORD $0x8949; BYTE $0xcb // mov r11, rcx - LONG $0x40cb8149; WORD $0x0003; BYTE $0x00 // or r11, 832 - QUAD $0x000000e0249c894c // mov qword [rsp + 224], r11 - WORD $0x8949; BYTE $0xca // mov r10, rcx - LONG $0x60ca8149; WORD $0x0003; BYTE $0x00 // or r10, 864 - LONG $0x2454894c; BYTE $0x58 // mov qword [rsp + 88], r10 - WORD $0x8949; BYTE $0xc8 // mov r8, rcx - LONG $0x80c88149; WORD $0x0003; BYTE $0x00 // or r8, 896 - QUAD $0x000000802484894c // mov qword [rsp + 128], r8 - WORD $0x8948; BYTE $0xce // mov 
rsi, rcx - LONG $0xa0ce8148; WORD $0x0003; BYTE $0x00 // or rsi, 928 + QUAD $0x000000c824848948 // mov qword [rsp + 200], rax + WORD $0x8949; BYTE $0xdb // mov r11, rbx + LONG $0x80cb8149; WORD $0x0002; BYTE $0x00 // or r11, 640 + LONG $0x245c894c; BYTE $0x70 // mov qword [rsp + 112], r11 + WORD $0x8948; BYTE $0xd8 // mov rax, rbx + LONG $0x02a00d48; WORD $0x0000 // or rax, 672 + QUAD $0x000000b824848948 // mov qword [rsp + 184], rax + WORD $0x8949; BYTE $0xdc // mov r12, rbx + LONG $0xc0cc8149; WORD $0x0002; BYTE $0x00 // or r12, 704 + QUAD $0x0000009024a4894c // mov qword [rsp + 144], r12 + WORD $0x8948; BYTE $0xde // mov rsi, rbx + LONG $0xe0ce8148; WORD $0x0002; BYTE $0x00 // or rsi, 736 QUAD $0x000000d024b48948 // mov qword [rsp + 208], rsi - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x00000108248c8948 // mov qword [rsp + 264], rcx + WORD $0x8949; BYTE $0xdf // mov r15, rbx + LONG $0x00cf8149; WORD $0x0003; BYTE $0x00 // or r15, 768 + QUAD $0x000000e024bc894c // mov qword [rsp + 224], r15 + WORD $0x8949; BYTE $0xd9 // mov r9, rbx + LONG $0x20c98149; WORD $0x0003; BYTE $0x00 // or r9, 800 + QUAD $0x000000e8248c894c // mov qword [rsp + 232], r9 + WORD $0x8949; BYTE $0xd8 // mov r8, rbx + LONG $0x40c88149; WORD $0x0003; BYTE $0x00 // or r8, 832 + LONG $0x2444894c; BYTE $0x58 // mov qword [rsp + 88], r8 + WORD $0x8948; BYTE $0xdf // mov rdi, rbx + LONG $0x60cf8148; WORD $0x0003; BYTE $0x00 // or rdi, 864 + QUAD $0x000000d824bc8948 // mov qword [rsp + 216], rdi + WORD $0x8949; BYTE $0xde // mov r14, rbx + LONG $0x80ce8149; WORD $0x0003; BYTE $0x00 // or r14, 896 + LONG $0x2474894c; BYTE $0x50 // mov qword [rsp + 80], r14 + WORD $0x8949; BYTE $0xda // mov r10, rbx + LONG $0xa0ca8149; WORD $0x0003; BYTE $0x00 // or r10, 928 + LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 + WORD $0x8948; BYTE $0xd8 // mov rax, rbx + QUAD $0x00000100249c8948 // mov qword [rsp + 256], rbx LONG $0x03c00d48; WORD $0x0000 // or rax, 960 - LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax + LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax LONG $0xe0c98148; WORD $0x0003; BYTE $0x00 // or rcx, 992 - LONG $0x244c8948; BYTE $0x50 // mov qword [rsp + 80], rcx - QUAD $0x000000e824ac8b4c // mov r13, qword [rsp + 232] + LONG $0x244c8948; BYTE $0x60 // mov qword [rsp + 96], rcx + LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] LONG $0x207923c4; WORD $0x2a0c; BYTE $0x01 // vpinsrb xmm9, xmm0, byte [rdx + r13], 1 + QUAD $0x00000108249c8b48 // mov rbx, qword [rsp + 264] LONG $0x2031e3c4; WORD $0x1a04; BYTE $0x02 // vpinsrb xmm0, xmm9, byte [rdx + rbx], 2 - LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] + QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x03 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 3 - LONG $0x2079a3c4; WORD $0x2204; BYTE $0x04 // vpinsrb xmm0, xmm0, byte [rdx + r12], 4 - LONG $0x2079a3c4; WORD $0x3204; BYTE $0x05 // vpinsrb xmm0, xmm0, byte [rdx + r14], 5 - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] - LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x06 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 6 - LONG $0x2079e3c4; WORD $0x3a04; BYTE $0x07 // vpinsrb xmm0, xmm0, byte [rdx + rdi], 7 - WORD $0x8949; BYTE $0xfd // mov r13, rdi - QUAD $0x000000c824bc8948 // mov qword [rsp + 200], rdi - LONG $0x2079a3c4; WORD $0x0a04; BYTE $0x08 // vpinsrb xmm0, xmm0, byte [rdx + r9], 8 - LONG $0x2079a3c4; WORD $0x3a04; BYTE $0x09 // vpinsrb xmm0, xmm0, byte [rdx + r15], 9 - LONG $0x2079a3c4; WORD $0x1a04; BYTE $0x0a // 
vpinsrb xmm0, xmm0, byte [rdx + r11], 10 - LONG $0x2079a3c4; WORD $0x1204; BYTE $0x0b // vpinsrb xmm0, xmm0, byte [rdx + r10], 11 - LONG $0x2079a3c4; WORD $0x0204; BYTE $0x0c // vpinsrb xmm0, xmm0, byte [rdx + r8], 12 - LONG $0x2079e3c4; WORD $0x3204; BYTE $0x0d // vpinsrb xmm0, xmm0, byte [rdx + rsi], 13 + LONG $0x2079a3c4; WORD $0x1a04; BYTE $0x04 // vpinsrb xmm0, xmm0, byte [rdx + r11], 4 + QUAD $0x000000b8249c8b48 // mov rbx, qword [rsp + 184] + LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x05 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 5 + LONG $0x2079a3c4; WORD $0x2204; BYTE $0x06 // vpinsrb xmm0, xmm0, byte [rdx + r12], 6 + LONG $0x2079e3c4; WORD $0x3204; BYTE $0x07 // vpinsrb xmm0, xmm0, byte [rdx + rsi], 7 + LONG $0x2079a3c4; WORD $0x3a04; BYTE $0x08 // vpinsrb xmm0, xmm0, byte [rdx + r15], 8 + LONG $0x2079a3c4; WORD $0x0a04; BYTE $0x09 // vpinsrb xmm0, xmm0, byte [rdx + r9], 9 + LONG $0x2079a3c4; WORD $0x0204; BYTE $0x0a // vpinsrb xmm0, xmm0, byte [rdx + r8], 10 + LONG $0x2079e3c4; WORD $0x3a04; BYTE $0x0b // vpinsrb xmm0, xmm0, byte [rdx + rdi], 11 + LONG $0x2079a3c4; WORD $0x3204; BYTE $0x0c // vpinsrb xmm0, xmm0, byte [rdx + r14], 12 + LONG $0x2079a3c4; WORD $0x1204; BYTE $0x0d // vpinsrb xmm0, xmm0, byte [rdx + r10], 13 LONG $0x2079e3c4; WORD $0x0204; BYTE $0x0e // vpinsrb xmm0, xmm0, byte [rdx + rax], 14 LONG $0x2079e3c4; WORD $0x0a04; BYTE $0x0f // vpinsrb xmm0, xmm0, byte [rdx + rcx], 15 - LONG $0x24748b4c; BYTE $0x78 // mov r14, qword [rsp + 120] + QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] LONG $0x2061a3c4; WORD $0x321c; BYTE $0x01 // vpinsrb xmm3, xmm3, byte [rdx + r14], 1 - LONG $0x24548b4c; BYTE $0x40 // mov r10, qword [rsp + 64] - LONG $0x2061a3c4; WORD $0x121c; BYTE $0x02 // vpinsrb xmm3, xmm3, byte [rdx + r10], 2 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] - LONG $0x2061a3c4; WORD $0x221c; BYTE $0x03 // vpinsrb xmm3, xmm3, byte [rdx + r12], 3 - LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] - LONG $0x2061a3c4; WORD $0x021c; BYTE $0x04 // vpinsrb xmm3, xmm3, byte [rdx + r8], 4 - LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] + LONG $0x24648b4c; BYTE $0x78 // mov r12, qword [rsp + 120] + LONG $0x2061a3c4; WORD $0x221c; BYTE $0x02 // vpinsrb xmm3, xmm3, byte [rdx + r12], 2 + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] + LONG $0x2061a3c4; WORD $0x021c; BYTE $0x03 // vpinsrb xmm3, xmm3, byte [rdx + r8], 3 + QUAD $0x000000a8248c8b4c // mov r9, qword [rsp + 168] + LONG $0x2061a3c4; WORD $0x0a1c; BYTE $0x04 // vpinsrb xmm3, xmm3, byte [rdx + r9], 4 + QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] LONG $0x2061a3c4; WORD $0x1a1c; BYTE $0x05 // vpinsrb xmm3, xmm3, byte [rdx + r11], 5 - QUAD $0x000000a0248c8b4c // mov r9, qword [rsp + 160] - LONG $0x2061a3c4; WORD $0x0a1c; BYTE $0x06 // vpinsrb xmm3, xmm3, byte [rdx + r9], 6 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] + QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] + LONG $0x2061a3c4; WORD $0x121c; BYTE $0x06 // vpinsrb xmm3, xmm3, byte [rdx + r10], 6 + QUAD $0x000000f824bc8b4c // mov r15, qword [rsp + 248] LONG $0x2061a3c4; WORD $0x3a1c; BYTE $0x07 // vpinsrb xmm3, xmm3, byte [rdx + r15], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] LONG $0x2061e3c4; WORD $0x321c; BYTE $0x08 // vpinsrb xmm3, xmm3, byte [rdx + rsi], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] LONG $0x2061e3c4; WORD $0x021c; BYTE $0x09 // 
vpinsrb xmm3, xmm3, byte [rdx + rax], 9 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] + LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] LONG $0x2061e3c4; WORD $0x1a1c; BYTE $0x0a // vpinsrb xmm3, xmm3, byte [rdx + rbx], 10 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] LONG $0x2061e3c4; WORD $0x0a1c; BYTE $0x0b // vpinsrb xmm3, xmm3, byte [rdx + rcx], 11 - QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x0c // vpinsrb xmm3, xmm3, byte [rdx + rdi], 12 LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x0d // vpinsrb xmm3, xmm3, byte [rdx + rdi], 13 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x0e // vpinsrb xmm3, xmm3, byte [rdx + rdi], 14 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x0f // vpinsrb xmm3, xmm3, byte [rdx + rdi], 15 - QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x01013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 1 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] - QUAD $0x02013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 2 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x0000010824ac8b4c // mov r13, qword [rsp + 264] + QUAD $0x02012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 2 + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] QUAD $0x03013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 3 - QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] - QUAD $0x04013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 4 - QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] + LONG $0x246c8b4c; BYTE $0x70 // mov r13, qword [rsp + 112] + QUAD $0x04012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 4 + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] QUAD $0x05013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 5 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] - QUAD $0x06013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 6 + QUAD $0x0000009024ac8b4c // mov r13, qword [rsp + 144] + QUAD $0x06012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 6 + QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] QUAD $0x07012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 7 - QUAD $0x000000c024ac8b4c // mov r13, qword [rsp + 192] - QUAD $0x08012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 8 - QUAD $0x000000b824ac8b4c // mov r13, qword [rsp + 184] - QUAD $0x09012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 9 QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] - QUAD $0x0a013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 10 + QUAD $0x08013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 8 + QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] + QUAD $0x09013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 9 LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + QUAD $0x0a013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 10 + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] QUAD $0x0b013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 11 - 
QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] + LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] QUAD $0x0c013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 12 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x0d013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x0e013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 14 - LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0f013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 15 QUAD $0x0101326c2029a3c4 // vpinsrb xmm5, xmm10, byte [rdx + r14 + 1], 1 - QUAD $0x0201126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 1], 2 - QUAD $0x0301226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 1], 3 - QUAD $0x0401026c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r8 + 1], 4 + QUAD $0x0201226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 1], 2 + WORD $0x894c; BYTE $0xe7 // mov rdi, r12 + QUAD $0x0301026c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r8 + 1], 3 + QUAD $0x04010a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 1], 4 QUAD $0x05011a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 1], 5 - QUAD $0x06010a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 1], 6 + QUAD $0x0601126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 1], 6 QUAD $0x07013a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r15 + 1], 7 QUAD $0x0801326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 1], 8 QUAD $0x0901026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 9 QUAD $0x0a011a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 1], 10 QUAD $0x0b010a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 1], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 12 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 14 LONG $0x386563c4; WORD $0x01f8 // vinserti128 ymm15, ymm3, xmm0, 1 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0f0102442051e3c4 // vpinsrb xmm0, xmm5, byte [rdx + rax + 1], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x0274b60f; BYTE $0x08 // movzx esi, byte [rdx + rax + 8] LONG $0xce6e79c5 // vmovd xmm9, esi LONG $0x387de3c4; WORD $0x01c4 // vinserti128 ymm0, ymm0, xmm4, 1 QUAD $0x0004c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1216], ymm0 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] LONG $0x0274b60f; BYTE $0x08 // movzx esi, byte [rdx + rax + 8] LONG $0xd66e79c5 // vmovd xmm10, esi - QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] + LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] QUAD $0x0001e024846ff9c5; BYTE $0x00 // vmovdqa xmm0, oword [rsp + 480] QUAD $0x010202442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 2], 1 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] 
- QUAD $0x02020a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 2], 2 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] + QUAD $0x00000108248c8b4c // mov r9, qword [rsp + 264] + QUAD $0x02020a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 2], 2 + QUAD $0x000000c824948b4c // mov r10, qword [rsp + 200] QUAD $0x030212442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 2], 3 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x040202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x050202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 5 - LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] - QUAD $0x06020a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 2], 6 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] - QUAD $0x07023a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 2], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] - QUAD $0x080202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 8 - WORD $0x894d; BYTE $0xec // mov r12, r13 - QUAD $0x09022a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 2], 9 - QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x060202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 6 + QUAD $0x07022a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 2], 7 + QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] + QUAD $0x08020a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 2], 8 + QUAD $0x000000e824a48b4c // mov r12, qword [rsp + 232] + QUAD $0x090222442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 2], 9 + LONG $0x246c8b4c; BYTE $0x58 // mov r13, qword [rsp + 88] QUAD $0x0a022a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 2], 10 - LONG $0x245c8b4c; BYTE $0x58 // mov r11, qword [rsp + 88] + QUAD $0x000000d8249c8b4c // mov r11, qword [rsp + 216] QUAD $0x0b021a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 2], 11 - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] + LONG $0x24748b4c; BYTE $0x50 // mov r14, qword [rsp + 80] QUAD $0x0c0232442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 2], 12 - QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] + LONG $0x247c8b4c; BYTE $0x48 // mov r15, qword [rsp + 72] QUAD $0x0d023a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 2], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0e0202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0f0202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 15 - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] QUAD $0x0001c0249c6ff9c5; BYTE $0x00 // vmovdqa xmm3, oword [rsp + 448] QUAD $0x0102025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 2], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x0202325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 2 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x02023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0302325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 3 - 
LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] QUAD $0x0402325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] QUAD $0x0502325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x0602325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] QUAD $0x0702325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 7 - QUAD $0x00000088249c8b48 // mov rbx, qword [rsp + 136] + QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] QUAD $0x08021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 8 - QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] - QUAD $0x09021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 9 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] - QUAD $0x0a021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 10 - QUAD $0x000000d8249c8b48 // mov rbx, qword [rsp + 216] - QUAD $0x0b021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 11 - QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] - QUAD $0x0c021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 12 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x0d021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 13 - LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] - QUAD $0x0e021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 14 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] - QUAD $0x0f021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 15 + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x09023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 9 + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + QUAD $0x0a023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 10 + QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] + QUAD $0x0b023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 11 + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + QUAD $0x0c023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 12 + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x0d023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 13 + QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] + QUAD $0x0e023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 14 + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] + QUAD $0x0f023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 15 QUAD $0x010302642021a3c4 // vpinsrb xmm4, xmm11, byte [rdx + r8 + 3], 1 - QUAD $0x02030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 2 + QUAD $0x02030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 2 QUAD $0x030312642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r10 + 3], 3 - QUAD $0x000000f0249c8b48 // mov rbx, qword [rsp + 240] - QUAD $0x04031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 4 - QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] - QUAD $0x05030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 5 - QUAD $0x06030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 6 - QUAD $0x07033a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 3], 7 - QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 
192] - QUAD $0x08033a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 3], 8 + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x04033a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 3], 4 + QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] + QUAD $0x050302642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 3], 5 + QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] + QUAD $0x06033a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 3], 6 + QUAD $0x000000d0248c8b4c // mov r9, qword [rsp + 208] + QUAD $0x07030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 7 + QUAD $0x08030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 8 QUAD $0x090322642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 3], 9 QUAD $0x0a032a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 3], 10 QUAD $0x0b031a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 3], 11 + WORD $0x894d; BYTE $0xdc // mov r12, r11 QUAD $0x0c0332642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r14 + 3], 12 QUAD $0x0d033a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 3], 13 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] - QUAD $0x0e030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 14 - LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] - QUAD $0x0f033a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 3], 15 - QUAD $0x0103026c2039e3c4 // vpinsrb xmm5, xmm8, byte [rdx + rax + 3], 1 LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] - QUAD $0x02031a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 3], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0e031a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 3], 14 + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + QUAD $0x0f030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 15 + QUAD $0x0103026c2039e3c4 // vpinsrb xmm5, xmm8, byte [rdx + rax + 3], 1 + WORD $0x8949; BYTE $0xc5 // mov r13, rax + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x0203026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0303026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0403026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 4 - LONG $0x24548b4c; BYTE $0x60 // mov r10, qword [rsp + 96] - QUAD $0x0503126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 3], 5 - QUAD $0x000000a024b48b4c // mov r14, qword [rsp + 160] - QUAD $0x0603326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 3], 6 - QUAD $0x0703326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 3], 7 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] - QUAD $0x0803026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 8 - QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] - QUAD $0x09031a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 3], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0503026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 5 + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x0603026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 6 + QUAD $0x0703326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 3], 7 + QUAD $0x08031a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 3], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0903026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 9 + LONG 
$0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 12 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 13 LONG $0x3865e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm3, xmm0, 1 QUAD $0x0001e024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 480], ymm0 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e0302442051e3c4 // vpinsrb xmm0, xmm5, byte [rdx + rax + 3], 14 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x0274b60f; BYTE $0x09 // movzx esi, byte [rdx + rax + 9] + QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] + LONG $0x0a74b60f; BYTE $0x09 // movzx esi, byte [rdx + rcx + 9] LONG $0xc66e79c5 // vmovd xmm8, esi - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x0f0322442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 3], 15 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f0302442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 3], 15 LONG $0x387de3c4; WORD $0x01c4 // vinserti128 ymm0, ymm0, xmm4, 1 QUAD $0x0001c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 448], ymm0 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] - LONG $0x0274b60f; BYTE $0x09 // movzx esi, byte [rdx + rax + 9] + QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] + LONG $0x0a74b60f; BYTE $0x09 // movzx esi, byte [rdx + rcx + 9] LONG $0xde6e79c5 // vmovd xmm11, esi + LONG $0x24748b4c; BYTE $0x68 // mov r14, qword [rsp + 104] QUAD $0x0001a024846ff9c5; BYTE $0x00 // vmovdqa xmm0, oword [rsp + 416] - QUAD $0x010402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 4], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] - QUAD $0x020402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 2 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x030402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 3 - QUAD $0x000000f024ac8b4c // mov r13, qword [rsp + 240] - QUAD $0x04042a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 4], 4 - QUAD $0x05040a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 4], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x060402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 6 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x070402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 7 - QUAD $0x08043a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 4], 8 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x090402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 9 - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] - QUAD $0x0a0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 10 + QUAD $0x010432442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 4], 1 + QUAD $0x0000010824bc8b4c // mov r15, qword [rsp + 264] + QUAD $0x02043a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 4], 2 + QUAD $0x030412442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 4], 3 + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + 
QUAD $0x04043a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 4], 4 + WORD $0x894c; BYTE $0xc1 // mov rcx, r8 + QUAD $0x050402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 4], 5 + QUAD $0x00000090249c8b48 // mov rbx, qword [rsp + 144] + QUAD $0x06041a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 4], 6 + QUAD $0x07040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 7 + QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] + QUAD $0x08040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 8 + QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] + QUAD $0x090402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 4], 9 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 12 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] - QUAD $0x0d0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 13 - QUAD $0x0e040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 14 - QUAD $0x0f043a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 4], 15 - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x0104025c2011e3c4 // vpinsrb xmm3, xmm13, byte [rdx + rax + 4], 1 - QUAD $0x02041a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 4], 2 - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] - QUAD $0x03041a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 4], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x0404025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 4 - QUAD $0x0504125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 4], 5 - WORD $0x894c; BYTE $0xf6 // mov rsi, r14 - QUAD $0x0604325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 4], 6 - QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x0704125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 4], 7 - QUAD $0x00000088248c8b4c // mov r9, qword [rsp + 136] - QUAD $0x08040a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 4], 8 - QUAD $0x09041a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 4], 9 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] - QUAD $0x0a041a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 4], 10 - QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] - QUAD $0x0b04325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 4], 11 - QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] - QUAD $0x0c041a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 4], 12 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x0d041a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 4], 13 - LONG $0x247c8b4c; BYTE $0x48 // mov r15, qword [rsp + 72] - QUAD $0x0e043a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 4], 14 - QUAD $0x0f04225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 4], 15 - QUAD $0x010502642009a3c4 // vpinsrb xmm4, xmm14, byte [rdx + r8 + 5], 1 - QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] + QUAD $0x0a0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 10 + QUAD $0x0b0422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 4], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c0432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 4], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d0432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 4], 13 + QUAD $0x0e041a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 
+ 4], 14 + LONG $0x24648b4c; BYTE $0x60 // mov r12, qword [rsp + 96] + QUAD $0x0f0422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 4], 15 + QUAD $0x01042a5c2011a3c4 // vpinsrb xmm3, xmm13, byte [rdx + r13 + 4], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0204325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0304325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x0404325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x0504025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 5 + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x0604325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 6 + QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] + QUAD $0x0704025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0804025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0904025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 10 + QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] + QUAD $0x0b041a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 4], 11 + LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] + QUAD $0x0c041a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 4], 12 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0d04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 14 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 15 + QUAD $0x010532642009a3c4 // vpinsrb xmm4, xmm14, byte [rdx + r14 + 5], 1 QUAD $0x02053a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 5], 2 - LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] - QUAD $0x03051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 3 - QUAD $0x04052a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 5], 4 + WORD $0x894d; BYTE $0xfe // mov r14, r15 + QUAD $0x030512642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r10 + 5], 3 + QUAD $0x04053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 4 QUAD $0x05050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 5 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x06050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 6 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] - QUAD $0x07050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 7 - QUAD $0x08053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 8 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x09050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 9 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] - QUAD $0x0a050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 10 - LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] - QUAD $0x0b053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 11 - QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] - QUAD $0x0c053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi 
+ 5], 12 - QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] - QUAD $0x0d052a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 5], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] - QUAD $0x0e053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 14 - LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] - QUAD $0x0f053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 15 - LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] - QUAD $0x01053a6c2049e3c4 // vpinsrb xmm5, xmm6, byte [rdx + rdi + 5], 1 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] - QUAD $0x02053a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 5], 2 - QUAD $0x03051a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 5], 3 + WORD $0x8949; BYTE $0xca // mov r10, rcx + QUAD $0x06051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 6 + QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] + QUAD $0x07053a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 5], 7 + QUAD $0x08050a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 5], 8 + QUAD $0x090502642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 5], 9 + LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] + QUAD $0x0a0502642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 5], 10 + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x0b050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 11 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c0502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0d0502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 13 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e0502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 14 + QUAD $0x0f0522642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 5], 15 + QUAD $0x01052a6c2049a3c4 // vpinsrb xmm5, xmm6, byte [rdx + r13 + 5], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x0205026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0305026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 3 + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0405026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x0505026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 5 QUAD $0x0605326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 5], 6 - QUAD $0x0705126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 5], 7 - QUAD $0x08050a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 5], 8 - QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] - QUAD $0x09050a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 5], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x000000f8249c8b48 // mov rbx, qword [rsp + 248] + QUAD $0x07051a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 5], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0805026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0905026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 10 - QUAD $0x0b05326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 
5], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] - QUAD $0x0c05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 12 + QUAD $0x0000008024ac8b4c // mov r13, qword [rsp + 128] + QUAD $0x0b052a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 5], 11 + QUAD $0x0c051a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 5], 12 + WORD $0x894d; BYTE $0xdc // mov r12, r11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 14 LONG $0x386563c4; WORD $0x01f0 // vinserti128 ymm14, ymm3, xmm0, 1 - QUAD $0x0f0522442051a3c4 // vpinsrb xmm0, xmm5, byte [rdx + r12 + 5], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f0502442051e3c4 // vpinsrb xmm0, xmm5, byte [rdx + rax + 5], 15 + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x0274b60f; BYTE $0x0a // movzx esi, byte [rdx + rax + 10] LONG $0xde6ef9c5 // vmovd xmm3, esi LONG $0x387de3c4; WORD $0x01c4 // vinserti128 ymm0, ymm0, xmm4, 1 QUAD $0x0001a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 416], ymm0 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] - LONG $0x0274b60f; BYTE $0x0a // movzx esi, byte [rdx + rax + 10] + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3274b60f; BYTE $0x0a // movzx esi, byte [rdx + rsi + 10] LONG $0xe66ef9c5 // vmovd xmm4, esi - WORD $0x894d; BYTE $0xc6 // mov r14, r8 - QUAD $0x010602442019a3c4 // vpinsrb xmm0, xmm12, byte [rdx + r8 + 6], 1 - QUAD $0x02063a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 6], 2 - QUAD $0x03061a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 6], 3 - QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] - QUAD $0x04061a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 6], 4 - QUAD $0x000000f824848b4c // mov r8, qword [rsp + 248] - QUAD $0x050602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 6], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + QUAD $0x01063a442019e3c4 // vpinsrb xmm0, xmm12, byte [rdx + rdi + 6], 1 + QUAD $0x020632442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 6], 2 + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x030602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 3 + LONG $0x24748b4c; BYTE $0x70 // mov r14, qword [rsp + 112] + QUAD $0x040632442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 6], 4 + QUAD $0x050612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 6], 5 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] QUAD $0x060602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 6 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] - QUAD $0x07063a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 6], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] - QUAD $0x080602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 8 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x090602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 9 - QUAD $0x0a060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 10 - LONG $0x24548b4c; BYTE $0x58 // mov r10, qword [rsp + 88] - QUAD $0x0b0612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 6], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD 
$0x0c0602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 12 - QUAD $0x0d062a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 6], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 15 - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] + QUAD $0x07063a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 6], 7 + QUAD $0x08060a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 6], 8 + QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] + QUAD $0x090632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 6], 9 + QUAD $0x0a0602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 6], 10 + QUAD $0x0b060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 11 + LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] + QUAD $0x0c0602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 6], 12 + LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] + QUAD $0x0d060a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 6], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e0632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 6], 14 + LONG $0x24548b4c; BYTE $0x60 // mov r10, qword [rsp + 96] + QUAD $0x0f0612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 6], 15 + QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] QUAD $0x01060a6c2041e3c4 // vpinsrb xmm5, xmm7, byte [rdx + rcx + 6], 1 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] - QUAD $0x02060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] - QUAD $0x03060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 3 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] - QUAD $0x04060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0206326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0306326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x0406326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 4 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] QUAD $0x0506326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 5 - QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] - QUAD $0x06061a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 6], 6 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] - QUAD $0x07060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x08060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 8 - QUAD $0x09060a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 6], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] + QUAD $0x06060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 6 + QUAD $0x07061a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 6], 7 + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] + QUAD $0x08061a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 6], 8 + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] + QUAD $0x09061a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 6], 9 + LONG 
$0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0a060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 10 - QUAD $0x000000d824a48b4c // mov r12, qword [rsp + 216] - QUAD $0x0b06226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 6], 11 - QUAD $0x00000120248c8b4c // mov r9, qword [rsp + 288] - QUAD $0x0c060a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 6], 12 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] - QUAD $0x0d062a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 6], 13 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + WORD $0x894d; BYTE $0xef // mov r15, r13 + QUAD $0x0b062a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 6], 11 + QUAD $0x0c06226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 6], 12 + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x0d060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 13 + QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] QUAD $0x0e060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 14 - LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] + QUAD $0x0000014024ac8b4c // mov r13, qword [rsp + 320] QUAD $0x0f062a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 6], 15 - QUAD $0x010732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 7], 1 - QUAD $0x02073a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 7], 2 - LONG $0x246c8b4c; BYTE $0x70 // mov r13, qword [rsp + 112] - QUAD $0x03072a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 7], 3 - QUAD $0x04071a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 7], 4 - QUAD $0x050702542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 7], 5 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x06070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 6 - QUAD $0x07073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 7 - QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] - QUAD $0x080732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 7], 8 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x09070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 9 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] - QUAD $0x0a070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 10 - QUAD $0x0b0712542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 7], 11 - QUAD $0x0c0702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 12 + QUAD $0x01073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 1 + QUAD $0x0000010824ac8b4c // mov r13, qword [rsp + 264] + QUAD $0x02072a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 7], 2 + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x03073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 3 + QUAD $0x040732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 7], 4 + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x05073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 5 + QUAD $0x060702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 6 QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] - QUAD $0x0d0702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 13 - LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] - QUAD $0x0e073a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 7], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 15 + QUAD $0x070702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 7 + QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 
224] + QUAD $0x08070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 8 + QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + QUAD $0x09070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 9 + LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + QUAD $0x0a073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 10 + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x0b073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 11 + QUAD $0x0c0702542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 7], 12 + QUAD $0x0d070a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 7], 13 + LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] + QUAD $0x0e070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 14 + QUAD $0x0f0712542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 7], 15 + QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] + QUAD $0x0107324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 7], 1 LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x01070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 1 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] - QUAD $0x02073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x02070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 2 + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x03070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 3 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] - QUAD $0x04073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 4 + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x04070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 4 QUAD $0x0507324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 7], 5 - QUAD $0x06071a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 7], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] - QUAD $0x0707324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 7], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x08070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 8 - QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] - QUAD $0x09073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] + QUAD $0x06070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 6 + QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] + QUAD $0x07070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 7 + QUAD $0x08071a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 7], 8 + QUAD $0x09071a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 7], 9 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0a070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 10 - QUAD $0x0b07224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 7], 11 - QUAD $0x0c070a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 7], 12 + QUAD $0x0b073a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 7], 11 + QUAD $0x0c07224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 7], 12 LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x0d070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 13 LONG $0x3855e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm5, xmm0, 1 QUAD $0x0004a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1184], ymm0 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + QUAD 
$0x00000120248c8b48 // mov rcx, qword [rsp + 288] QUAD $0x0e070a442071e3c4 // vpinsrb xmm0, xmm1, byte [rdx + rcx + 7], 14 - QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] + QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] LONG $0x0a74b60f; BYTE $0x0b // movzx esi, byte [rdx + rcx + 11] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x0f0722442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 7], 15 + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + QUAD $0x0f070a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 7], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x00048024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1152], ymm0 - QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] + QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] LONG $0x0a74b60f; BYTE $0x0b // movzx esi, byte [rdx + rcx + 11] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x01080a442031e3c4 // vpinsrb xmm0, xmm9, byte [rdx + rcx + 8], 1 - QUAD $0x000000a824848b4c // mov r8, qword [rsp + 168] - QUAD $0x020802442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 8], 2 - QUAD $0x03082a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 8], 3 - WORD $0x894d; BYTE $0xdd // mov r13, r11 - QUAD $0x04081a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 8], 4 - QUAD $0x000000f8249c8b4c // mov r11, qword [rsp + 248] - QUAD $0x05081a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 8], 5 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x06080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 6 - QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] - QUAD $0x070832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 7 - QUAD $0x080832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 8], 8 - QUAD $0x000000b824948b4c // mov r10, qword [rsp + 184] - QUAD $0x090812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 8], 9 - QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] + WORD $0x894d; BYTE $0xef // mov r15, r13 + QUAD $0x02082a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 8], 2 + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x03080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x040832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 4 + QUAD $0x000000b824ac8b4c // mov r13, qword [rsp + 184] + QUAD $0x05082a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 8], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x060832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 6 + WORD $0x8949; BYTE $0xc0 // mov r8, rax + QUAD $0x070802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 7 + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x08083a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 8], 8 + QUAD $0x000000e824a48b4c // mov r12, qword [rsp + 232] + QUAD $0x090822442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 8], 9 + LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] QUAD $0x0a081a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 8], 10 - LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] - QUAD $0x0b0832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 11 - QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] - QUAD $0x0c0832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 12 + 
QUAD $0x000000d8248c8b4c // mov r9, qword [rsp + 216] + QUAD $0x0b080a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 8], 11 + LONG $0x24548b4c; BYTE $0x50 // mov r10, qword [rsp + 80] + QUAD $0x0c0812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 8], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0d0802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 13 - QUAD $0x0e083a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 8], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e0802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 14 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0f0802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 15 + QUAD $0x0108326c2029a3c4 // vpinsrb xmm5, xmm10, byte [rdx + r14 + 8], 1 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x0108026c2029e3c4 // vpinsrb xmm5, xmm10, byte [rdx + rax + 8], 1 - LONG $0x244c8b4c; BYTE $0x40 // mov r9, qword [rsp + 64] - QUAD $0x02080a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 8], 2 - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] - QUAD $0x03083a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r15 + 8], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x0208026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0308026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] QUAD $0x0408326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 8], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x0508026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 5 - QUAD $0x000000a024b48b4c // mov r14, qword [rsp + 160] - QUAD $0x0608326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 8], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] - QUAD $0x0708026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 7 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] - QUAD $0x0808026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 8 - QUAD $0x09083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0508026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 5 + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x0608026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 6 + QUAD $0x000000f824b48b4c // mov r14, qword [rsp + 248] + QUAD $0x0708326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 8], 7 + QUAD $0x08081a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 8], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0908026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 11 - QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] - QUAD $0x0c083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 12 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] - QUAD $0x0d083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 13 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] - QUAD $0x0e083a6c2051e3c4 // vpinsrb 
xmm5, xmm5, byte [rdx + rdi + 8], 14 - QUAD $0x0f08226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 8], 15 - QUAD $0x000000e824a48b4c // mov r12, qword [rsp + 232] - QUAD $0x010922742039a3c4 // vpinsrb xmm6, xmm8, byte [rdx + r12 + 9], 1 - QUAD $0x020902742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r8 + 9], 2 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] - QUAD $0x03093a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rdi + 9], 3 - QUAD $0x04092a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r13 + 9], 4 - QUAD $0x05091a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r11 + 9], 5 - QUAD $0x06090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 6 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] - QUAD $0x07090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 7 - QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] - QUAD $0x08090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 8 - QUAD $0x090912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 9], 9 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x0c08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 12 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0d08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 14 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 15 + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x010902742039e3c4 // vpinsrb xmm6, xmm8, byte [rdx + rax + 9], 1 + QUAD $0x02093a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r15 + 9], 2 + QUAD $0x03090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x040902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 4 + QUAD $0x05092a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r13 + 9], 5 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x060902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 6 + QUAD $0x070902742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r8 + 9], 7 + QUAD $0x08093a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rdi + 9], 8 + QUAD $0x090922742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r12 + 9], 9 QUAD $0x0a091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 10 - LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] - QUAD $0x0b090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 11 - QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] - QUAD $0x0c091a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r11 + 9], 12 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] - QUAD $0x0d090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 14 - LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] - QUAD $0x0f0922742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r12 + 9], 15 - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x01090a7c2021e3c4 // vpinsrb xmm7, xmm11, byte [rdx + rcx + 9], 1 - QUAD $0x02090a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r9 + 9], 2 - QUAD $0x03093a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 9], 3 + QUAD $0x0b090a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r9 + 9], 11 + QUAD $0x0c0912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 
9], 12 + LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] + QUAD $0x0d0912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 9], 13 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e0902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 14 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x0f0902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 15 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x0109027c2021e3c4 // vpinsrb xmm7, xmm11, byte [rdx + rax + 9], 1 + LONG $0x246c8b4c; BYTE $0x78 // mov r13, qword [rsp + 120] + QUAD $0x02092a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r13 + 9], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0309027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 3 QUAD $0x0409327c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rsi + 9], 4 - LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] - QUAD $0x05092a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r13 + 9], 5 - QUAD $0x0609327c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r14 + 9], 6 - QUAD $0x00000090249c8b48 // mov rbx, qword [rsp + 144] - QUAD $0x07091a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rbx + 9], 7 - QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] - QUAD $0x08093a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 9], 8 - QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - QUAD $0x09090a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rcx + 9], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] - QUAD $0x0a090a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rcx + 9], 10 + QUAD $0x00000088249c8b48 // mov rbx, qword [rsp + 136] + QUAD $0x05091a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rbx + 9], 5 + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x0609027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 6 + QUAD $0x0709327c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r14 + 9], 7 + QUAD $0x08091a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r11 + 9], 8 + WORD $0x894d; BYTE $0xde // mov r14, r11 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0909027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 10 + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 12 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 14 LONG $0x3855e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm5, xmm0, 1 QUAD $0x00046024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1120], ymm0 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0f09026c2041e3c4 // vpinsrb xmm5, xmm7, byte [rdx + rax + 9], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x0274b60f; BYTE $0x0c // movzx esi, byte [rdx + rax + 12] LONG $0xc66ef9c5 // vmovd xmm0, esi LONG $0x3855e3c4; WORD $0x01ee 
// vinserti128 ymm5, ymm5, xmm6, 1 QUAD $0x00044024ac7ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1088], ymm5 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] LONG $0x0274b60f; BYTE $0x0c // movzx esi, byte [rdx + rax + 12] LONG $0xee6ef9c5 // vmovd xmm5, esi - QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] - QUAD $0x010a3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 10], 1 - QUAD $0x020a025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 10], 2 - LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x010a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 1 + QUAD $0x020a3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 10], 2 QUAD $0x030a0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 10], 3 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] - QUAD $0x040a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] - QUAD $0x050a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x060a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 6 - QUAD $0x000000c8248c8b4c // mov r9, qword [rsp + 200] - QUAD $0x070a0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 10], 7 - QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] - QUAD $0x080a325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 10], 8 - QUAD $0x090a125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 10], 9 - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] - QUAD $0x0a0a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 10 - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x040a3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 10], 4 + QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] + QUAD $0x050a025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 10], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x060a325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 10], 6 + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x070a325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 10], 7 + QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] + QUAD $0x080a0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 10], 8 + QUAD $0x090a225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 10], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a0a325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 10], 10 + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] QUAD $0x0b0a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 11 - QUAD $0x0c0a1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 10], 12 - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c0a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 12 QUAD $0x0d0a125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 10], 13 - LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] - QUAD $0x0e0a1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 10], 14 - QUAD $0x0f0a225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 10], 15 - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x010a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 1 LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] 
- QUAD $0x020a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 2 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x0e0a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f0a325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 10], 15 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x010a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 1 + QUAD $0x020a2a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 10], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x030a32642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rsi + 10], 3 - LONG $0x24648b4c; BYTE $0x68 // mov r12, qword [rsp + 104] - QUAD $0x040a22642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 10], 4 - QUAD $0x050a2a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 10], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] - QUAD $0x060a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 6 + QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] + QUAD $0x040a1a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 10], 4 + QUAD $0x050a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 5 + QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] + QUAD $0x060a12642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r10 + 10], 6 + QUAD $0x000000f8249c8b48 // mov rbx, qword [rsp + 248] QUAD $0x070a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 7 - QUAD $0x080a3a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 10], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x090a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 9 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] - QUAD $0x0a0a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 10 - QUAD $0x000000d824bc8b4c // mov r15, qword [rsp + 216] - QUAD $0x0b0a3a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 10], 11 + QUAD $0x080a32642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r14 + 10], 8 + QUAD $0x000000b024ac8b4c // mov r13, qword [rsp + 176] + QUAD $0x090a2a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 10], 9 + LONG $0x24748b4c; BYTE $0x28 // mov r14, qword [rsp + 40] + QUAD $0x0a0a32642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r14 + 10], 10 + QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] + QUAD $0x0b0a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 11 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x0c0a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 12 + LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] + QUAD $0x0d0a22642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 10], 13 QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] - QUAD $0x0c0a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 12 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x0d0a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 13 - LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] - QUAD $0x0e0a2a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 10], 14 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + QUAD $0x0e0a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 14 + QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] QUAD $0x0f0a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 15 - QUAD $0x010b3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 11], 1 - QUAD $0x020b024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 11], 2 + LONG 
$0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x010b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 1 + QUAD $0x020b3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 11], 2 QUAD $0x030b0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 11], 3 - QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] - QUAD $0x040b0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 11], 4 - QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] - QUAD $0x050b3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 11], 5 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] - QUAD $0x060b3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 11], 6 - QUAD $0x070b0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 11], 7 - QUAD $0x080b324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 11], 8 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] - QUAD $0x090b3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 11], 9 - QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] - QUAD $0x0a0b324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 11], 10 - LONG $0x244c8b4c; BYTE $0x58 // mov r9, qword [rsp + 88] - QUAD $0x0b0b0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 11], 11 - QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] - QUAD $0x0c0b3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 11], 12 - QUAD $0x0d0b124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 11], 13 - QUAD $0x0e0b1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 11], 14 - LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] - QUAD $0x0f0b1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 11], 15 + QUAD $0x040b3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 11], 4 + QUAD $0x050b024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 11], 5 + QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] + QUAD $0x060b0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 11], 6 + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x070b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 7 + QUAD $0x080b0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 11], 8 + QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] + QUAD $0x090b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 9 + LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + QUAD $0x0a0b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 10 + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0b0b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 11 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c0b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0d0b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 13 + LONG $0x24448b4c; BYTE $0x40 // mov r8, qword [rsp + 64] + QUAD $0x0e0b024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 11], 14 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x0f0b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 15 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x010b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 1 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] - QUAD $0x010b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 11], 1 - LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] - QUAD $0x020b1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 11], 2 + QUAD $0x020b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 11], 2 
QUAD $0x030b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 11], 3 - QUAD $0x040b22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 11], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x040b1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 11], 4 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] QUAD $0x050b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 11], 5 - QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] - QUAD $0x060b1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 11], 6 - QUAD $0x0000009024848b4c // mov r8, qword [rsp + 144] - QUAD $0x070b02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 11], 7 - QUAD $0x0000008824a48b4c // mov r12, qword [rsp + 136] - QUAD $0x080b22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 11], 8 - QUAD $0x090b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] - QUAD $0x0a0b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 10 - QUAD $0x0b0b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 11], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] - QUAD $0x0c0b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 12 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0d0b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 13 + QUAD $0x060b12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 11], 6 + QUAD $0x000000f824bc8b4c // mov r15, qword [rsp + 248] + QUAD $0x070b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 11], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x080b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 11], 8 + QUAD $0x090b2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 11], 9 + QUAD $0x0a0b32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 11], 10 + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x0b0b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 11], 11 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x0c0b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 11], 12 + QUAD $0x0d0b22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 11], 13 LONG $0x385de3c4; WORD $0x01db // vinserti128 ymm3, ymm4, xmm3, 1 QUAD $0x000420249c7ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1056], ymm3 + QUAD $0x0000012024ac8b4c // mov r13, qword [rsp + 288] QUAD $0x0e0b2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 11], 14 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x0274b60f; BYTE $0x0d // movzx esi, byte [rdx + rax + 13] + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x0d // movzx esi, byte [rdx + rsi + 13] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] - QUAD $0x0f0b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 15 + QUAD $0x0f0b1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 11], 15 LONG $0x386de3c4; WORD $0x01c9 // vinserti128 ymm1, ymm2, xmm1, 1 QUAD $0x000400248c7ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1024], ymm1 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] - LONG $0x0274b60f; BYTE $0x0d // movzx esi, byte [rdx + rax + 13] + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3274b60f; BYTE $0x0d // movzx esi, byte [rdx + rsi + 13] LONG $0xce6ef9c5 // vmovd xmm1, esi - QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] - QUAD $0x010c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 1 - QUAD $0x000000a824848b48 // mov rax, 
qword [rsp + 168] - QUAD $0x020c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 2 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x030c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 3 - QUAD $0x040c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 12], 4 - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x050c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 12], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x060c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 6 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x070c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] - QUAD $0x080c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 8 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x090c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 9 + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x010c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 1 + QUAD $0x0000010824a48b4c // mov r12, qword [rsp + 264] + QUAD $0x020c22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 12], 2 + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x030c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 12], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x040c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 4 + QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] + QUAD $0x050c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 5 + QUAD $0x060c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 12], 6 + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x070c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 12], 7 + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] + QUAD $0x080c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 8 + QUAD $0x000000e8248c8b4c // mov r9, qword [rsp + 232] + QUAD $0x090c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 12], 9 + LONG $0x24748b4c; BYTE $0x58 // mov r14, qword [rsp + 88] QUAD $0x0a0c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 12], 10 - QUAD $0x0b0c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 12], 11 - QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] - QUAD $0x0c0c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 12], 12 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] - QUAD $0x0d0c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 13 - LONG $0x246c8b4c; BYTE $0x30 // mov r13, qword [rsp + 48] - QUAD $0x0e0c2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 12], 14 - QUAD $0x0f0c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 12], 15 - QUAD $0x010c3a542051e3c4 // vpinsrb xmm2, xmm5, byte [rdx + rdi + 12], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x020c32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 12], 2 - QUAD $0x000000b024b48b4c // mov r14, qword [rsp + 176] - QUAD $0x030c32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 12], 3 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] - QUAD $0x040c3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 12], 4 - LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] - QUAD $0x050c3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 12], 5 - QUAD $0x060c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 6 - QUAD $0x070c02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 12], 7 - 
WORD $0x894c; BYTE $0xe0 // mov rax, r12 - QUAD $0x080c22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 12], 8 - QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] - QUAD $0x090c1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 12], 9 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] - QUAD $0x0a0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 10 QUAD $0x000000d8249c8b48 // mov rbx, qword [rsp + 216] - QUAD $0x0b0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 11 - QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] - QUAD $0x0c0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 12 - LONG $0x244c8b4c; BYTE $0x20 // mov r9, qword [rsp + 32] - QUAD $0x0d0c0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 12], 13 - LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] - QUAD $0x0e0c02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 12], 14 - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x0f0c22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 12], 15 - QUAD $0x000000e8249c8b48 // mov rbx, qword [rsp + 232] - QUAD $0x010d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 1 - QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] - QUAD $0x020d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 2 - LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] - QUAD $0x030d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 3 - QUAD $0x000000f0249c8b48 // mov rbx, qword [rsp + 240] - QUAD $0x040d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 4 - QUAD $0x050d125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 13], 5 - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] - QUAD $0x060d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 6 - QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] - QUAD $0x070d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 7 - QUAD $0x000000c0249c8b48 // mov rbx, qword [rsp + 192] - QUAD $0x080d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 8 - QUAD $0x000000b824a48b4c // mov r12, qword [rsp + 184] - QUAD $0x090d225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 13], 9 - QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] - QUAD $0x0a0d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 10 - LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] + QUAD $0x0b0c1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 12], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c0c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d0c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 13 + QUAD $0x0e0c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 12], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f0c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 15 + QUAD $0x010c02542051e3c4 // vpinsrb xmm2, xmm5, byte [rdx + rax + 12], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x020c32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 12], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x030c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 3 + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x040c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 4 + QUAD $0x0000008824948b4c // mov r10, qword [rsp + 136] + QUAD $0x050c12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 12], 5 + 
QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x060c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 6 + QUAD $0x070c3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 12], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x080c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 8 + QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] + QUAD $0x090c02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 12], 9 + LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] + QUAD $0x0a0c1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 12], 10 + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + QUAD $0x0b0c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 11 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x0c0c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 12 + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + QUAD $0x0d0c3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 12], 13 + QUAD $0x0e0c2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 12], 14 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f0c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 15 + LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] + QUAD $0x010d2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 13], 1 + QUAD $0x020d225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 13], 2 + QUAD $0x030d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 13], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x040d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 13], 4 + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x050d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 13], 5 + QUAD $0x0000009024a48b4c // mov r12, qword [rsp + 144] + QUAD $0x060d225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 13], 6 + QUAD $0x070d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 13], 7 + QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] + QUAD $0x080d2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 13], 8 + QUAD $0x090d0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 13], 9 + QUAD $0x0a0d325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 13], 10 QUAD $0x0b0d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 11 + LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] QUAD $0x0c0d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 13], 12 - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] - QUAD $0x0d0d125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 13], 13 - QUAD $0x0e0d2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 13], 14 - LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] - QUAD $0x0f0d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 15 - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] + LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + QUAD $0x0d0d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 13 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e0d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 13], 14 + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + QUAD $0x0f0d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 13], 15 + QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] QUAD $0x010d0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 13], 1 QUAD $0x020d324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 13], 2 - QUAD $0x030d324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 
13], 3 - QUAD $0x040d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 4 - QUAD $0x050d3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 13], 5 - QUAD $0x000000a024b48b4c // mov r14, qword [rsp + 160] - QUAD $0x060d324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 13], 6 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] - QUAD $0x070d0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 13], 7 - QUAD $0x080d024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 13], 8 - QUAD $0x090d1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 13], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] - QUAD $0x0a0d024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 13], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + QUAD $0x030d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 3 + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x040d0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 13], 4 + QUAD $0x050d124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 13], 5 + QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] + QUAD $0x060d0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 13], 6 + QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] + QUAD $0x070d324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 13], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x080d324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 13], 8 + QUAD $0x090d024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 13], 9 + QUAD $0x0a0d1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 13], 10 + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b0d024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 13], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c0d024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 13], 12 - QUAD $0x0d0d0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 13], 13 - QUAD $0x0e0d024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 13], 14 + QUAD $0x0d0d3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 13], 13 + QUAD $0x0000012024b48b4c // mov r14, qword [rsp + 288] + QUAD $0x0e0d324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 13], 14 LONG $0x386de3c4; WORD $0x01c0 // vinserti128 ymm0, ymm2, xmm0, 1 QUAD $0x0003e024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 992], ymm0 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0f0d02442071e3c4 // vpinsrb xmm0, xmm1, byte [rdx + rax + 13], 15 - QUAD $0x0000010024ac8b4c // mov r13, qword [rsp + 256] - LONG $0x74b60f42; WORD $0x0e2a // movzx esi, byte [rdx + r13 + 14] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + LONG $0x0274b60f; BYTE $0x0e // movzx esi, byte [rdx + rax + 14] LONG $0xce6ef9c5 // vmovd xmm1, esi LONG $0x387de3c4; WORD $0x01c3 // vinserti128 ymm0, ymm0, xmm3, 1 QUAD $0x0003c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 960], ymm0 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] LONG $0x0274b60f; BYTE $0x0e // movzx esi, byte [rdx + rax + 14] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x010e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 1 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] - QUAD 
$0x020e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 14], 2 - LONG $0x24448b4c; BYTE $0x70 // mov r8, qword [rsp + 112] - QUAD $0x030e024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 14], 3 - QUAD $0x000000f0248c8b4c // mov r9, qword [rsp + 240] - QUAD $0x040e0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 14], 4 - QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] - QUAD $0x050e3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 14], 5 - LONG $0x247c8b4c; BYTE $0x28 // mov r15, qword [rsp + 40] - QUAD $0x060e3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 14], 6 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] - QUAD $0x070e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 14], 7 - QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] - QUAD $0x080e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 14], 8 - QUAD $0x090e224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 14], 9 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] - QUAD $0x0a0e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 14], 10 - LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] - QUAD $0x0b0e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 14], 11 - QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] - QUAD $0x0c0e324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 14], 12 - QUAD $0x0d0e124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 14], 13 + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + QUAD $0x020e124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 14], 2 + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x030e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x040e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 4 + QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] + QUAD $0x050e3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 14], 5 + QUAD $0x060e224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 14], 6 + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x070e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 7 + QUAD $0x080e2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 14], 8 + QUAD $0x000000e824ac8b4c // mov r13, qword [rsp + 232] + QUAD $0x090e2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 14], 9 + LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + QUAD $0x0a0e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 10 + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0b0e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 11 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c0e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 12 + QUAD $0x0d0e1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 14], 13 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e0e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 14 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x0f0e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 15 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x010e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 14], 1 + LONG $0x245c8b4c; BYTE $0x78 // mov r11, qword [rsp + 120] + QUAD $0x020e1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 14], 2 + QUAD $0x030e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 3 + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x040e02442079e3c4 // vpinsrb 
xmm0, xmm0, byte [rdx + rax + 14], 4 + QUAD $0x00000088248c8b4c // mov r9, qword [rsp + 136] + QUAD $0x050e0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 14], 5 + QUAD $0x060e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 6 + QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] + QUAD $0x070e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 14], 7 + QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] + QUAD $0x080e1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 14], 8 + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x090e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 9 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + QUAD $0x0a0e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 10 + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] + QUAD $0x0b0e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 11 LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0e0e324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 14], 14 - QUAD $0x0f0e1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 14], 15 - LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] - QUAD $0x010e1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 14], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x020e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 2 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] - QUAD $0x030e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] - QUAD $0x040e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] - QUAD $0x050e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 5 - QUAD $0x060e32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 14], 6 - QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x070e12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 14], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] - QUAD $0x080e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 8 - QUAD $0x0000009824a48b4c // mov r12, qword [rsp + 152] - QUAD $0x090e22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 14], 9 - QUAD $0x0000014024b48b4c // mov r14, qword [rsp + 320] - QUAD $0x0a0e32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 14], 10 - QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] - QUAD $0x0b0e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] QUAD $0x0c0e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 12 - LONG $0x245c8b4c; BYTE $0x20 // mov r11, qword [rsp + 32] - QUAD $0x0d0e1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 14], 13 - LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] - QUAD $0x0e0e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] - QUAD $0x0f0e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 15 - LONG $0x74b60f42; WORD $0x0f2a // movzx esi, byte [rdx + r13 + 15] + LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] + QUAD $0x0d0e02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 14], 13 + QUAD $0x0e0e32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 14], 14 + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] + QUAD $0x0f0e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 15 + QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] + LONG 
$0x74b60f42; WORD $0x0f32 // movzx esi, byte [rdx + r14 + 15] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x010f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] - QUAD $0x020f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 2 - QUAD $0x030f02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 15], 3 - QUAD $0x040f0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 15], 4 - QUAD $0x050f3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 15], 5 - QUAD $0x060f3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 15], 6 - QUAD $0x000000c824ac8b4c // mov r13, qword [rsp + 200] - QUAD $0x070f2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 15], 7 - QUAD $0x000000c024848b4c // mov r8, qword [rsp + 192] - QUAD $0x080f02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 15], 8 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x090f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 9 - QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] - QUAD $0x0a0f3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 15], 10 - QUAD $0x0b0f0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 15], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c0f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 12 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x010f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 1 + QUAD $0x020f12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 15], 2 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x030f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 3 + LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] + QUAD $0x040f12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 15], 4 + QUAD $0x050f3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 15], 5 + QUAD $0x060f22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 15], 6 + QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] + QUAD $0x070f3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 15], 7 + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] + QUAD $0x080f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 8 + QUAD $0x090f2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 15], 9 + LONG $0x24648b4c; BYTE $0x58 // mov r12, qword [rsp + 88] + QUAD $0x0a0f22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 15], 10 + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0b0f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c0f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0d0f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e0f0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 15], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0f0f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 15 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e0f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f0f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 15 + QUAD $0x0000010024848b48 // mov rax, qword [rsp 
+ 256] LONG $0x0274b60f; BYTE $0x0f // movzx esi, byte [rdx + rax + 15] LONG $0xde6ef9c5 // vmovd xmm3, esi - QUAD $0x010f1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 15], 1 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x020f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 2 + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] + QUAD $0x010f325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 15], 1 + QUAD $0x020f1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 15], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x030f325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 15], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x040f325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 15], 4 + QUAD $0x050f0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 15], 5 + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x060f325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 15], 6 + QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] + QUAD $0x070f325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 15], 7 + QUAD $0x080f1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 15], 8 QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] - QUAD $0x030f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x040f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 4 - LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] - QUAD $0x050f3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 15], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] - QUAD $0x060f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 6 - QUAD $0x070f125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 15], 7 - QUAD $0x0000008824948b4c // mov r10, qword [rsp + 136] - QUAD $0x080f125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 15], 8 - QUAD $0x090f225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 15], 9 - QUAD $0x0a0f325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 15], 10 - QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] - QUAD $0x0b0f325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 15], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x090f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a0f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 10 + QUAD $0x0b0f0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 15], 11 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c0f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 12 - QUAD $0x0d0f1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 15], 13 - LONG $0x245c8b4c; BYTE $0x48 // mov r11, qword [rsp + 72] - QUAD $0x0e0f1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 15], 14 - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x0f0f225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 15], 15 + QUAD $0x0d0f025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 15], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e0f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 14 + QUAD $0x0f0f3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 15], 15 LONG $0x387de3c4; WORD $0x01c1 // vinserti128 ymm0, ymm0, xmm1, 1 QUAD $0x00038024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 896], ymm0 LONG $0x3865e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm3, xmm2, 1 QUAD 
$0x0003a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 928], ymm0 - QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] - LONG $0x3274b60f; BYTE $0x10 // movzx esi, byte [rdx + rsi + 16] + LONG $0x74b60f42; WORD $0x1032 // movzx esi, byte [rdx + r14 + 16] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e8248c8b4c // mov r9, qword [rsp + 232] - QUAD $0x01100a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 16], 1 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] - QUAD $0x021032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 2 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x031032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 3 - QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] - QUAD $0x041032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 4 - QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] - QUAD $0x051032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 5 - LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] - QUAD $0x061032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 6 - QUAD $0x07102a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 16], 7 - QUAD $0x081002442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 16], 8 - QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] - QUAD $0x091032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 9 - QUAD $0x0a103a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 16], 10 - LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] - QUAD $0x0b1032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 11 - QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] - QUAD $0x0c1032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 12 - QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + QUAD $0x01100a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 16], 1 + QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + QUAD $0x021002442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 16], 2 + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x031002442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 16], 3 + QUAD $0x041012442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 16], 4 + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x051002442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 16], 5 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x061002442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 16], 6 + QUAD $0x07103a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 16], 7 + QUAD $0x000000e0249c8b4c // mov r11, qword [rsp + 224] + QUAD $0x08101a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 16], 8 + QUAD $0x09102a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 16], 9 + QUAD $0x0a1022442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 16], 10 + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0b1002442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 16], 11 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c1002442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 16], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] QUAD $0x0d1032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 13 - QUAD $0x0e100a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 16], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f100a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 16], 15 - QUAD $0x00000108249c8b48 // mov rbx, qword [rsp + 264] + LONG 
$0x24748b4c; BYTE $0x40 // mov r14, qword [rsp + 64] + QUAD $0x0e1032442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 16], 14 + LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] + QUAD $0x0f1002442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 16], 15 + QUAD $0x00000100249c8b48 // mov rbx, qword [rsp + 256] LONG $0x1a74b60f; BYTE $0x10 // movzx esi, byte [rdx + rbx + 16] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] - QUAD $0x0110024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 16], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] + QUAD $0x0110124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 16], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] QUAD $0x0210324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 2 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] - QUAD $0x0310324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + QUAD $0x03103a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 16], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] QUAD $0x0410324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 4 + QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] QUAD $0x05103a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 16], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x0610324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] - QUAD $0x0710324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 7 - QUAD $0x0810124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 16], 8 - QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] - QUAD $0x09103a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 16], 9 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + QUAD $0x000000f8248c8b4c // mov r9, qword [rsp + 248] + QUAD $0x07100a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 16], 7 + QUAD $0x000000a024ac8b4c // mov r13, qword [rsp + 160] + QUAD $0x08102a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 16], 8 + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x0910324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 9 + LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0a10324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 10 - QUAD $0x0b10324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 16], 11 - QUAD $0x0c10024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 16], 12 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0d10024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 16], 13 - QUAD $0x0e101a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 16], 14 - QUAD $0x0f10224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 16], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x0274b60f; BYTE $0x11 // movzx esi, byte [rdx + rax + 17] - LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x01110a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 17], 1 - QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] - QUAD $0x02111a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 17], 2 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] - QUAD $0x031112542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 17], 3 - QUAD $0x000000f024848b48 // 
mov rax, qword [rsp + 240] - QUAD $0x041102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 4 - QUAD $0x000000f824ac8b4c // mov r13, qword [rsp + 248] - QUAD $0x05112a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 17], 5 - LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] - QUAD $0x06110a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 17], 6 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x071102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 7 - QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] - QUAD $0x081132542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 17], 8 - QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] - QUAD $0x09113a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 17], 9 - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] - QUAD $0x0a1102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 10 - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b1102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 11 QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] - QUAD $0x0c1122542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 17], 12 - QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] - QUAD $0x0d1132542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 17], 13 + QUAD $0x0b10224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 16], 11 LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0e1132542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 17], 14 - QUAD $0x0f110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 15 + QUAD $0x0c10324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 12 + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + QUAD $0x0d10324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 13 + QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] + QUAD $0x0e10324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + QUAD $0x0f10324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 15 + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x11 // movzx esi, byte [rdx + rsi + 17] + LONG $0xd66ef9c5 // vmovd xmm2, esi + QUAD $0x01110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 1 + QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] + QUAD $0x02110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 2 + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x03110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 3 + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + QUAD $0x04110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 4 + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] + QUAD $0x05110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 5 + QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] + QUAD $0x06110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 6 + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x07110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 7 + QUAD $0x08111a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 17], 8 + QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + QUAD $0x09110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 9 + LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] + QUAD $0x0a110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 10 + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x0b110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + 
rcx + 17], 11 + QUAD $0x0c1102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0d1102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 13 + QUAD $0x0e1132542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 17], 14 + QUAD $0x0f1102542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 17], 15 LONG $0x1a74b60f; BYTE $0x11 // movzx esi, byte [rdx + rbx + 17] LONG $0xde6ef9c5 // vmovd xmm3, esi - QUAD $0x0111025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 17], 1 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] - QUAD $0x02110a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 17], 2 + QUAD $0x0111125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 17], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x0211025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 17], 2 + QUAD $0x03113a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 17], 3 + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x04113a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 17], 4 + QUAD $0x05113a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 17], 5 + QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] + QUAD $0x06113a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 17], 6 + QUAD $0x07110a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 17], 7 + QUAD $0x08112a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 17], 8 QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] - QUAD $0x0311025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 17], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] - QUAD $0x0411325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] - QUAD $0x0511325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] - QUAD $0x0611325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] - QUAD $0x0711325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] - QUAD $0x0811325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 8 - QUAD $0x09113a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 17], 9 - QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] - QUAD $0x0a113a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 17], 10 - QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] - QUAD $0x0b11325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0c11325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 12 - LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] - QUAD $0x0d11325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 13 - LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] - QUAD $0x0e11325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 14 + QUAD $0x0911025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 17], 9 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + QUAD $0x0a110a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 17], 10 + QUAD $0x0b11225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 17], 11 + LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] + QUAD $0x0c110a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 17], 12 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0d11025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 17], 
13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e11025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 17], 14 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00036024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 864], ymm0 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] - QUAD $0x0f1132442061e3c4 // vpinsrb xmm0, xmm3, byte [rdx + rsi + 17], 15 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f1102442061e3c4 // vpinsrb xmm0, xmm3, byte [rdx + rax + 17], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x00034024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 832], ymm0 - QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] - LONG $0x3274b60f; BYTE $0x12 // movzx esi, byte [rdx + rsi + 18] + QUAD $0x000000f0249c8b48 // mov rbx, qword [rsp + 240] + LONG $0x1a74b60f; BYTE $0x12 // movzx esi, byte [rdx + rbx + 18] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] - QUAD $0x011232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 1 - QUAD $0x02121a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 18], 2 + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x011202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 1 + QUAD $0x0000010824b48b4c // mov r14, qword [rsp + 264] + QUAD $0x021232442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 18], 2 + QUAD $0x000000c824948b4c // mov r10, qword [rsp + 200] QUAD $0x031212442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 18], 3 - QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] - QUAD $0x041232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 4 - QUAD $0x05122a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 18], 5 - QUAD $0x06120a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 18], 6 - QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] - QUAD $0x071232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 7 - QUAD $0x081232442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 18], 8 - QUAD $0x09123a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 18], 9 - QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] - QUAD $0x0a122a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 18], 10 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x041202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 4 + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] + QUAD $0x05121a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 18], 5 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x061202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 6 + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x071202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 7 + QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] + QUAD $0x081202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 8 + QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] + QUAD $0x091202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 9 + LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + QUAD $0x0a1202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 10 + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] QUAD $0x0b1202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 11 - QUAD $0x0c1222442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 18], 12 - QUAD $0x000000d0248c8b4c // mov r9, qword [rsp + 208] - QUAD $0x0d120a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 18], 13 - LONG 
$0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0e1202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0f1202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 15 - LONG $0x1a74b60f; BYTE $0x12 // movzx esi, byte [rdx + rbx + 18] - LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24748b4c; BYTE $0x78 // mov r14, qword [rsp + 120] - QUAD $0x0112324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 18], 1 - QUAD $0x02120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 2 - QUAD $0x0312024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 18], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x0412024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 18], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x0512024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 18], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] - QUAD $0x0612024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 18], 6 - QUAD $0x00000090249c8b4c // mov r11, qword [rsp + 144] - QUAD $0x07121a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 18], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x08120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x0912024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 18], 9 - QUAD $0x0a123a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 18], 10 - QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] - QUAD $0x0b12324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0c12324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 12 - LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] - QUAD $0x0d12224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 18], 13 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c1232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 12 LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] - QUAD $0x0e12324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 14 - LONG $0x24548b4c; BYTE $0x38 // mov r10, qword [rsp + 56] - QUAD $0x0f12124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 18], 15 - QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] - LONG $0x74b60f42; WORD $0x133a // movzx esi, byte [rdx + r15 + 19] - LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] - QUAD $0x011332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 1 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] - QUAD $0x021332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 2 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x031332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 3 - QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] - QUAD $0x041332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 4 - QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] - QUAD $0x051332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 5 - LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] - QUAD $0x061332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 6 - QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] - QUAD $0x071332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 7 + QUAD $0x0d1232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 13 + LONG $0x24648b4c; BYTE $0x40 // mov r12, 
qword [rsp + 64] + QUAD $0x0e1222442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 18], 14 + LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] + QUAD $0x0f122a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 18], 15 + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3274b60f; BYTE $0x12 // movzx esi, byte [rdx + rsi + 18] + LONG $0xce6ef9c5 // vmovd xmm1, esi QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] - QUAD $0x081332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 8 - QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] - QUAD $0x091332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 9 - QUAD $0x0a132a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 19], 10 - LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] - QUAD $0x0b1332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 11 - QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] - QUAD $0x0c1332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 12 - QUAD $0x0d130a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 19], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] - QUAD $0x0e133a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 19], 14 - LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] - QUAD $0x0f1302542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 19], 15 + QUAD $0x0112324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0212324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0312324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 3 + QUAD $0x04123a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 18], 4 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x0512324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 5 + QUAD $0x06123a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 18], 6 + QUAD $0x000000f824bc8b4c // mov r15, qword [rsp + 248] + QUAD $0x07123a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 18], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x0812324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 8 + QUAD $0x0912024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 18], 9 + QUAD $0x0a120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 10 + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] + QUAD $0x0b120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 11 + QUAD $0x0c120a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 18], 12 + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x0d120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 13 + QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] + QUAD $0x0e120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 14 + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + QUAD $0x0f120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 15 LONG $0x1a74b60f; BYTE $0x13 // movzx esi, byte [rdx + rbx + 19] + LONG $0xd66ef9c5 // vmovd xmm2, esi + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + QUAD $0x01130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 1 + QUAD $0x021332542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 19], 2 + QUAD $0x031312542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 19], 3 + LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] + QUAD $0x04130a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 19], 4 + QUAD $0x05131a542069a3c4 // 
vpinsrb xmm2, xmm2, byte [rdx + r11 + 19], 5 + QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] + QUAD $0x06130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 6 + QUAD $0x000000d024848b4c // mov r8, qword [rsp + 208] + QUAD $0x071302542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 19], 7 + QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] + QUAD $0x081332542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 19], 8 + QUAD $0x000000e824948b4c // mov r10, qword [rsp + 232] + QUAD $0x091312542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 19], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1332542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 19], 10 + QUAD $0x0b1302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 11 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c1302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 12 + LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + QUAD $0x0d131a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 19], 13 + QUAD $0x0e1322542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 19], 14 + QUAD $0x0f132a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 19], 15 + QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] + LONG $0x0274b60f; BYTE $0x13 // movzx esi, byte [rdx + rax + 19] LONG $0xde6ef9c5 // vmovd xmm3, esi - QUAD $0x0113325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 19], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x0213325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 19], 2 - QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] - QUAD $0x03131a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 19], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] - QUAD $0x0413325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 19], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] - QUAD $0x0513325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 19], 5 - QUAD $0x000000a024ac8b4c // mov r13, qword [rsp + 160] - QUAD $0x06132a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 19], 6 - QUAD $0x07131a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 19], 7 - QUAD $0x08130a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 19], 8 + QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] + QUAD $0x01131a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 19], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x0213025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0313025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 3 + QUAD $0x04133a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 19], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x0513025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 5 + QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] + QUAD $0x06133a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 19], 6 + QUAD $0x07133a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 19], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0813025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0913025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 10 - QUAD 
$0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 11 - QUAD $0x00000120248c8b4c // mov r9, qword [rsp + 288] - QUAD $0x0c130a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 19], 12 - QUAD $0x0d13225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 19], 13 - LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] - QUAD $0x0e13325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 19], 14 - QUAD $0x0f13125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 19], 15 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x0c13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 12 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0d13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 14 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 15 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00030024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 768], ymm0 LONG $0x3865e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm3, xmm2, 1 QUAD $0x00032024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 800], ymm0 - LONG $0x74b60f42; WORD $0x143a // movzx esi, byte [rdx + r15 + 20] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + LONG $0x0274b60f; BYTE $0x14 // movzx esi, byte [rdx + rax + 20] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e8249c8b4c // mov r11, qword [rsp + 232] - QUAD $0x01141a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 20], 1 - QUAD $0x000000a824a48b4c // mov r12, qword [rsp + 168] - QUAD $0x021422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 20], 2 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x031402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 3 - QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] - QUAD $0x04140a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 20], 4 - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x051412442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 20], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x061402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 6 + LONG $0x24648b4c; BYTE $0x68 // mov r12, qword [rsp + 104] + QUAD $0x011422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 20], 1 + QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + QUAD $0x021402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 2 QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x071402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] - QUAD $0x081402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 8 + QUAD $0x031402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 3 + QUAD $0x04140a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 20], 4 QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x091402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 9 - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] - QUAD $0x0a1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 10 + QUAD $0x051402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 5 + QUAD $0x06140a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 20], 6 + QUAD 
$0x071402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 20], 7 + QUAD $0x081432442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 20], 8 + QUAD $0x091412442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 20], 9 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 12 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] - QUAD $0x0d1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 13 - QUAD $0x0e143a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 20], 14 - QUAD $0x0f1402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 20], 15 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] - LONG $0x0274b60f; BYTE $0x14 // movzx esi, byte [rdx + rax + 20] + QUAD $0x0a1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 10 + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x0b140a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 20], 11 + LONG $0x246c8b4c; BYTE $0x50 // mov r13, qword [rsp + 80] + QUAD $0x0c142a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 20], 12 + QUAD $0x0d141a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 20], 13 + LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] + QUAD $0x0e141a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 20], 14 + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + QUAD $0x0f140a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 20], 15 + QUAD $0x00000100248c8b4c // mov r9, qword [rsp + 256] + LONG $0x74b60f42; WORD $0x140a // movzx esi, byte [rdx + r9 + 20] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x0114024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 20], 1 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x0214024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 20], 2 - QUAD $0x03141a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 20], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x0414024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 20], 4 - LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] - QUAD $0x05143a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 20], 5 - QUAD $0x06142a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 20], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x01141a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 20], 1 + LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] + QUAD $0x0214024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 20], 2 + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + QUAD $0x03143a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 20], 3 + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x04140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 4 + QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] + QUAD $0x05140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 5 + QUAD $0x06143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 6 + QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] QUAD $0x0714324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x0814324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 8 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + 
QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] QUAD $0x0914324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 9 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] - QUAD $0x0a14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 10 - QUAD $0x000000d824848b4c // mov r8, qword [rsp + 216] - QUAD $0x0b14024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 20], 11 - QUAD $0x0c140a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 20], 12 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] - QUAD $0x0d142a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 20], 13 - QUAD $0x0e14324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 20], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] - QUAD $0x0f14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 15 - QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] + QUAD $0x0a14124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 20], 10 + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x0b14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 11 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x0c14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 12 + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x0d143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 13 + QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] + QUAD $0x0e14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 14 + QUAD $0x0000014024b48b4c // mov r14, qword [rsp + 320] + QUAD $0x0f14324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 20], 15 + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] LONG $0x3274b60f; BYTE $0x15 // movzx esi, byte [rdx + rsi + 21] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x01151a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 21], 1 - QUAD $0x021522542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 21], 2 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x031532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 3 - QUAD $0x04150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 4 - QUAD $0x051512542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 21], 5 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] - QUAD $0x06153a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 21], 6 + QUAD $0x011522542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 21], 1 + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] + QUAD $0x021532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 2 QUAD $0x000000c8249c8b4c // mov r11, qword [rsp + 200] - QUAD $0x07151a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 21], 7 - QUAD $0x000000c024a48b4c // mov r12, qword [rsp + 192] - QUAD $0x081522542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 21], 8 - QUAD $0x000000b824948b4c // mov r10, qword [rsp + 184] - QUAD $0x091512542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 21], 9 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] - QUAD $0x0a150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 10 - LONG $0x24748b4c; BYTE $0x58 // mov r14, qword [rsp + 88] - QUAD $0x0b1532542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 21], 11 - QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] - QUAD $0x0c150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 12 - QUAD $0x000000d0249c8b48 // mov rbx, qword [rsp + 208] - QUAD $0x0d151a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 21], 13 - LONG 
$0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 15 - QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] - LONG $0x0a74b60f; BYTE $0x15 // movzx esi, byte [rdx + rcx + 21] + QUAD $0x03151a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 21], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x041532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 4 + QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] + QUAD $0x051532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x061532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 6 + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x071532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 7 + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] + QUAD $0x081532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 8 + QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] + QUAD $0x091532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 9 + QUAD $0x0a1502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 10 + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0b1502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 11 + QUAD $0x0c152a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 21], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0d1502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 13 + QUAD $0x0e151a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 21], 14 + LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] + QUAD $0x0f152a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 21], 15 + LONG $0x74b60f42; WORD $0x150a // movzx esi, byte [rdx + r9 + 21] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x01150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 1 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] - QUAD $0x02150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] - QUAD $0x03150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 3 - QUAD $0x0415025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 4 - QUAD $0x05153a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 21], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] - QUAD $0x0615025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 6 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x07153a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 21], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x08150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 8 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x0115025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 1 + QUAD $0x0215025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 21], 2 + QUAD $0x03153a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 21], 3 + QUAD $0x000000a8248c8b4c // mov r9, qword [rsp + 168] + QUAD $0x04150a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 21], 4 + QUAD $0x05150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 5 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x0915025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 
21], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] - QUAD $0x0a15025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 10 - QUAD $0x0b15025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 21], 11 - QUAD $0x0c150a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 21], 12 - QUAD $0x0d152a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 21], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] - QUAD $0x0e15025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 14 + QUAD $0x0615025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 6 + QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] + QUAD $0x0715025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0815025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 8 + QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] + QUAD $0x0915225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 21], 9 + QUAD $0x0a15125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 21], 10 + QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] + QUAD $0x0b151a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 21], 11 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x0c150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 12 + QUAD $0x0d153a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 21], 13 + QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] + QUAD $0x0e150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 14 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x0002c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 704], ymm0 - LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] - QUAD $0x0f1502442061a3c4 // vpinsrb xmm0, xmm3, byte [rdx + r8 + 21], 15 + QUAD $0x0f1532442061a3c4 // vpinsrb xmm0, xmm3, byte [rdx + r14 + 21], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x0002e024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 736], ymm0 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x0274b60f; BYTE $0x16 // movzx esi, byte [rdx + rax + 22] + QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] + LONG $0x74b60f42; WORD $0x1612 // movzx esi, byte [rdx + r10 + 22] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] - QUAD $0x011632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 1 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] - QUAD $0x021632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 2 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x031632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 3 - QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] - QUAD $0x041632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 4 - QUAD $0x000000f824ac8b4c // mov r13, qword [rsp + 248] - QUAD $0x05162a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 22], 5 - QUAD $0x06163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 6 + LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] + QUAD $0x01163a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 22], 1 + QUAD $0x0000010824848b4c // mov r8, qword [rsp + 264] + QUAD $0x021602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 22], 2 + QUAD $0x03161a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 22], 3 + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + QUAD $0x04160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 4 + QUAD $0x000000b8248c8b48 // mov rcx, 
qword [rsp + 184] + QUAD $0x05160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 5 + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x061632442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 22], 6 + QUAD $0x000000d0249c8b4c // mov r11, qword [rsp + 208] QUAD $0x07161a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 22], 7 - QUAD $0x081622442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 22], 8 - QUAD $0x091612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 22], 9 - QUAD $0x000000e024a48b4c // mov r12, qword [rsp + 224] - QUAD $0x0a1622442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 22], 10 - QUAD $0x0b1632442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 22], 11 - QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] - QUAD $0x0c161a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 22], 12 - QUAD $0x0d161a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 22], 13 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0e1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 14 + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x08163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 8 + QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + QUAD $0x09160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 9 + LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] + QUAD $0x0a160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 10 + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x0b160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 11 LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] - QUAD $0x0f1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 15 - QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] - LONG $0x74b60f42; WORD $0x1612 // movzx esi, byte [rdx + r10 + 22] + QUAD $0x0c1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 14 + QUAD $0x0f162a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 22], 15 + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3274b60f; BYTE $0x16 // movzx esi, byte [rdx + rsi + 22] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] QUAD $0x0116324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 1 - LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] - QUAD $0x02161a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 22], 2 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0216324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0316324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] - QUAD $0x0416324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 4 - LONG $0x24748b4c; BYTE $0x60 // mov r14, qword [rsp + 96] - QUAD $0x0516324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 22], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x04160a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 22], 4 + QUAD $0x0000008824ac8b4c // mov r13, qword [rsp + 136] + QUAD 
$0x05162a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 22], 5 + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x0616324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 6 - QUAD $0x07163a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 22], 7 - QUAD $0x08160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 8 - QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - QUAD $0x09160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] - QUAD $0x0a160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 10 - QUAD $0x000000d8248c8b4c // mov r9, qword [rsp + 216] - QUAD $0x0b160a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 22], 11 - QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] - QUAD $0x0c160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 12 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] - QUAD $0x0d160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 13 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] - QUAD $0x0e163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 14 - QUAD $0x0f16024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 22], 15 - LONG $0x0274b60f; BYTE $0x17 // movzx esi, byte [rdx + rax + 23] + QUAD $0x000000f8248c8b4c // mov r9, qword [rsp + 248] + QUAD $0x07160a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 22], 7 + QUAD $0x0816024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 8 + QUAD $0x0916224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 22], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 10 + QUAD $0x0b161a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 22], 11 + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + QUAD $0x0c16224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 22], 12 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0d16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 14 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 15 + LONG $0x74b60f42; WORD $0x1712 // movzx esi, byte [rdx + r10 + 23] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] - QUAD $0x011702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] - QUAD $0x021702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 2 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] - QUAD $0x03173a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 23], 3 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x01173a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 23], 1 + QUAD $0x021702542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 23], 2 + QUAD $0x000000c824948b4c // mov r10, qword [rsp + 200] + QUAD $0x031712542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 23], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x041702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 4 - QUAD $0x05172a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 23], 5 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x06170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 6 - QUAD $0x000000c8248c8b48 // mov rcx, qword 
[rsp + 200] - QUAD $0x07170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 7 - QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] - QUAD $0x08170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 8 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x09170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 9 - QUAD $0x0a1722542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 23], 10 - LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x051702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 5 + QUAD $0x061732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 23], 6 + QUAD $0x07171a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 23], 7 + QUAD $0x08173a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 23], 8 + QUAD $0x000000e824b48b4c // mov r14, qword [rsp + 232] + QUAD $0x091732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 23], 9 + LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + QUAD $0x0a173a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 23], 10 QUAD $0x0b170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 11 - QUAD $0x0c171a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 23], 12 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] - QUAD $0x0d170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 14 - LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] - QUAD $0x0f1722542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 23], 15 - LONG $0x74b60f42; WORD $0x1712 // movzx esi, byte [rdx + r10 + 23] + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c1702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 12 + LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + QUAD $0x0d171a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 23], 13 + LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] + QUAD $0x0e171a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 23], 14 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x0f1702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 15 + QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] + LONG $0x74b60f42; WORD $0x173a // movzx esi, byte [rdx + r15 + 23] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x245c8b4c; BYTE $0x78 // mov r11, qword [rsp + 120] - QUAD $0x01171a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 23], 1 - QUAD $0x02171a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 23], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] - QUAD $0x03170a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 23], 3 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] - QUAD $0x04171a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 23], 4 - QUAD $0x0517325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 23], 5 - QUAD $0x000000a024ac8b4c // mov r13, qword [rsp + 160] + QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] + QUAD $0x01170a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 23], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x0217025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 23], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0317325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x0417325c2061e3c4 // vpinsrb 
xmm3, xmm3, byte [rdx + rsi + 23], 4 + QUAD $0x05172a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 23], 5 + QUAD $0x0000009824ac8b4c // mov r13, qword [rsp + 152] QUAD $0x06172a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 23], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] - QUAD $0x0717325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x07170a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 23], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x0817325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 8 - QUAD $0x0000009824848b4c // mov r8, qword [rsp + 152] + QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] QUAD $0x0917025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 23], 9 - QUAD $0x0000014024948b4c // mov r10, qword [rsp + 320] - QUAD $0x0a17125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 23], 10 - QUAD $0x0b170a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 23], 11 + LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] + QUAD $0x0a17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 10 + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x0b17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 11 + QUAD $0x0c17225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 23], 12 + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + QUAD $0x0d17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 13 QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0c17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 12 - LONG $0x24748b4c; BYTE $0x20 // mov r14, qword [rsp + 32] - QUAD $0x0d17325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 23], 13 - QUAD $0x0e173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0e17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] QUAD $0x0f17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 15 LONG $0x387563c4; WORD $0x01d0 // vinserti128 ymm10, ymm1, xmm0, 1 LONG $0x3865e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm3, xmm2, 1 QUAD $0x0002a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 672], ymm0 - QUAD $0x00000100248c8b4c // mov r9, qword [rsp + 256] - LONG $0x74b60f42; WORD $0x180a // movzx esi, byte [rdx + r9 + 24] + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x18 // movzx esi, byte [rdx + rsi + 24] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x011832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 1 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x021832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 2 - QUAD $0x03183a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 24], 3 - QUAD $0x041802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] - QUAD $0x051802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x061802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 6 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x071802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 7 - QUAD 
$0x000000c024bc8b48 // mov rdi, qword [rsp + 192] - QUAD $0x08183a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 24], 8 + QUAD $0x031812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 24], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x041832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 4 QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] - QUAD $0x091832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 9 + QUAD $0x051832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x061832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 6 + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x071832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 7 QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] - QUAD $0x0a1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 10 - LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] - QUAD $0x0b1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 11 - QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x081832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 8 + QUAD $0x091832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 24], 9 + QUAD $0x0a183a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 24], 10 + QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] + QUAD $0x0b1832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 24], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] QUAD $0x0c1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 12 - QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] - QUAD $0x0d1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 13 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0e1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 14 - QUAD $0x0f1822442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 24], 15 - QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] - LONG $0x3274b60f; BYTE $0x18 // movzx esi, byte [rdx + rsi + 24] + QUAD $0x0d181a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 24], 13 + QUAD $0x0e181a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 24], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 15 + LONG $0x74b60f42; WORD $0x183a // movzx esi, byte [rdx + r15 + 24] LONG $0xce6ef9c5 // vmovd xmm1, esi - QUAD $0x01181a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 24], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x0218324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 2 - QUAD $0x03180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 3 - QUAD $0x04181a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 24], 4 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] - QUAD $0x05180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 5 + QUAD $0x01180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 1 + QUAD $0x0218024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0318024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 3 + QUAD $0x000000a824a48b4c // mov r12, qword [rsp + 168] + QUAD $0x0418224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 24], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x0518024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 5 QUAD $0x06182a4c2071a3c4 
// vpinsrb xmm1, xmm1, byte [rdx + r13 + 24], 6 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] - QUAD $0x07180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 7 - QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] + QUAD $0x07180a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 24], 7 + QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] QUAD $0x08183a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 24], 8 QUAD $0x0918024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 24], 9 - QUAD $0x0a18124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 24], 10 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] - QUAD $0x0b180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 11 - QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a18024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 10 + QUAD $0x0000008024848b4c // mov r8, qword [rsp + 128] + QUAD $0x0b18024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 24], 11 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x0c180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 12 - QUAD $0x0d18324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 24], 13 - LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] - QUAD $0x0e18024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 24], 14 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] - QUAD $0x0f180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 15 - LONG $0x74b60f42; WORD $0x190a // movzx esi, byte [rdx + r9 + 25] - LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] - QUAD $0x01190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 1 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] - QUAD $0x02190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 2 - LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] - QUAD $0x03190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 3 - QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] - QUAD $0x04191a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 25], 4 - QUAD $0x000000f8248c8b4c // mov r9, qword [rsp + 248] - QUAD $0x05190a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 25], 5 - LONG $0x24648b4c; BYTE $0x28 // mov r12, qword [rsp + 40] - QUAD $0x061922542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 25], 6 - QUAD $0x071902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 7 - QUAD $0x08193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 8 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x091902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 9 - QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] - QUAD $0x0a192a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 25], 10 - LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] - QUAD $0x0b191a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 25], 11 - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] - QUAD $0x0c1932542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 25], 12 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] - QUAD $0x0d190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0e1902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0f1902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 15 - QUAD $0x0000010824848b48 // mov 
rax, qword [rsp + 264] + LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + QUAD $0x0d18124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 24], 13 + QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] + QUAD $0x0e183a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 24], 14 + QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] + QUAD $0x0f181a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 24], 15 + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x0274b60f; BYTE $0x19 // movzx esi, byte [rdx + rax + 25] + LONG $0xd66ef9c5 // vmovd xmm2, esi + LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] + QUAD $0x01192a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 25], 1 + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] + QUAD $0x021932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 2 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x031932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x041932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 4 + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] + QUAD $0x05191a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 25], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x061932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 6 + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x071932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 7 + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] + QUAD $0x081932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 8 + QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] + QUAD $0x091932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 10 + QUAD $0x0b1932542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 25], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 15 + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3274b60f; BYTE $0x19 // movzx esi, byte [rdx + rsi + 25] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] - QUAD $0x01193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 1 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x0219025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] - QUAD $0x0319025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x0419025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x0519025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] - QUAD $0x0619025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 
144] - QUAD $0x0719025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 7 + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] + QUAD $0x0119325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0219325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0319325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 3 + QUAD $0x0419225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 25], 4 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x0519325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 5 + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x0619325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 6 + QUAD $0x07190a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 25], 7 QUAD $0x08193a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 25], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x0919025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 9 - QUAD $0x0a19125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 25], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] - QUAD $0x0b19025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0c19325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 12 - LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x09193a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 25], 9 + LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] + QUAD $0x0a190a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 25], 10 + QUAD $0x0b19025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 25], 11 + QUAD $0x0c190a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 25], 12 QUAD $0x0d19125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 25], 13 - QUAD $0x0e19025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 25], 14 + QUAD $0x0e193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 14 LONG $0x387563c4; WORD $0x01c8 // vinserti128 ymm9, ymm1, xmm0, 1 - LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] - QUAD $0x0f1902442061a3c4 // vpinsrb xmm0, xmm3, byte [rdx + r8 + 25], 15 + QUAD $0x0f191a442061e3c4 // vpinsrb xmm0, xmm3, byte [rdx + rbx + 25], 15 LONG $0x387d63c4; WORD $0x01c2 // vinserti128 ymm8, ymm0, xmm2, 1 - QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] - LONG $0x3274b60f; BYTE $0x1a // movzx esi, byte [rdx + rsi + 26] + LONG $0x0274b60f; BYTE $0x1a // movzx esi, byte [rdx + rax + 26] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] - QUAD $0x011a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 1 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] - QUAD $0x021a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 2 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] - QUAD $0x031a3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 26], 3 - QUAD $0x041a1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 26], 4 - QUAD $0x051a0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 26], 5 - QUAD $0x061a22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 26], 6 - QUAD $0x000000c8249c8b4c // mov r11, qword [rsp + 200] - QUAD $0x071a1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 26], 7 - QUAD $0x000000c0248c8b4c // mov r9, qword [rsp + 192] - QUAD $0x081a0a442079a3c4 
// vpinsrb xmm0, xmm0, byte [rdx + r9 + 26], 8 - QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] - QUAD $0x091a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 9 - QUAD $0x0a1a2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 26], 10 - QUAD $0x0b1a1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 26], 11 - QUAD $0x0c1a32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 26], 12 - QUAD $0x0d1a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e1a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f1a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 15 - QUAD $0x00000108249c8b48 // mov rbx, qword [rsp + 264] - LONG $0x1a74b60f; BYTE $0x1a // movzx esi, byte [rdx + rbx + 26] + QUAD $0x011a2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 26], 1 + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + QUAD $0x021a12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 26], 2 + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x031a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x041a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 4 + QUAD $0x051a1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 26], 5 + QUAD $0x00000090249c8b4c // mov r11, qword [rsp + 144] + QUAD $0x061a1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 26], 6 + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x071a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 7 + QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] + QUAD $0x081a32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 26], 8 + QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + QUAD $0x091a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 10 + QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] + QUAD $0x0b1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 15 + QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] + LONG $0x3a74b60f; BYTE $0x1a // movzx esi, byte [rdx + rdi + 26] LONG $0xce6ef9c5 // vmovd xmm1, esi - QUAD $0x011a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 1 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] - QUAD $0x021a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 2 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] - QUAD $0x031a224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 26], 3 - LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] - QUAD $0x041a2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 26], 4 - LONG $0x24748b4c; BYTE $0x60 // mov r14, qword [rsp + 96] - QUAD $0x051a324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 26], 5 - QUAD 
$0x000000a024b48b48 // mov rsi, qword [rsp + 160] - QUAD $0x061a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 6 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] - QUAD $0x071a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x081a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 8 + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] + QUAD $0x011a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 1 + LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] + QUAD $0x021a024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 26], 2 + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + QUAD $0x031a1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 26], 3 + QUAD $0x041a224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 26], 4 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x051a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 5 QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] - QUAD $0x091a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] - QUAD $0x0a1a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 10 - QUAD $0x0b1a024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 26], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] - QUAD $0x0c1a024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 26], 12 - QUAD $0x0d1a124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 26], 13 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] - QUAD $0x0e1a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 14 - QUAD $0x0f1a024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 26], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x0274b60f; BYTE $0x1b // movzx esi, byte [rdx + rax + 27] + QUAD $0x061a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 6 + QUAD $0x000000f824a48b4c // mov r12, qword [rsp + 248] + QUAD $0x071a224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 26], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x081a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 8 + QUAD $0x091a3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 26], 9 + QUAD $0x0a1a0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 26], 10 + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x0b1a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 11 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x0c1a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 12 + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + QUAD $0x0d1a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 13 + QUAD $0x00000120248c8b4c // mov r9, qword [rsp + 288] + QUAD $0x0e1a0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 26], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + QUAD $0x0f1a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 15 + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x1b // movzx esi, byte [rdx + rsi + 27] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] - QUAD $0x011b02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 27], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] - QUAD $0x021b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 2 - QUAD $0x031b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 27], 3 - QUAD 
$0x000000f024948b4c // mov r10, qword [rsp + 240] - QUAD $0x041b12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 27], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] - QUAD $0x051b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x061b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 6 - QUAD $0x071b1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 27], 7 - QUAD $0x081b0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 27], 8 - QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] - QUAD $0x091b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 27], 9 - QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] - QUAD $0x0a1b0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 27], 10 - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 12 + QUAD $0x011b2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 27], 1 + QUAD $0x021b12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 27], 2 + WORD $0x894d; BYTE $0xd5 // mov r13, r10 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x031b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 27], 3 + QUAD $0x041b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 4 + QUAD $0x000000b824948b4c // mov r10, qword [rsp + 184] + QUAD $0x051b12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 27], 5 + QUAD $0x061b1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 27], 6 QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x071b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 7 + QUAD $0x081b32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 27], 8 + QUAD $0x091b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 9 + LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + QUAD $0x0a1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 10 + QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] + QUAD $0x0b1b32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 27], 11 + LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] + QUAD $0x0c1b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0d1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0e1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0f1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 15 - LONG $0x1a74b60f; BYTE $0x1b // movzx esi, byte [rdx + rbx + 27] + LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] + QUAD $0x0f1b1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 27], 15 + LONG $0x3a74b60f; BYTE $0x1b // movzx esi, byte [rdx + rdi + 27] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x011b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 1 - QUAD $0x021b3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 27], 2 - QUAD $0x031b225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 27], 3 - QUAD $0x041b2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 27], 4 - QUAD $0x051b325c2061a3c4 // vpinsrb xmm3, xmm3, byte 
[rdx + r14 + 27], 5 - QUAD $0x000000a024a48b4c // mov r12, qword [rsp + 160] - QUAD $0x061b225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 27], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] - QUAD $0x071b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] + QUAD $0x011b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 1 + QUAD $0x021b025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 27], 2 + QUAD $0x031b1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 27], 3 + QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] + QUAD $0x041b1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 27], 4 + QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] + QUAD $0x051b3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 27], 5 + QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] + QUAD $0x061b3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 27], 6 + QUAD $0x071b225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 27], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x081b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 8 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] QUAD $0x091b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 9 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0a1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 10 - QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] QUAD $0x0b1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0c1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 12 LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0d1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 13 - QUAD $0x0e1b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 14 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] - QUAD $0x0f1b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 15 + QUAD $0x0e1b0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 27], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + QUAD $0x0f1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 15 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00022024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 544], ymm0 LONG $0x3865e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm3, xmm2, 1 QUAD $0x00024024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 576], ymm0 - QUAD $0x0000010024ac8b4c // mov r13, qword [rsp + 256] - LONG $0x74b60f42; WORD $0x1c2a // movzx esi, byte [rdx + r13 + 28] + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x1c // movzx esi, byte [rdx + rsi + 28] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x011c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 1 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] - QUAD $0x021c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 28], 2 - LONG $0x245c8b4c; BYTE $0x70 // mov r11, qword [rsp + 112] - QUAD $0x031c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 28], 3 - QUAD $0x041c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 4 - QUAD $0x000000f824b48b4c // mov r14, 
qword [rsp + 248] - QUAD $0x051c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 28], 5 - LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x011c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 1 + QUAD $0x021c2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 28], 2 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x031c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 3 + LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] + QUAD $0x041c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 28], 4 + QUAD $0x051c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] QUAD $0x061c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 6 - QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] - QUAD $0x071c1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 28], 7 - QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] - QUAD $0x081c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 8 - QUAD $0x091c3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 28], 9 - QUAD $0x0a1c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 28], 10 - LONG $0x24548b4c; BYTE $0x58 // mov r10, qword [rsp + 88] - QUAD $0x0b1c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 11 - QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] - QUAD $0x0c1c3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 28], 12 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] - QUAD $0x0d1c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 28], 13 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0e1c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 14 - LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] - QUAD $0x0f1c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 15 - QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] - LONG $0x3274b60f; BYTE $0x1c // movzx esi, byte [rdx + rsi + 28] + QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] + QUAD $0x071c2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 28], 7 + QUAD $0x000000e024a48b4c // mov r12, qword [rsp + 224] + QUAD $0x081c22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 28], 8 + QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] + QUAD $0x091c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 10 + QUAD $0x0b1c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 28], 11 + QUAD $0x0c1c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 28], 12 + LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] + QUAD $0x0d1c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 28], 13 + QUAD $0x0e1c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 28], 14 + QUAD $0x0f1c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 28], 15 + QUAD $0x00000100249c8b4c // mov r11, qword [rsp + 256] + LONG $0x74b60f42; WORD $0x1c1a // movzx esi, byte [rdx + r11 + 28] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] - QUAD $0x011c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x021c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 2 - QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] - QUAD $0x031c0a4c2071a3c4 // vpinsrb xmm1, xmm1, 
byte [rdx + r9 + 28], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] - QUAD $0x041c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] - QUAD $0x051c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 5 - QUAD $0x061c224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 28], 6 - QUAD $0x071c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 7 - QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] - QUAD $0x081c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x091c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] - QUAD $0x0a1c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] - QUAD $0x0b1c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0c1c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 12 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x011c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x021c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x031c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 3 + QUAD $0x041c1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 28], 4 + QUAD $0x051c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 5 + QUAD $0x061c3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 28], 6 + QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] + QUAD $0x071c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 7 + QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] + QUAD $0x081c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 8 + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] + QUAD $0x091c1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 28], 9 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + QUAD $0x0a1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 10 + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] + QUAD $0x0b1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 11 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x0c1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 12 LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0d1c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 13 - LONG $0x24648b4c; BYTE $0x48 // mov r12, qword [rsp + 72] - QUAD $0x0e1c224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 28], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0000012024948b4c // mov r10, qword [rsp + 288] + QUAD $0x0e1c124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 28], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] QUAD $0x0f1c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 15 - LONG $0x74b60f42; WORD $0x1d2a // movzx esi, byte [rdx + r13 + 29] + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x1d // movzx esi, byte [rdx + rsi + 29] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e824ac8b4c // mov r13, qword [rsp + 232] - QUAD $0x011d2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 29], 1 - QUAD $0x021d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + 
rcx + 29], 2 - QUAD $0x031d1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 29], 3 - QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] - QUAD $0x041d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 4 - QUAD $0x051d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 5 - LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] - QUAD $0x061d1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 29], 6 - QUAD $0x071d1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 29], 7 - QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] - QUAD $0x081d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 8 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x091d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 9 - QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] - QUAD $0x0a1d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 10 - QUAD $0x0b1d12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 29], 11 - QUAD $0x0c1d3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 29], 12 - QUAD $0x0d1d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 13 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] - QUAD $0x0e1d1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 29], 14 - QUAD $0x0f1d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 15 - QUAD $0x0000010824848b4c // mov r8, qword [rsp + 264] - LONG $0x74b60f42; WORD $0x1d02 // movzx esi, byte [rdx + r8 + 29] - LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x247c8b4c; BYTE $0x78 // mov r15, qword [rsp + 120] - QUAD $0x011d3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 29], 1 - LONG $0x24548b4c; BYTE $0x40 // mov r10, qword [rsp + 64] - QUAD $0x021d125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 29], 2 - QUAD $0x031d0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 29], 3 - LONG $0x244c8b4c; BYTE $0x68 // mov r9, qword [rsp + 104] - QUAD $0x041d0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 29], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] - QUAD $0x051d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] - QUAD $0x061d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 6 + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x011d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 1 + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] + QUAD $0x021d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 2 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x031d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 3 + QUAD $0x041d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 4 + QUAD $0x000000b8248c8b4c // mov r9, qword [rsp + 184] + QUAD $0x051d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 5 QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] - QUAD $0x071d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] - QUAD $0x081d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 8 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] - QUAD $0x091d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 9 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] - QUAD $0x0a1d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 10 - QUAD $0x0b1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] - QUAD $0x0c1d025c2061e3c4 // vpinsrb 
xmm3, xmm3, byte [rdx + rax + 29], 12 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0d1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 13 - QUAD $0x0e1d22642061a3c4 // vpinsrb xmm4, xmm3, byte [rdx + r12 + 29], 14 + QUAD $0x061d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 6 + QUAD $0x071d2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 29], 7 + QUAD $0x081d22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 29], 8 + QUAD $0x091d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 10 + QUAD $0x000000d824848b4c // mov r8, qword [rsp + 216] + QUAD $0x0b1d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 11 + LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] + QUAD $0x0c1d22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 29], 12 + QUAD $0x0d1d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e1d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 14 + LONG $0x24748b4c; BYTE $0x60 // mov r14, qword [rsp + 96] + QUAD $0x0f1d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 15 + LONG $0x74b60f42; WORD $0x1d1a // movzx esi, byte [rdx + r11 + 29] + LONG $0xde6ef9c5 // vmovd xmm3, esi + QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] + QUAD $0x011d1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 29], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x021d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 2 + QUAD $0x031d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 3 + QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] + QUAD $0x041d2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 29], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x051d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 5 + QUAD $0x061d3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 29], 6 + QUAD $0x071d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x081d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 8 + QUAD $0x091d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 29], 9 + WORD $0x8949; BYTE $0xdf // mov r15, rbx + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 10 + QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] + QUAD $0x0b1d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 11 + QUAD $0x0c1d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 12 + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x0d1d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 13 + QUAD $0x0e1d12642061a3c4 // vpinsrb xmm4, xmm3, byte [rdx + r10 + 29], 14 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00028024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 640], ymm0 - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x0f1d22442059a3c4 // vpinsrb xmm0, xmm4, byte [rdx + r12 + 29], 15 + QUAD $0x0000014024948b4c // mov r10, qword [rsp + 320] + QUAD $0x0f1d12442059a3c4 // vpinsrb xmm0, xmm4, byte [rdx + r10 + 29], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x00026024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 608], ymm0 - QUAD $0x0000010024bc8b48 // mov rdi, 
qword [rsp + 256] - LONG $0x3a74b60f; BYTE $0x1e // movzx esi, byte [rdx + rdi + 30] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + LONG $0x0274b60f; BYTE $0x1e // movzx esi, byte [rdx + rax + 30] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x011e2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 30], 1 - LONG $0x3a74b60f; BYTE $0x1f // movzx esi, byte [rdx + rdi + 31] + LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + QUAD $0x011e1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 30], 1 + LONG $0x0274b60f; BYTE $0x1f // movzx esi, byte [rdx + rax + 31] LONG $0xce6ef9c5 // vmovd xmm1, esi - QUAD $0x011f2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 31], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x011f1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 31], 1 + QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] QUAD $0x021e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 2 QUAD $0x021f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 2 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x031e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 3 QUAD $0x031f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 3 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x041e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 4 QUAD $0x041f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] - QUAD $0x051e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 5 - QUAD $0x051f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 5 - QUAD $0x061e1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 30], 6 - QUAD $0x061f1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 31], 6 - QUAD $0x0000011024bc8b48 // mov rdi, qword [rsp + 272] - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + WORD $0x894c; BYTE $0xc8 // mov rax, r9 + QUAD $0x051e0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 30], 5 + QUAD $0x051f0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 31], 5 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x061e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 6 + QUAD $0x061f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 6 + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] QUAD $0x071e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 7 QUAD $0x071f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x00000110249c8b48 // mov rbx, qword [rsp + 272] + QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] QUAD $0x081e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 8 QUAD $0x081f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 8 - QUAD $0x091e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 30], 9 - QUAD $0x091f0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 31], 9 - QUAD $0x0a1e32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 30], 10 - QUAD $0x0a1f324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 31], 10 + QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] + QUAD $0x091e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 9 + QUAD $0x091f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 9 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b1e02442079e3c4 // 
vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 11 - QUAD $0x0b1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 12 - QUAD $0x0c1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 12 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x0a1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 10 + QUAD $0x0a1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 10 + WORD $0x894c; BYTE $0xc0 // mov rax, r8 + QUAD $0x0b1e02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 30], 11 + QUAD $0x0b1f024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 31], 11 + QUAD $0x0c1e22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 30], 12 + QUAD $0x0c1f224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 31], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0d1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 13 QUAD $0x0d1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 13 - WORD $0x8948; BYTE $0xd8 // mov rax, rbx - QUAD $0x0e1e1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 30], 14 - QUAD $0x0e1f1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 31], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0f1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 15 - QUAD $0x0f1f02542071e3c4 // vpinsrb xmm2, xmm1, byte [rdx + rax + 31], 15 - WORD $0x894c; BYTE $0xc6 // mov rsi, r8 - LONG $0x44b60f42; WORD $0x1e02 // movzx eax, byte [rdx + r8 + 30] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 14 + QUAD $0x0e1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 14 + QUAD $0x0f1e32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 30], 15 + QUAD $0x0f1f32542071a3c4 // vpinsrb xmm2, xmm1, byte [rdx + r14 + 31], 15 + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3244b60f; BYTE $0x1e // movzx eax, byte [rdx + rsi + 30] LONG $0xc86ef9c5 // vmovd xmm1, eax - QUAD $0x011e3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 30], 1 - LONG $0x44b60f42; WORD $0x1f02 // movzx eax, byte [rdx + r8 + 31] + QUAD $0x011e1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 30], 1 + LONG $0x3244b60f; BYTE $0x1f // movzx eax, byte [rdx + rsi + 31] LONG $0xf86ef9c5 // vmovd xmm7, eax - QUAD $0x011f3a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 31], 1 - QUAD $0x021e124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 30], 2 - QUAD $0x021f127c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r10 + 31], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x011f1a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r11 + 31], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x021e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 2 + QUAD $0x021f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x031e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 3 QUAD $0x031f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 3 - QUAD $0x041e0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 30], 4 - QUAD $0x041f0a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r9 + 31], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x041e2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 30], 4 + QUAD $0x041f2a7c2041a3c4 // vpinsrb xmm7, 
xmm7, byte [rdx + r13 + 31], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x051e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 5 QUAD $0x051f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x061e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 6 QUAD $0x061f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] QUAD $0x071e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 7 QUAD $0x071f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 7 - QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x081e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 8 QUAD $0x081f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x091e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 9 - QUAD $0x091f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x091e3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 30], 9 + QUAD $0x091f3a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 31], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 10 QUAD $0x0a1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] - QUAD $0x0b1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 11 - QUAD $0x0b1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0b1e3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 30], 11 + QUAD $0x0b1f3a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rdi + 31], 11 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 12 QUAD $0x0c1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 12 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0d1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 13 - QUAD $0x0d1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + WORD $0x8948; BYTE $0xc8 // mov rax, rcx + QUAD $0x0d1e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 30], 13 + QUAD $0x0d1f0a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rcx + 31], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 14 QUAD $0x0e1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 14 - WORD $0x894c; BYTE $0xe0 // mov rax, r12 - QUAD $0x0f1e224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 30], 15 - QUAD $0x0f1f227c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r12 + 31], 15 + QUAD $0x0f1e124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 30], 15 + QUAD $0x0f1f127c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r10 + 31], 15 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00014024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 320], ymm0 LONG $0x3845e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm7, xmm2, 1 @@ -14596,71 +15238,71 @@ LBB2_166: LONG $0x3865e3c4; WORD $0x01e0 // 
vinserti128 ymm4, ymm3, xmm0, 1 LONG $0x4665e3c4; WORD $0x31c0 // vperm2i128 ymm0, ymm3, ymm0, 49 QUAD $0x00000198248c8b48 // mov rcx, qword [rsp + 408] - LONG $0x447ffec5; WORD $0x608f // vmovdqu yword [rdi + 4*rcx + 96], ymm0 - LONG $0x547ffec5; WORD $0x408f // vmovdqu yword [rdi + 4*rcx + 64], ymm2 - LONG $0x647ffec5; WORD $0x208f // vmovdqu yword [rdi + 4*rcx + 32], ymm4 - LONG $0x0c7ffec5; BYTE $0x8f // vmovdqu yword [rdi + 4*rcx], ymm1 + LONG $0x447ffec5; WORD $0x608b // vmovdqu yword [rbx + 4*rcx + 96], ymm0 + LONG $0x547ffec5; WORD $0x408b // vmovdqu yword [rbx + 4*rcx + 64], ymm2 + LONG $0x647ffec5; WORD $0x208b // vmovdqu yword [rbx + 4*rcx + 32], ymm4 + LONG $0x0c7ffec5; BYTE $0x8b // vmovdqu yword [rbx + 4*rcx], ymm1 LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000180248c3b48 // cmp rcx, qword [rsp + 384] - JNE LBB2_166 + JNE LBB2_167 QUAD $0x0000018824bc8b4c // mov r15, qword [rsp + 392] QUAD $0x0000018024bc3b4c // cmp r15, qword [rsp + 384] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - LONG $0x24748b44; BYTE $0x1c // mov r14d, dword [rsp + 28] + LONG $0x245c8b44; BYTE $0x1c // mov r11d, dword [rsp + 28] QUAD $0x0000019024a48b4c // mov r12, qword [rsp + 400] JNE LBB2_43 JMP LBB2_129 -LBB2_168: +LBB2_169: LONG $0xe0e78349 // and r15, -32 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx QUAD $0x0000019024848948 // mov qword [rsp + 400], rax QUAD $0x0000018024bc894c // mov qword [rsp + 384], r15 - LONG $0xbb048d4b // lea rax, [r11 + 4*r15] + LONG $0xbe048d4b // lea rax, [r14 + 4*r15] QUAD $0x0000017824848948 // mov qword [rsp + 376], rax - LONG $0x6e79c1c4; BYTE $0xc6 // vmovd xmm0, r14d + LONG $0x6e79c1c4; BYTE $0xc3 // vmovd xmm0, r11d LONG $0x787de2c4; BYTE $0xc0 // vpbroadcastb ymm0, xmm0 QUAD $0x00020024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 512], ymm0 WORD $0xc031 // xor eax, eax - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 -LBB2_169: +LBB2_170: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000019824848948 // mov qword [rsp + 408], rax LONG $0x05e3c148 // shl rbx, 5 WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x20c88348 // or rax, 32 - LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax + QUAD $0x000000c024848948 // mov qword [rsp + 192], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x40c88348 // or rax, 64 - LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax + LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x60c88348 // or rax, 96 - QUAD $0x000000b024848948 // mov qword [rsp + 176], rax + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00800d48; WORD $0x0000 // or rax, 128 - LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax + QUAD $0x000000a824848948 // mov qword [rsp + 168], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00a00d48; WORD $0x0000 // or rax, 160 - LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax + QUAD $0x0000008824848948 // mov qword [rsp + 136], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00c00d48; WORD $0x0000 // or rax, 192 - QUAD $0x000000a024848948 // mov qword [rsp + 160], rax + QUAD $0x0000009824848948 // mov qword [rsp + 152], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00e00d48; WORD $0x0000 // or rax, 224 - QUAD $0x0000009024848948 // mov qword [rsp + 144], rax + QUAD 
$0x000000f824848948 // mov qword [rsp + 248], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01000d48; WORD $0x0000 // or rax, 256 - QUAD $0x0000008824848948 // mov qword [rsp + 136], rax + QUAD $0x000000a024848948 // mov qword [rsp + 160], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01200d48; WORD $0x0000 // or rax, 288 - QUAD $0x0000009824848948 // mov qword [rsp + 152], rax + QUAD $0x000000b024848948 // mov qword [rsp + 176], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01400d48; WORD $0x0000 // or rax, 320 - QUAD $0x0000014024848948 // mov qword [rsp + 320], rax + LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x02000d48; WORD $0x0000 // or rax, 512 WORD $0x8948; BYTE $0xc1 // mov rcx, rax @@ -14669,1918 +15311,1929 @@ LBB2_169: LONG $0x1a04b60f // movzx eax, byte [rdx + rbx] LONG $0xd86ef9c5 // vmovd xmm3, eax LONG $0x0a44b60f; BYTE $0x01 // movzx eax, byte [rdx + rcx + 1] + WORD $0x8948; BYTE $0xce // mov rsi, rcx LONG $0xe06ef9c5 // vmovd xmm4, eax + WORD $0x8948; BYTE $0xd9 // mov rcx, rbx LONG $0x1a44b60f; BYTE $0x01 // movzx eax, byte [rdx + rbx + 1] LONG $0xd06e79c5 // vmovd xmm10, eax - LONG $0x0a44b60f; BYTE $0x02 // movzx eax, byte [rdx + rcx + 2] - WORD $0x8948; BYTE $0xcf // mov rdi, rcx + LONG $0x3244b60f; BYTE $0x02 // movzx eax, byte [rdx + rsi + 2] LONG $0xc86ef9c5 // vmovd xmm1, eax QUAD $0x0001e0248c7ff9c5; BYTE $0x00 // vmovdqa oword [rsp + 480], xmm1 - WORD $0x8948; BYTE $0xd9 // mov rcx, rbx LONG $0x1a44b60f; BYTE $0x02 // movzx eax, byte [rdx + rbx + 2] LONG $0xc86ef9c5 // vmovd xmm1, eax QUAD $0x0001c0248c7ff9c5; BYTE $0x00 // vmovdqa oword [rsp + 448], xmm1 - LONG $0x3a44b60f; BYTE $0x03 // movzx eax, byte [rdx + rdi + 3] + LONG $0x3244b60f; BYTE $0x03 // movzx eax, byte [rdx + rsi + 3] LONG $0xd86e79c5 // vmovd xmm11, eax LONG $0x1a44b60f; BYTE $0x03 // movzx eax, byte [rdx + rbx + 3] LONG $0xc06e79c5 // vmovd xmm8, eax - LONG $0x3a44b60f; BYTE $0x04 // movzx eax, byte [rdx + rdi + 4] + LONG $0x3244b60f; BYTE $0x04 // movzx eax, byte [rdx + rsi + 4] LONG $0xc86ef9c5 // vmovd xmm1, eax QUAD $0x0001a0248c7ff9c5; BYTE $0x00 // vmovdqa oword [rsp + 416], xmm1 LONG $0x1a44b60f; BYTE $0x04 // movzx eax, byte [rdx + rbx + 4] LONG $0xe86e79c5 // vmovd xmm13, eax - LONG $0x3a44b60f; BYTE $0x05 // movzx eax, byte [rdx + rdi + 5] + LONG $0x3244b60f; BYTE $0x05 // movzx eax, byte [rdx + rsi + 5] LONG $0xf06e79c5 // vmovd xmm14, eax LONG $0x1a44b60f; BYTE $0x05 // movzx eax, byte [rdx + rbx + 5] LONG $0xf06ef9c5 // vmovd xmm6, eax - LONG $0x3a44b60f; BYTE $0x06 // movzx eax, byte [rdx + rdi + 6] - QUAD $0x0000010024bc8948 // mov qword [rsp + 256], rdi + LONG $0x3244b60f; BYTE $0x06 // movzx eax, byte [rdx + rsi + 6] + QUAD $0x000000f024b48948 // mov qword [rsp + 240], rsi LONG $0xe06e79c5 // vmovd xmm12, eax LONG $0x1a44b60f; BYTE $0x06 // movzx eax, byte [rdx + rbx + 6] LONG $0xf86ef9c5 // vmovd xmm7, eax - LONG $0x3a44b60f; BYTE $0x07 // movzx eax, byte [rdx + rdi + 7] + LONG $0x3244b60f; BYTE $0x07 // movzx eax, byte [rdx + rsi + 7] LONG $0xd06ef9c5 // vmovd xmm2, eax LONG $0x1a44b60f; BYTE $0x07 // movzx eax, byte [rdx + rbx + 7] LONG $0xc86ef9c5 // vmovd xmm1, eax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01600d48; WORD $0x0000 // or rax, 352 - QUAD $0x000000d824848948 // mov qword [rsp + 216], rax + QUAD $0x0000008024848948 // mov qword [rsp + 128], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01800d48; WORD $0x0000 // or rax, 384 - QUAD $0x0000012024848948 // mov qword 
[rsp + 288], rax + LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01a00d48; WORD $0x0000 // or rax, 416 LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax + QUAD $0x0000012024848948 // mov qword [rsp + 288], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01e00d48; WORD $0x0000 // or rax, 480 - LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + QUAD $0x0000014024848948 // mov qword [rsp + 320], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x02200d48; WORD $0x0000 // or rax, 544 - QUAD $0x000000e824848948 // mov qword [rsp + 232], rax + LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax LONG $0x40cb8148; WORD $0x0002; BYTE $0x00 // or rbx, 576 - QUAD $0x000000a8249c8948 // mov qword [rsp + 168], rbx + QUAD $0x00000108249c8948 // mov qword [rsp + 264], rbx WORD $0x8948; BYTE $0xc8 // mov rax, rcx LONG $0x02600d48; WORD $0x0000 // or rax, 608 - LONG $0x24448948; BYTE $0x70 // mov qword [rsp + 112], rax - WORD $0x8949; BYTE $0xcc // mov r12, rcx - LONG $0x80cc8149; WORD $0x0002; BYTE $0x00 // or r12, 640 - QUAD $0x000000f024a4894c // mov qword [rsp + 240], r12 + QUAD $0x000000c824848948 // mov qword [rsp + 200], rax WORD $0x8949; BYTE $0xce // mov r14, rcx - LONG $0xa0ce8149; WORD $0x0002; BYTE $0x00 // or r14, 672 - QUAD $0x000000f824b4894c // mov qword [rsp + 248], r14 - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - LONG $0x02c00d48; WORD $0x0000 // or rax, 704 - LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax + LONG $0x80ce8149; WORD $0x0002; BYTE $0x00 // or r14, 640 + LONG $0x2474894c; BYTE $0x70 // mov qword [rsp + 112], r14 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - LONG $0x02e00d48; WORD $0x0000 // or rax, 736 - WORD $0x8948; BYTE $0xc7 // mov rdi, rax - WORD $0x8949; BYTE $0xc9 // mov r9, rcx - LONG $0x00c98149; WORD $0x0003; BYTE $0x00 // or r9, 768 - QUAD $0x000000c0248c894c // mov qword [rsp + 192], r9 + LONG $0x02a00d48; WORD $0x0000 // or rax, 672 + QUAD $0x000000b824848948 // mov qword [rsp + 184], rax + WORD $0x8949; BYTE $0xcc // mov r12, rcx + LONG $0xc0cc8149; WORD $0x0002; BYTE $0x00 // or r12, 704 + QUAD $0x0000009024a4894c // mov qword [rsp + 144], r12 + WORD $0x8948; BYTE $0xcf // mov rdi, rcx + LONG $0xe0cf8148; WORD $0x0002; BYTE $0x00 // or rdi, 736 + QUAD $0x000000d024bc8948 // mov qword [rsp + 208], rdi WORD $0x8949; BYTE $0xcf // mov r15, rcx - LONG $0x20cf8149; WORD $0x0003; BYTE $0x00 // or r15, 800 - QUAD $0x000000b824bc894c // mov qword [rsp + 184], r15 + LONG $0x00cf8149; WORD $0x0003; BYTE $0x00 // or r15, 768 + QUAD $0x000000e024bc894c // mov qword [rsp + 224], r15 WORD $0x8949; BYTE $0xcb // mov r11, rcx - LONG $0x40cb8149; WORD $0x0003; BYTE $0x00 // or r11, 832 - QUAD $0x000000e0249c894c // mov qword [rsp + 224], r11 + LONG $0x20cb8149; WORD $0x0003; BYTE $0x00 // or r11, 800 + QUAD $0x000000e8249c894c // mov qword [rsp + 232], r11 WORD $0x8949; BYTE $0xca // mov r10, rcx - LONG $0x60ca8149; WORD $0x0003; BYTE $0x00 // or r10, 864 + LONG $0x40ca8149; WORD $0x0003; BYTE $0x00 // or r10, 832 LONG $0x2454894c; BYTE $0x58 // mov qword [rsp + 88], r10 + WORD $0x8949; BYTE $0xc9 // mov r9, rcx + LONG $0x60c98149; WORD $0x0003; BYTE $0x00 // or r9, 864 + QUAD $0x000000d8248c894c // mov qword [rsp + 216], r9 WORD $0x8949; BYTE $0xc8 // mov r8, rcx LONG $0x80c88149; WORD $0x0003; BYTE $0x00 // or r8, 896 - QUAD 
$0x000000802484894c // mov qword [rsp + 128], r8 + LONG $0x2444894c; BYTE $0x50 // mov qword [rsp + 80], r8 WORD $0x8948; BYTE $0xce // mov rsi, rcx LONG $0xa0ce8148; WORD $0x0003; BYTE $0x00 // or rsi, 928 - QUAD $0x000000d024b48948 // mov qword [rsp + 208], rsi + LONG $0x24748948; BYTE $0x48 // mov qword [rsp + 72], rsi WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x00000108248c8948 // mov qword [rsp + 264], rcx + QUAD $0x00000100248c8948 // mov qword [rsp + 256], rcx LONG $0x03c00d48; WORD $0x0000 // or rax, 960 - LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax + LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax LONG $0xe0c98148; WORD $0x0003; BYTE $0x00 // or rcx, 992 - LONG $0x244c8948; BYTE $0x50 // mov qword [rsp + 80], rcx - QUAD $0x000000e824ac8b4c // mov r13, qword [rsp + 232] + LONG $0x244c8948; BYTE $0x60 // mov qword [rsp + 96], rcx + LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] LONG $0x207923c4; WORD $0x2a0c; BYTE $0x01 // vpinsrb xmm9, xmm0, byte [rdx + r13], 1 LONG $0x2031e3c4; WORD $0x1a04; BYTE $0x02 // vpinsrb xmm0, xmm9, byte [rdx + rbx], 2 - LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] + QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x03 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 3 - LONG $0x2079a3c4; WORD $0x2204; BYTE $0x04 // vpinsrb xmm0, xmm0, byte [rdx + r12], 4 - LONG $0x2079a3c4; WORD $0x3204; BYTE $0x05 // vpinsrb xmm0, xmm0, byte [rdx + r14], 5 - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] - LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x06 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 6 + LONG $0x2079a3c4; WORD $0x3204; BYTE $0x04 // vpinsrb xmm0, xmm0, byte [rdx + r14], 4 + QUAD $0x000000b8249c8b48 // mov rbx, qword [rsp + 184] + LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x05 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 5 + LONG $0x2079a3c4; WORD $0x2204; BYTE $0x06 // vpinsrb xmm0, xmm0, byte [rdx + r12], 6 LONG $0x2079e3c4; WORD $0x3a04; BYTE $0x07 // vpinsrb xmm0, xmm0, byte [rdx + rdi], 7 - WORD $0x8949; BYTE $0xfd // mov r13, rdi - QUAD $0x000000c824bc8948 // mov qword [rsp + 200], rdi - LONG $0x2079a3c4; WORD $0x0a04; BYTE $0x08 // vpinsrb xmm0, xmm0, byte [rdx + r9], 8 - LONG $0x2079a3c4; WORD $0x3a04; BYTE $0x09 // vpinsrb xmm0, xmm0, byte [rdx + r15], 9 - LONG $0x2079a3c4; WORD $0x1a04; BYTE $0x0a // vpinsrb xmm0, xmm0, byte [rdx + r11], 10 - LONG $0x2079a3c4; WORD $0x1204; BYTE $0x0b // vpinsrb xmm0, xmm0, byte [rdx + r10], 11 + LONG $0x2079a3c4; WORD $0x3a04; BYTE $0x08 // vpinsrb xmm0, xmm0, byte [rdx + r15], 8 + LONG $0x2079a3c4; WORD $0x1a04; BYTE $0x09 // vpinsrb xmm0, xmm0, byte [rdx + r11], 9 + LONG $0x2079a3c4; WORD $0x1204; BYTE $0x0a // vpinsrb xmm0, xmm0, byte [rdx + r10], 10 + LONG $0x2079a3c4; WORD $0x0a04; BYTE $0x0b // vpinsrb xmm0, xmm0, byte [rdx + r9], 11 LONG $0x2079a3c4; WORD $0x0204; BYTE $0x0c // vpinsrb xmm0, xmm0, byte [rdx + r8], 12 LONG $0x2079e3c4; WORD $0x3204; BYTE $0x0d // vpinsrb xmm0, xmm0, byte [rdx + rsi], 13 LONG $0x2079e3c4; WORD $0x0204; BYTE $0x0e // vpinsrb xmm0, xmm0, byte [rdx + rax], 14 LONG $0x2079e3c4; WORD $0x0a04; BYTE $0x0f // vpinsrb xmm0, xmm0, byte [rdx + rcx], 15 - LONG $0x24748b4c; BYTE $0x78 // mov r14, qword [rsp + 120] + QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] LONG $0x2061a3c4; WORD $0x321c; BYTE $0x01 // vpinsrb xmm3, xmm3, byte [rdx + r14], 1 - LONG $0x24548b4c; BYTE $0x40 // mov r10, qword [rsp + 64] - LONG $0x2061a3c4; WORD $0x121c; BYTE $0x02 // vpinsrb xmm3, xmm3, byte [rdx + r10], 2 - 
QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] - LONG $0x2061a3c4; WORD $0x221c; BYTE $0x03 // vpinsrb xmm3, xmm3, byte [rdx + r12], 3 - LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] - LONG $0x2061a3c4; WORD $0x021c; BYTE $0x04 // vpinsrb xmm3, xmm3, byte [rdx + r8], 4 - LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] + LONG $0x24648b4c; BYTE $0x78 // mov r12, qword [rsp + 120] + LONG $0x2061a3c4; WORD $0x221c; BYTE $0x02 // vpinsrb xmm3, xmm3, byte [rdx + r12], 2 + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] + LONG $0x2061a3c4; WORD $0x021c; BYTE $0x03 // vpinsrb xmm3, xmm3, byte [rdx + r8], 3 + QUAD $0x000000a8248c8b4c // mov r9, qword [rsp + 168] + LONG $0x2061a3c4; WORD $0x0a1c; BYTE $0x04 // vpinsrb xmm3, xmm3, byte [rdx + r9], 4 + QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] LONG $0x2061a3c4; WORD $0x1a1c; BYTE $0x05 // vpinsrb xmm3, xmm3, byte [rdx + r11], 5 - QUAD $0x000000a0248c8b4c // mov r9, qword [rsp + 160] - LONG $0x2061a3c4; WORD $0x0a1c; BYTE $0x06 // vpinsrb xmm3, xmm3, byte [rdx + r9], 6 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] + QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] + LONG $0x2061a3c4; WORD $0x121c; BYTE $0x06 // vpinsrb xmm3, xmm3, byte [rdx + r10], 6 + QUAD $0x000000f824bc8b4c // mov r15, qword [rsp + 248] LONG $0x2061a3c4; WORD $0x3a1c; BYTE $0x07 // vpinsrb xmm3, xmm3, byte [rdx + r15], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] LONG $0x2061e3c4; WORD $0x321c; BYTE $0x08 // vpinsrb xmm3, xmm3, byte [rdx + rsi], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] LONG $0x2061e3c4; WORD $0x021c; BYTE $0x09 // vpinsrb xmm3, xmm3, byte [rdx + rax], 9 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] + LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] LONG $0x2061e3c4; WORD $0x1a1c; BYTE $0x0a // vpinsrb xmm3, xmm3, byte [rdx + rbx], 10 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] LONG $0x2061e3c4; WORD $0x0a1c; BYTE $0x0b // vpinsrb xmm3, xmm3, byte [rdx + rcx], 11 - QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x0c // vpinsrb xmm3, xmm3, byte [rdx + rdi], 12 LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x0d // vpinsrb xmm3, xmm3, byte [rdx + rdi], 13 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x0e // vpinsrb xmm3, xmm3, byte [rdx + rdi], 14 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x0f // vpinsrb xmm3, xmm3, byte [rdx + rdi], 15 - QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x01013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 1 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] - QUAD $0x02013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 2 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x0000010824ac8b4c // mov r13, qword [rsp + 264] + QUAD $0x02012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 2 + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] 
QUAD $0x03013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 3 - QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] - QUAD $0x04013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 4 - QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] + LONG $0x246c8b4c; BYTE $0x70 // mov r13, qword [rsp + 112] + QUAD $0x04012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 4 + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] QUAD $0x05013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 5 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] - QUAD $0x06013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 6 + QUAD $0x0000009024ac8b4c // mov r13, qword [rsp + 144] + QUAD $0x06012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 6 + QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] QUAD $0x07012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 7 - QUAD $0x000000c024ac8b4c // mov r13, qword [rsp + 192] - QUAD $0x08012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 8 - QUAD $0x000000b824ac8b4c // mov r13, qword [rsp + 184] - QUAD $0x09012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 9 QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] - QUAD $0x0a013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 10 + QUAD $0x08013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 8 + QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] + QUAD $0x09013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 9 LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + QUAD $0x0a013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 10 + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] QUAD $0x0b013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 11 - QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] + LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] QUAD $0x0c013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 12 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x0d013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x0e013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 14 - LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0f013a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 1], 15 QUAD $0x0101326c2029a3c4 // vpinsrb xmm5, xmm10, byte [rdx + r14 + 1], 1 - QUAD $0x0201126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 1], 2 - QUAD $0x0301226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 1], 3 - QUAD $0x0401026c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r8 + 1], 4 + QUAD $0x0201226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 1], 2 + WORD $0x894c; BYTE $0xe7 // mov rdi, r12 + QUAD $0x0301026c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r8 + 1], 3 + QUAD $0x04010a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 1], 4 QUAD $0x05011a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 1], 5 - QUAD $0x06010a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 1], 6 + QUAD $0x0601126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 1], 6 QUAD $0x07013a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r15 + 1], 7 QUAD $0x0801326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 1], 8 QUAD $0x0901026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 9 QUAD 
$0x0a011a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 1], 10 QUAD $0x0b010a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 1], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 12 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 14 LONG $0x386563c4; WORD $0x01f8 // vinserti128 ymm15, ymm3, xmm0, 1 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0f0102442051e3c4 // vpinsrb xmm0, xmm5, byte [rdx + rax + 1], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x0274b60f; BYTE $0x08 // movzx esi, byte [rdx + rax + 8] LONG $0xce6e79c5 // vmovd xmm9, esi LONG $0x387de3c4; WORD $0x01c4 // vinserti128 ymm0, ymm0, xmm4, 1 QUAD $0x0004c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1216], ymm0 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] LONG $0x0274b60f; BYTE $0x08 // movzx esi, byte [rdx + rax + 8] LONG $0xd66e79c5 // vmovd xmm10, esi - QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] + LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] QUAD $0x0001e024846ff9c5; BYTE $0x00 // vmovdqa xmm0, oword [rsp + 480] QUAD $0x010202442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 2], 1 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] - QUAD $0x02020a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 2], 2 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] + QUAD $0x00000108248c8b4c // mov r9, qword [rsp + 264] + QUAD $0x02020a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 2], 2 + QUAD $0x000000c824948b4c // mov r10, qword [rsp + 200] QUAD $0x030212442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 2], 3 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x040202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x050202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 5 - LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] - QUAD $0x06020a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 2], 6 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] - QUAD $0x07023a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 2], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] - QUAD $0x080202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 8 - WORD $0x894d; BYTE $0xec // mov r12, r13 - QUAD $0x09022a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 2], 9 - QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x060202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 6 + QUAD $0x07022a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 2], 7 + QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] + QUAD $0x08020a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 2], 8 + QUAD $0x000000e824a48b4c // mov r12, qword [rsp + 232] + QUAD $0x090222442079a3c4 // 
vpinsrb xmm0, xmm0, byte [rdx + r12 + 2], 9 + LONG $0x246c8b4c; BYTE $0x58 // mov r13, qword [rsp + 88] QUAD $0x0a022a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 2], 10 - LONG $0x245c8b4c; BYTE $0x58 // mov r11, qword [rsp + 88] + QUAD $0x000000d8249c8b4c // mov r11, qword [rsp + 216] QUAD $0x0b021a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 2], 11 - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] + LONG $0x24748b4c; BYTE $0x50 // mov r14, qword [rsp + 80] QUAD $0x0c0232442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 2], 12 - QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] + LONG $0x247c8b4c; BYTE $0x48 // mov r15, qword [rsp + 72] QUAD $0x0d023a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 2], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0e0202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0f0202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 15 - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] QUAD $0x0001c0249c6ff9c5; BYTE $0x00 // vmovdqa xmm3, oword [rsp + 448] QUAD $0x0102025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 2], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x0202325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 2 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x02023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0302325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] QUAD $0x0402325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] QUAD $0x0502325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x0602325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] QUAD $0x0702325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 7 - QUAD $0x00000088249c8b48 // mov rbx, qword [rsp + 136] + QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] QUAD $0x08021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 8 - QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] - QUAD $0x09021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 9 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] - QUAD $0x0a021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 10 - QUAD $0x000000d8249c8b48 // mov rbx, qword [rsp + 216] - QUAD $0x0b021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 11 - QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] - QUAD $0x0c021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 12 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x0d021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 13 - LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] - QUAD $0x0e021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 14 - LONG $0x245c8b48; BYTE 
$0x38 // mov rbx, qword [rsp + 56] - QUAD $0x0f021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 15 + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x09023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 9 + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + QUAD $0x0a023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 10 + QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] + QUAD $0x0b023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 11 + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + QUAD $0x0c023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 12 + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x0d023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 13 + QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] + QUAD $0x0e023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 14 + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] + QUAD $0x0f023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 15 QUAD $0x010302642021a3c4 // vpinsrb xmm4, xmm11, byte [rdx + r8 + 3], 1 - QUAD $0x02030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 2 + QUAD $0x02030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 2 QUAD $0x030312642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r10 + 3], 3 - QUAD $0x000000f0249c8b48 // mov rbx, qword [rsp + 240] - QUAD $0x04031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 4 - QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] - QUAD $0x05030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 5 - QUAD $0x06030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 6 - QUAD $0x07033a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 3], 7 - QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] - QUAD $0x08033a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 3], 8 + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x04033a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 3], 4 + QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] + QUAD $0x050302642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 3], 5 + QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] + QUAD $0x06033a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 3], 6 + QUAD $0x000000d0248c8b4c // mov r9, qword [rsp + 208] + QUAD $0x07030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 7 + QUAD $0x08030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 8 QUAD $0x090322642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 3], 9 QUAD $0x0a032a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 3], 10 QUAD $0x0b031a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 3], 11 + WORD $0x894d; BYTE $0xdc // mov r12, r11 QUAD $0x0c0332642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r14 + 3], 12 QUAD $0x0d033a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 3], 13 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] - QUAD $0x0e030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 14 - LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] - QUAD $0x0f033a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 3], 15 - QUAD $0x0103026c2039e3c4 // vpinsrb xmm5, xmm8, byte [rdx + rax + 3], 1 LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] - QUAD $0x02031a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 3], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0e031a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 3], 14 + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + QUAD 
$0x0f030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 15 + QUAD $0x0103026c2039e3c4 // vpinsrb xmm5, xmm8, byte [rdx + rax + 3], 1 + WORD $0x8949; BYTE $0xc5 // mov r13, rax + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x0203026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0303026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0403026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 4 - LONG $0x24548b4c; BYTE $0x60 // mov r10, qword [rsp + 96] - QUAD $0x0503126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 3], 5 - QUAD $0x000000a024b48b4c // mov r14, qword [rsp + 160] - QUAD $0x0603326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 3], 6 - QUAD $0x0703326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 3], 7 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] - QUAD $0x0803026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 8 - QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] - QUAD $0x09031a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 3], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0503026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 5 + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x0603026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 6 + QUAD $0x0703326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 3], 7 + QUAD $0x08031a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 3], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0903026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 12 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 13 LONG $0x3865e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm3, xmm0, 1 QUAD $0x0001e024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 480], ymm0 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e0302442051e3c4 // vpinsrb xmm0, xmm5, byte [rdx + rax + 3], 14 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x0274b60f; BYTE $0x09 // movzx esi, byte [rdx + rax + 9] + QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] + LONG $0x0a74b60f; BYTE $0x09 // movzx esi, byte [rdx + rcx + 9] LONG $0xc66e79c5 // vmovd xmm8, esi - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x0f0322442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 3], 15 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f0302442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 3], 15 LONG $0x387de3c4; WORD $0x01c4 // vinserti128 ymm0, ymm0, xmm4, 1 QUAD $0x0001c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 448], ymm0 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] - LONG $0x0274b60f; BYTE $0x09 // movzx esi, byte [rdx + 
rax + 9] + QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] + LONG $0x0a74b60f; BYTE $0x09 // movzx esi, byte [rdx + rcx + 9] LONG $0xde6e79c5 // vmovd xmm11, esi + LONG $0x24748b4c; BYTE $0x68 // mov r14, qword [rsp + 104] QUAD $0x0001a024846ff9c5; BYTE $0x00 // vmovdqa xmm0, oword [rsp + 416] - QUAD $0x010402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 4], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] - QUAD $0x020402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 2 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x030402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 3 - QUAD $0x000000f024ac8b4c // mov r13, qword [rsp + 240] - QUAD $0x04042a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 4], 4 - QUAD $0x05040a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 4], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x060402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 6 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x070402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 7 - QUAD $0x08043a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 4], 8 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x090402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 9 - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] - QUAD $0x0a0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 10 + QUAD $0x010432442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 4], 1 + QUAD $0x0000010824bc8b4c // mov r15, qword [rsp + 264] + QUAD $0x02043a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 4], 2 + QUAD $0x030412442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 4], 3 + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x04043a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 4], 4 + WORD $0x894c; BYTE $0xc1 // mov rcx, r8 + QUAD $0x050402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 4], 5 + QUAD $0x00000090249c8b48 // mov rbx, qword [rsp + 144] + QUAD $0x06041a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 4], 6 + QUAD $0x07040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 7 + QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] + QUAD $0x08040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 8 + QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] + QUAD $0x090402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 4], 9 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 12 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] - QUAD $0x0d0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 13 - QUAD $0x0e040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 14 - QUAD $0x0f043a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 4], 15 - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x0104025c2011e3c4 // vpinsrb xmm3, xmm13, byte [rdx + rax + 4], 1 - QUAD $0x02041a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 4], 2 - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] - QUAD $0x03041a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 4], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x0404025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 4 - QUAD $0x0504125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 4], 5 - WORD $0x894c; 
BYTE $0xf6 // mov rsi, r14 - QUAD $0x0604325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 4], 6 - QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x0704125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 4], 7 - QUAD $0x00000088248c8b4c // mov r9, qword [rsp + 136] - QUAD $0x08040a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 4], 8 - QUAD $0x09041a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 4], 9 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] - QUAD $0x0a041a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 4], 10 - QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] - QUAD $0x0b04325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 4], 11 - QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] - QUAD $0x0c041a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 4], 12 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x0d041a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 4], 13 - LONG $0x247c8b4c; BYTE $0x48 // mov r15, qword [rsp + 72] - QUAD $0x0e043a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 4], 14 - QUAD $0x0f04225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 4], 15 - QUAD $0x010502642009a3c4 // vpinsrb xmm4, xmm14, byte [rdx + r8 + 5], 1 - QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] + QUAD $0x0a0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 10 + QUAD $0x0b0422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 4], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c0432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 4], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d0432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 4], 13 + QUAD $0x0e041a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 4], 14 + LONG $0x24648b4c; BYTE $0x60 // mov r12, qword [rsp + 96] + QUAD $0x0f0422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 4], 15 + QUAD $0x01042a5c2011a3c4 // vpinsrb xmm3, xmm13, byte [rdx + r13 + 4], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0204325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0304325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x0404325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x0504025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 5 + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x0604325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 6 + QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] + QUAD $0x0704025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0804025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0904025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 10 + QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] + QUAD $0x0b041a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 4], 11 + LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] + QUAD $0x0c041a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 4], 12 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD 
$0x0d04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 14 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 15 + QUAD $0x010532642009a3c4 // vpinsrb xmm4, xmm14, byte [rdx + r14 + 5], 1 QUAD $0x02053a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 5], 2 - LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] - QUAD $0x03051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 3 - QUAD $0x04052a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 5], 4 + WORD $0x894d; BYTE $0xfe // mov r14, r15 + QUAD $0x030512642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r10 + 5], 3 + QUAD $0x04053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 4 QUAD $0x05050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 5 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x06050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 6 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] - QUAD $0x07050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 7 - QUAD $0x08053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 8 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x09050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 9 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] - QUAD $0x0a050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 10 - LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] - QUAD $0x0b053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 11 - QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] - QUAD $0x0c053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 12 - QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] - QUAD $0x0d052a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 5], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] - QUAD $0x0e053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 14 - LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] - QUAD $0x0f053a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 5], 15 - LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] - QUAD $0x01053a6c2049e3c4 // vpinsrb xmm5, xmm6, byte [rdx + rdi + 5], 1 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] - QUAD $0x02053a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 5], 2 - QUAD $0x03051a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 5], 3 + WORD $0x8949; BYTE $0xca // mov r10, rcx + QUAD $0x06051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 6 + QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] + QUAD $0x07053a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 5], 7 + QUAD $0x08050a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 5], 8 + QUAD $0x090502642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 5], 9 + LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] + QUAD $0x0a0502642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 5], 10 + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x0b050a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 5], 11 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c0502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0d0502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 13 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] 
+ QUAD $0x0e0502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 14 + QUAD $0x0f0522642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 5], 15 + QUAD $0x01052a6c2049a3c4 // vpinsrb xmm5, xmm6, byte [rdx + r13 + 5], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x0205026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0305026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 3 + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0405026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x0505026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 5 QUAD $0x0605326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 5], 6 - QUAD $0x0705126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 5], 7 - QUAD $0x08050a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 5], 8 - QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] - QUAD $0x09050a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 5], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x000000f8249c8b48 // mov rbx, qword [rsp + 248] + QUAD $0x07051a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 5], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0805026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0905026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 10 - QUAD $0x0b05326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 5], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] - QUAD $0x0c05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 12 + QUAD $0x0000008024ac8b4c // mov r13, qword [rsp + 128] + QUAD $0x0b052a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 5], 11 + QUAD $0x0c051a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 5], 12 + WORD $0x894d; BYTE $0xdc // mov r12, r11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 14 LONG $0x386563c4; WORD $0x01f0 // vinserti128 ymm14, ymm3, xmm0, 1 - QUAD $0x0f0522442051a3c4 // vpinsrb xmm0, xmm5, byte [rdx + r12 + 5], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f0502442051e3c4 // vpinsrb xmm0, xmm5, byte [rdx + rax + 5], 15 + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x0274b60f; BYTE $0x0a // movzx esi, byte [rdx + rax + 10] LONG $0xde6ef9c5 // vmovd xmm3, esi LONG $0x387de3c4; WORD $0x01c4 // vinserti128 ymm0, ymm0, xmm4, 1 QUAD $0x0001a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 416], ymm0 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] - LONG $0x0274b60f; BYTE $0x0a // movzx esi, byte [rdx + rax + 10] + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3274b60f; BYTE $0x0a // movzx esi, byte [rdx + rsi + 10] LONG $0xe66ef9c5 // vmovd xmm4, esi - WORD $0x894d; BYTE $0xc6 // mov r14, r8 - QUAD $0x010602442019a3c4 // vpinsrb xmm0, xmm12, byte [rdx + r8 + 6], 1 - QUAD 
$0x02063a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 6], 2 - QUAD $0x03061a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 6], 3 - QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] - QUAD $0x04061a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 6], 4 - QUAD $0x000000f824848b4c // mov r8, qword [rsp + 248] - QUAD $0x050602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 6], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + QUAD $0x01063a442019e3c4 // vpinsrb xmm0, xmm12, byte [rdx + rdi + 6], 1 + QUAD $0x020632442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 6], 2 + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x030602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 3 + LONG $0x24748b4c; BYTE $0x70 // mov r14, qword [rsp + 112] + QUAD $0x040632442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 6], 4 + QUAD $0x050612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 6], 5 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] QUAD $0x060602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 6 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] - QUAD $0x07063a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 6], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] - QUAD $0x080602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 8 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x090602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 9 - QUAD $0x0a060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 10 - LONG $0x24548b4c; BYTE $0x58 // mov r10, qword [rsp + 88] - QUAD $0x0b0612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 6], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c0602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 12 - QUAD $0x0d062a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 6], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 15 - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] + QUAD $0x07063a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 6], 7 + QUAD $0x08060a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 6], 8 + QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] + QUAD $0x090632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 6], 9 + QUAD $0x0a0602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 6], 10 + QUAD $0x0b060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 11 + LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] + QUAD $0x0c0602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 6], 12 + LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] + QUAD $0x0d060a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 6], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e0632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 6], 14 + LONG $0x24548b4c; BYTE $0x60 // mov r10, qword [rsp + 96] + QUAD $0x0f0612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 6], 15 + QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] QUAD $0x01060a6c2041e3c4 // vpinsrb xmm5, xmm7, byte [rdx + rcx + 6], 1 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] - QUAD $0x02060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword 
[rsp + 176] - QUAD $0x03060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 3 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] - QUAD $0x04060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0206326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0306326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x0406326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 4 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] QUAD $0x0506326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 5 - QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] - QUAD $0x06061a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 6], 6 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] - QUAD $0x07060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x08060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 8 - QUAD $0x09060a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 6], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] + QUAD $0x06060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 6 + QUAD $0x07061a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 6], 7 + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] + QUAD $0x08061a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 6], 8 + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] + QUAD $0x09061a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 6], 9 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0a060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 10 - QUAD $0x000000d824a48b4c // mov r12, qword [rsp + 216] - QUAD $0x0b06226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 6], 11 - QUAD $0x00000120248c8b4c // mov r9, qword [rsp + 288] - QUAD $0x0c060a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 6], 12 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] - QUAD $0x0d062a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 6], 13 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + WORD $0x894d; BYTE $0xef // mov r15, r13 + QUAD $0x0b062a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 6], 11 + QUAD $0x0c06226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 6], 12 + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x0d060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 13 + QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] QUAD $0x0e060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 14 - LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] + QUAD $0x0000014024ac8b4c // mov r13, qword [rsp + 320] QUAD $0x0f062a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 6], 15 - QUAD $0x010732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 7], 1 - QUAD $0x02073a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 7], 2 - LONG $0x246c8b4c; BYTE $0x70 // mov r13, qword [rsp + 112] - QUAD $0x03072a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 7], 3 - QUAD $0x04071a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 7], 4 - QUAD $0x050702542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 7], 5 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x06070a542069e3c4 
// vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 6 - QUAD $0x07073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 7 - QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] - QUAD $0x080732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 7], 8 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x09070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 9 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] - QUAD $0x0a070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 10 - QUAD $0x0b0712542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 7], 11 - QUAD $0x0c0702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 12 + QUAD $0x01073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 1 + QUAD $0x0000010824ac8b4c // mov r13, qword [rsp + 264] + QUAD $0x02072a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 7], 2 + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x03073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 3 + QUAD $0x040732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 7], 4 + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x05073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 5 + QUAD $0x060702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 6 QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] - QUAD $0x0d0702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 13 - LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] - QUAD $0x0e073a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 7], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 15 + QUAD $0x070702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 7 + QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] + QUAD $0x08070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 8 + QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + QUAD $0x09070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 9 + LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + QUAD $0x0a073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 10 + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x0b073a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 7], 11 + QUAD $0x0c0702542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 7], 12 + QUAD $0x0d070a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 7], 13 + LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] + QUAD $0x0e070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 14 + QUAD $0x0f0712542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 7], 15 + QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] + QUAD $0x0107324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 7], 1 LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x01070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 1 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] - QUAD $0x02073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x02070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 2 + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x03070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 3 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] - QUAD $0x04073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 4 + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x04070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx 
+ rcx + 7], 4 QUAD $0x0507324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 7], 5 - QUAD $0x06071a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 7], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] - QUAD $0x0707324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 7], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x08070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 8 - QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] - QUAD $0x09073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] + QUAD $0x06070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 6 + QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] + QUAD $0x07070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 7 + QUAD $0x08071a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 7], 8 + QUAD $0x09071a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 7], 9 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0a070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 10 - QUAD $0x0b07224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 7], 11 - QUAD $0x0c070a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 7], 12 + QUAD $0x0b073a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 7], 11 + QUAD $0x0c07224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 7], 12 LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x0d070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 13 LONG $0x3855e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm5, xmm0, 1 QUAD $0x0004a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1184], ymm0 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] QUAD $0x0e070a442071e3c4 // vpinsrb xmm0, xmm1, byte [rdx + rcx + 7], 14 - QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] + QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] LONG $0x0a74b60f; BYTE $0x0b // movzx esi, byte [rdx + rcx + 11] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x0f0722442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 7], 15 + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + QUAD $0x0f070a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 7], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x00048024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1152], ymm0 - QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] + QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] LONG $0x0a74b60f; BYTE $0x0b // movzx esi, byte [rdx + rcx + 11] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x01080a442031e3c4 // vpinsrb xmm0, xmm9, byte [rdx + rcx + 8], 1 - QUAD $0x000000a824848b4c // mov r8, qword [rsp + 168] - QUAD $0x020802442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 8], 2 - QUAD $0x03082a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 8], 3 - WORD $0x894d; BYTE $0xdd // mov r13, r11 - QUAD $0x04081a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 8], 4 - QUAD $0x000000f8249c8b4c // mov r11, qword [rsp + 248] - QUAD $0x05081a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 8], 5 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x06080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 6 - QUAD $0x000000c824b48b48 // 
mov rsi, qword [rsp + 200] - QUAD $0x070832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 7 - QUAD $0x080832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 8], 8 - QUAD $0x000000b824948b4c // mov r10, qword [rsp + 184] - QUAD $0x090812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 8], 9 - QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] + WORD $0x894d; BYTE $0xef // mov r15, r13 + QUAD $0x02082a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 8], 2 + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x03080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x040832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 4 + QUAD $0x000000b824ac8b4c // mov r13, qword [rsp + 184] + QUAD $0x05082a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 8], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x060832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 6 + WORD $0x8949; BYTE $0xc0 // mov r8, rax + QUAD $0x070802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 7 + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x08083a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 8], 8 + QUAD $0x000000e824a48b4c // mov r12, qword [rsp + 232] + QUAD $0x090822442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 8], 9 + LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] QUAD $0x0a081a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 8], 10 - LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] - QUAD $0x0b0832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 11 - QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] - QUAD $0x0c0832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 12 + QUAD $0x000000d8248c8b4c // mov r9, qword [rsp + 216] + QUAD $0x0b080a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 8], 11 + LONG $0x24548b4c; BYTE $0x50 // mov r10, qword [rsp + 80] + QUAD $0x0c0812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 8], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0d0802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 13 - QUAD $0x0e083a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 8], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e0802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 14 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0f0802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 15 + QUAD $0x0108326c2029a3c4 // vpinsrb xmm5, xmm10, byte [rdx + r14 + 8], 1 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x0108026c2029e3c4 // vpinsrb xmm5, xmm10, byte [rdx + rax + 8], 1 - LONG $0x244c8b4c; BYTE $0x40 // mov r9, qword [rsp + 64] - QUAD $0x02080a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 8], 2 - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] - QUAD $0x03083a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r15 + 8], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x0208026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0308026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] QUAD $0x0408326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 8], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x0508026c2051e3c4 // vpinsrb 
xmm5, xmm5, byte [rdx + rax + 8], 5 - QUAD $0x000000a024b48b4c // mov r14, qword [rsp + 160] - QUAD $0x0608326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 8], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] - QUAD $0x0708026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 7 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] - QUAD $0x0808026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 8 - QUAD $0x09083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0508026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 5 + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x0608026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 6 + QUAD $0x000000f824b48b4c // mov r14, qword [rsp + 248] + QUAD $0x0708326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 8], 7 + QUAD $0x08081a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 8], 8 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0908026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 11 - QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] - QUAD $0x0c083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 12 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] - QUAD $0x0d083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 13 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] - QUAD $0x0e083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 14 - QUAD $0x0f08226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 8], 15 - QUAD $0x000000e824a48b4c // mov r12, qword [rsp + 232] - QUAD $0x010922742039a3c4 // vpinsrb xmm6, xmm8, byte [rdx + r12 + 9], 1 - QUAD $0x020902742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r8 + 9], 2 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] - QUAD $0x03093a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rdi + 9], 3 - QUAD $0x04092a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r13 + 9], 4 - QUAD $0x05091a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r11 + 9], 5 - QUAD $0x06090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 6 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] - QUAD $0x07090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 7 - QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] - QUAD $0x08090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 8 - QUAD $0x090912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 9], 9 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x0c08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 12 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0d08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 14 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f08026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 15 + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x010902742039e3c4 // vpinsrb xmm6, xmm8, byte [rdx + rax + 9], 1 + QUAD $0x02093a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r15 + 9], 2 + QUAD 
$0x03090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x040902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 4 + QUAD $0x05092a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r13 + 9], 5 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x060902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 6 + QUAD $0x070902742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r8 + 9], 7 + QUAD $0x08093a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rdi + 9], 8 + QUAD $0x090922742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r12 + 9], 9 QUAD $0x0a091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 10 - LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] - QUAD $0x0b090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 11 - QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] - QUAD $0x0c091a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r11 + 9], 12 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] - QUAD $0x0d090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 14 - LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] - QUAD $0x0f0922742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r12 + 9], 15 - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x01090a7c2021e3c4 // vpinsrb xmm7, xmm11, byte [rdx + rcx + 9], 1 - QUAD $0x02090a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r9 + 9], 2 - QUAD $0x03093a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 9], 3 + QUAD $0x0b090a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r9 + 9], 11 + QUAD $0x0c0912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 9], 12 + LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] + QUAD $0x0d0912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 9], 13 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e0902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 14 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x0f0902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 15 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x0109027c2021e3c4 // vpinsrb xmm7, xmm11, byte [rdx + rax + 9], 1 + LONG $0x246c8b4c; BYTE $0x78 // mov r13, qword [rsp + 120] + QUAD $0x02092a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r13 + 9], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0309027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 3 QUAD $0x0409327c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rsi + 9], 4 - LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] - QUAD $0x05092a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r13 + 9], 5 - QUAD $0x0609327c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r14 + 9], 6 - QUAD $0x00000090249c8b48 // mov rbx, qword [rsp + 144] - QUAD $0x07091a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rbx + 9], 7 - QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] - QUAD $0x08093a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 9], 8 - QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - QUAD $0x09090a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rcx + 9], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] - QUAD $0x0a090a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rcx + 9], 10 + QUAD $0x00000088249c8b48 // mov rbx, qword [rsp + 136] + QUAD $0x05091a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rbx + 9], 5 + QUAD 
[Machine-generated assembly hunks: this stretch of the diff updates auto-generated Go (Plan 9) AVX2 assembly, consisting of long runs of QUAD/LONG/WORD/BYTE instruction encodings whose disassembly comments show vpinsrb/movzx/vmovd/vinserti128 byte-gather sequences. Each sequence collects one byte per lane from strided loads (byte [rdx + reg + 9] through byte [rdx + reg + 20] in the visible hunks), builds two 16-lane xmm halves, joins them with vinserti128 into a 32-byte ymm vector, and spills the result with vmovdqa to yword [rsp + offset] scratch slots. The removed and added lines appear to differ only in which scratch registers and [rsp + offset] reloads the code generator chose when the assembly was regenerated; the gather pattern itself is unchanged.]
[rsp + 232] - QUAD $0x01141a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 20], 1 - QUAD $0x000000a824a48b4c // mov r12, qword [rsp + 168] - QUAD $0x021422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 20], 2 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x031402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 3 - QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] - QUAD $0x04140a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 20], 4 - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x051412442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 20], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x061402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 6 + LONG $0x24648b4c; BYTE $0x68 // mov r12, qword [rsp + 104] + QUAD $0x011422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 20], 1 + QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + QUAD $0x021402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 2 QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x071402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] - QUAD $0x081402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 8 + QUAD $0x031402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 3 + QUAD $0x04140a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 20], 4 QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x091402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 9 - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] - QUAD $0x0a1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 10 + QUAD $0x051402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 5 + QUAD $0x06140a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 20], 6 + QUAD $0x071402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 20], 7 + QUAD $0x081432442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 20], 8 + QUAD $0x091412442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 20], 9 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 12 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] - QUAD $0x0d1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 13 - QUAD $0x0e143a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 20], 14 - QUAD $0x0f1402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 20], 15 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] - LONG $0x0274b60f; BYTE $0x14 // movzx esi, byte [rdx + rax + 20] + QUAD $0x0a1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 10 + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x0b140a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 20], 11 + LONG $0x246c8b4c; BYTE $0x50 // mov r13, qword [rsp + 80] + QUAD $0x0c142a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 20], 12 + QUAD $0x0d141a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 20], 13 + LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] + QUAD $0x0e141a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 20], 14 + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + QUAD $0x0f140a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 20], 15 + QUAD $0x00000100248c8b4c // mov r9, qword [rsp + 256] + LONG $0x74b60f42; WORD $0x140a // movzx esi, byte [rdx + 
r9 + 20] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x0114024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 20], 1 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x0214024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 20], 2 - QUAD $0x03141a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 20], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x0414024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 20], 4 - LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] - QUAD $0x05143a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 20], 5 - QUAD $0x06142a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 20], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x01141a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 20], 1 + LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] + QUAD $0x0214024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 20], 2 + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + QUAD $0x03143a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 20], 3 + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x04140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 4 + QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] + QUAD $0x05140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 5 + QUAD $0x06143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 6 + QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] QUAD $0x0714324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x0814324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 8 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] QUAD $0x0914324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 9 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] - QUAD $0x0a14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 10 - QUAD $0x000000d824848b4c // mov r8, qword [rsp + 216] - QUAD $0x0b14024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 20], 11 - QUAD $0x0c140a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 20], 12 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] - QUAD $0x0d142a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 20], 13 - QUAD $0x0e14324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 20], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] - QUAD $0x0f14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 15 - QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] + QUAD $0x0a14124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 20], 10 + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x0b14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 11 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x0c14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 12 + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x0d143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 13 + QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] + QUAD $0x0e14324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 20], 14 + QUAD $0x0000014024b48b4c // mov r14, qword [rsp + 320] + QUAD $0x0f14324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 20], 15 + QUAD 
$0x000000f024b48b48 // mov rsi, qword [rsp + 240] LONG $0x3274b60f; BYTE $0x15 // movzx esi, byte [rdx + rsi + 21] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x01151a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 21], 1 - QUAD $0x021522542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 21], 2 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x031532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 3 - QUAD $0x04150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 4 - QUAD $0x051512542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 21], 5 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] - QUAD $0x06153a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 21], 6 + QUAD $0x011522542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 21], 1 + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] + QUAD $0x021532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 2 QUAD $0x000000c8249c8b4c // mov r11, qword [rsp + 200] - QUAD $0x07151a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 21], 7 - QUAD $0x000000c024a48b4c // mov r12, qword [rsp + 192] - QUAD $0x081522542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 21], 8 - QUAD $0x000000b824948b4c // mov r10, qword [rsp + 184] - QUAD $0x091512542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 21], 9 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] - QUAD $0x0a150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 10 - LONG $0x24748b4c; BYTE $0x58 // mov r14, qword [rsp + 88] - QUAD $0x0b1532542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 21], 11 - QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] - QUAD $0x0c150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 12 - QUAD $0x000000d0249c8b48 // mov rbx, qword [rsp + 208] - QUAD $0x0d151a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 21], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f150a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 21], 15 - QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] - LONG $0x0a74b60f; BYTE $0x15 // movzx esi, byte [rdx + rcx + 21] + QUAD $0x03151a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 21], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x041532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 4 + QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] + QUAD $0x051532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x061532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 6 + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x071532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 7 + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] + QUAD $0x081532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 8 + QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] + QUAD $0x091532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 9 + QUAD $0x0a1502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 10 + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0b1502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 11 + QUAD $0x0c152a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 21], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0d1502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 13 + QUAD 
$0x0e151a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 21], 14 + LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] + QUAD $0x0f152a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 21], 15 + LONG $0x74b60f42; WORD $0x150a // movzx esi, byte [rdx + r9 + 21] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x01150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 1 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] - QUAD $0x02150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] - QUAD $0x03150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 3 - QUAD $0x0415025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 4 - QUAD $0x05153a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 21], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] - QUAD $0x0615025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 6 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x07153a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 21], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x08150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 8 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x0115025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 1 + QUAD $0x0215025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 21], 2 + QUAD $0x03153a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 21], 3 + QUAD $0x000000a8248c8b4c // mov r9, qword [rsp + 168] + QUAD $0x04150a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 21], 4 + QUAD $0x05150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 5 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x0915025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] - QUAD $0x0a15025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 10 - QUAD $0x0b15025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 21], 11 - QUAD $0x0c150a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 21], 12 - QUAD $0x0d152a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 21], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] - QUAD $0x0e15025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 14 + QUAD $0x0615025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 6 + QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] + QUAD $0x0715025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0815025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 8 + QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] + QUAD $0x0915225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 21], 9 + QUAD $0x0a15125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 21], 10 + QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] + QUAD $0x0b151a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 21], 11 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x0c150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 12 + QUAD $0x0d153a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 21], 13 + QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] + QUAD $0x0e150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 14 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x0002c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 704], ymm0 - LONG $0x24448b4c; 
BYTE $0x38 // mov r8, qword [rsp + 56] - QUAD $0x0f1502442061a3c4 // vpinsrb xmm0, xmm3, byte [rdx + r8 + 21], 15 + QUAD $0x0f1532442061a3c4 // vpinsrb xmm0, xmm3, byte [rdx + r14 + 21], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x0002e024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 736], ymm0 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x0274b60f; BYTE $0x16 // movzx esi, byte [rdx + rax + 22] + QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] + LONG $0x74b60f42; WORD $0x1612 // movzx esi, byte [rdx + r10 + 22] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] - QUAD $0x011632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 1 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] - QUAD $0x021632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 2 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x031632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 3 - QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] - QUAD $0x041632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 4 - QUAD $0x000000f824ac8b4c // mov r13, qword [rsp + 248] - QUAD $0x05162a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 22], 5 - QUAD $0x06163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 6 + LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] + QUAD $0x01163a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 22], 1 + QUAD $0x0000010824848b4c // mov r8, qword [rsp + 264] + QUAD $0x021602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 22], 2 + QUAD $0x03161a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 22], 3 + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + QUAD $0x04160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 4 + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] + QUAD $0x05160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 5 + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x061632442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 22], 6 + QUAD $0x000000d0249c8b4c // mov r11, qword [rsp + 208] QUAD $0x07161a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 22], 7 - QUAD $0x081622442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 22], 8 - QUAD $0x091612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 22], 9 - QUAD $0x000000e024a48b4c // mov r12, qword [rsp + 224] - QUAD $0x0a1622442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 22], 10 - QUAD $0x0b1632442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 22], 11 - QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] - QUAD $0x0c161a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 22], 12 - QUAD $0x0d161a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 22], 13 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0e1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 14 + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x08163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 8 + QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + QUAD $0x09160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 9 + LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] + QUAD $0x0a160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 10 + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x0b160a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 22], 11 LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] - QUAD $0x0f1632442079e3c4 // 
vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 15 - QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] - LONG $0x74b60f42; WORD $0x1612 // movzx esi, byte [rdx + r10 + 22] + QUAD $0x0c1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 14 + QUAD $0x0f162a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 22], 15 + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3274b60f; BYTE $0x16 // movzx esi, byte [rdx + rsi + 22] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] QUAD $0x0116324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 1 - LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] - QUAD $0x02161a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 22], 2 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0216324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0316324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] - QUAD $0x0416324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 4 - LONG $0x24748b4c; BYTE $0x60 // mov r14, qword [rsp + 96] - QUAD $0x0516324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 22], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x04160a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 22], 4 + QUAD $0x0000008824ac8b4c // mov r13, qword [rsp + 136] + QUAD $0x05162a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 22], 5 + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x0616324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 6 - QUAD $0x07163a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 22], 7 - QUAD $0x08160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 8 - QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - QUAD $0x09160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] - QUAD $0x0a160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 10 - QUAD $0x000000d8248c8b4c // mov r9, qword [rsp + 216] - QUAD $0x0b160a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 22], 11 - QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] - QUAD $0x0c160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 12 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] - QUAD $0x0d160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 13 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] - QUAD $0x0e163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 14 - QUAD $0x0f16024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 22], 15 - LONG $0x0274b60f; BYTE $0x17 // movzx esi, byte [rdx + rax + 23] + QUAD $0x000000f8248c8b4c // mov r9, qword [rsp + 248] + QUAD $0x07160a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 22], 7 + QUAD $0x0816024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 8 + QUAD $0x0916224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 22], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a16024c2071e3c4 // vpinsrb xmm1, xmm1, 
byte [rdx + rax + 22], 10 + QUAD $0x0b161a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 22], 11 + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + QUAD $0x0c16224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 22], 12 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0d16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0e16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 14 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0f16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 15 + LONG $0x74b60f42; WORD $0x1712 // movzx esi, byte [rdx + r10 + 23] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] - QUAD $0x011702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] - QUAD $0x021702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 2 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] - QUAD $0x03173a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 23], 3 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x01173a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 23], 1 + QUAD $0x021702542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 23], 2 + QUAD $0x000000c824948b4c // mov r10, qword [rsp + 200] + QUAD $0x031712542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 23], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x041702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 4 - QUAD $0x05172a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 23], 5 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x06170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 6 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] - QUAD $0x07170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 7 - QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] - QUAD $0x08170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 8 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x09170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 9 - QUAD $0x0a1722542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 23], 10 - LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x051702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 5 + QUAD $0x061732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 23], 6 + QUAD $0x07171a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 23], 7 + QUAD $0x08173a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 23], 8 + QUAD $0x000000e824b48b4c // mov r14, qword [rsp + 232] + QUAD $0x091732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 23], 9 + LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + QUAD $0x0a173a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 23], 10 QUAD $0x0b170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 11 - QUAD $0x0c171a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 23], 12 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] - QUAD $0x0d170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 14 - LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] - QUAD $0x0f1722542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 23], 15 - LONG 
$0x74b60f42; WORD $0x1712 // movzx esi, byte [rdx + r10 + 23] + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0c1702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 12 + LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + QUAD $0x0d171a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 23], 13 + LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] + QUAD $0x0e171a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 23], 14 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x0f1702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 15 + QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] + LONG $0x74b60f42; WORD $0x173a // movzx esi, byte [rdx + r15 + 23] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x245c8b4c; BYTE $0x78 // mov r11, qword [rsp + 120] - QUAD $0x01171a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 23], 1 - QUAD $0x02171a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 23], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] - QUAD $0x03170a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 23], 3 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] - QUAD $0x04171a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 23], 4 - QUAD $0x0517325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 23], 5 - QUAD $0x000000a024ac8b4c // mov r13, qword [rsp + 160] + QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] + QUAD $0x01170a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 23], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x0217025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 23], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0317325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 3 + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x0417325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 4 + QUAD $0x05172a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 23], 5 + QUAD $0x0000009824ac8b4c // mov r13, qword [rsp + 152] QUAD $0x06172a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 23], 6 - QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] - QUAD $0x0717325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x07170a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 23], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x0817325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 8 - QUAD $0x0000009824848b4c // mov r8, qword [rsp + 152] + QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] QUAD $0x0917025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 23], 9 - QUAD $0x0000014024948b4c // mov r10, qword [rsp + 320] - QUAD $0x0a17125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 23], 10 - QUAD $0x0b170a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 23], 11 + LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] + QUAD $0x0a17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 10 + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x0b17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 11 + QUAD $0x0c17225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 23], 12 + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + QUAD $0x0d17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 13 QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0c17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 12 - LONG $0x24748b4c; BYTE $0x20 // mov 
r14, qword [rsp + 32] - QUAD $0x0d17325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 23], 13 - QUAD $0x0e173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0e17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] QUAD $0x0f17325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 15 LONG $0x387563c4; WORD $0x01d0 // vinserti128 ymm10, ymm1, xmm0, 1 LONG $0x3865e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm3, xmm2, 1 QUAD $0x0002a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 672], ymm0 - QUAD $0x00000100248c8b4c // mov r9, qword [rsp + 256] - LONG $0x74b60f42; WORD $0x180a // movzx esi, byte [rdx + r9 + 24] + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x18 // movzx esi, byte [rdx + rsi + 24] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x011832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 1 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x021832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 2 - QUAD $0x03183a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 24], 3 - QUAD $0x041802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] - QUAD $0x051802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x061802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 6 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x071802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 7 - QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] - QUAD $0x08183a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 24], 8 + QUAD $0x031812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 24], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x041832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 4 QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] - QUAD $0x091832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 9 + QUAD $0x051832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x061832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 6 + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x071832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 7 QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] - QUAD $0x0a1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 10 - LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] - QUAD $0x0b1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 11 - QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x081832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 8 + QUAD $0x091832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 24], 9 + QUAD $0x0a183a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 24], 10 + QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] + QUAD $0x0b1832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 24], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] QUAD $0x0c1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 12 - QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] - QUAD 
$0x0d1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 13 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0e1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 14 - QUAD $0x0f1822442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 24], 15 - QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] - LONG $0x3274b60f; BYTE $0x18 // movzx esi, byte [rdx + rsi + 24] + QUAD $0x0d181a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 24], 13 + QUAD $0x0e181a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 24], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f1832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 24], 15 + LONG $0x74b60f42; WORD $0x183a // movzx esi, byte [rdx + r15 + 24] LONG $0xce6ef9c5 // vmovd xmm1, esi - QUAD $0x01181a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 24], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x0218324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 2 - QUAD $0x03180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 3 - QUAD $0x04181a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 24], 4 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] - QUAD $0x05180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 5 + QUAD $0x01180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 1 + QUAD $0x0218024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0318024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 3 + QUAD $0x000000a824a48b4c // mov r12, qword [rsp + 168] + QUAD $0x0418224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 24], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x0518024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 5 QUAD $0x06182a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 24], 6 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] - QUAD $0x07180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 7 - QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] + QUAD $0x07180a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 24], 7 + QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] QUAD $0x08183a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 24], 8 QUAD $0x0918024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 24], 9 - QUAD $0x0a18124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 24], 10 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] - QUAD $0x0b180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 11 - QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a18024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 10 + QUAD $0x0000008024848b4c // mov r8, qword [rsp + 128] + QUAD $0x0b18024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 24], 11 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x0c180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 12 - QUAD $0x0d18324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 24], 13 - LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] - QUAD $0x0e18024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 24], 14 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] - QUAD $0x0f180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 15 - LONG $0x74b60f42; WORD $0x190a // movzx esi, byte [rdx + r9 + 25] - LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 
232] - QUAD $0x01190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 1 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] - QUAD $0x02190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 2 - LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] - QUAD $0x03190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 3 - QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] - QUAD $0x04191a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 25], 4 - QUAD $0x000000f8248c8b4c // mov r9, qword [rsp + 248] - QUAD $0x05190a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 25], 5 - LONG $0x24648b4c; BYTE $0x28 // mov r12, qword [rsp + 40] - QUAD $0x061922542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 25], 6 - QUAD $0x071902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 7 - QUAD $0x08193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 8 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x091902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 9 - QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] - QUAD $0x0a192a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 25], 10 - LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] - QUAD $0x0b191a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 25], 11 - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] - QUAD $0x0c1932542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 25], 12 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] - QUAD $0x0d190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0e1902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0f1902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 15 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] + LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + QUAD $0x0d18124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 24], 13 + QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] + QUAD $0x0e183a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 24], 14 + QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] + QUAD $0x0f181a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 24], 15 + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x0274b60f; BYTE $0x19 // movzx esi, byte [rdx + rax + 25] + LONG $0xd66ef9c5 // vmovd xmm2, esi + LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] + QUAD $0x01192a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 25], 1 + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] + QUAD $0x021932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 2 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x031932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 3 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x041932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 4 + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] + QUAD $0x05191a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 25], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x061932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 6 + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x071932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 7 + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] + QUAD $0x081932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 8 + QUAD $0x000000e824b48b48 // mov 
rsi, qword [rsp + 232] + QUAD $0x091932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 10 + QUAD $0x0b1932542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 25], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f1932542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 15 + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3274b60f; BYTE $0x19 // movzx esi, byte [rdx + rsi + 25] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] - QUAD $0x01193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 1 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x0219025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] - QUAD $0x0319025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 3 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x0419025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x0519025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] - QUAD $0x0619025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] - QUAD $0x0719025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 7 + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] + QUAD $0x0119325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x0219325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 2 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0319325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 3 + QUAD $0x0419225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 25], 4 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x0519325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 5 + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x0619325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 6 + QUAD $0x07190a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 25], 7 QUAD $0x08193a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 25], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x0919025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 9 - QUAD $0x0a19125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 25], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] - QUAD $0x0b19025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 25], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0c19325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 25], 12 - LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x09193a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 25], 9 + LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword 
[rsp + 40] + QUAD $0x0a190a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 25], 10 + QUAD $0x0b19025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 25], 11 + QUAD $0x0c190a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 25], 12 QUAD $0x0d19125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 25], 13 - QUAD $0x0e19025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 25], 14 + QUAD $0x0e193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 14 LONG $0x387563c4; WORD $0x01c8 // vinserti128 ymm9, ymm1, xmm0, 1 - LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] - QUAD $0x0f1902442061a3c4 // vpinsrb xmm0, xmm3, byte [rdx + r8 + 25], 15 + QUAD $0x0f191a442061e3c4 // vpinsrb xmm0, xmm3, byte [rdx + rbx + 25], 15 LONG $0x387d63c4; WORD $0x01c2 // vinserti128 ymm8, ymm0, xmm2, 1 - QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] - LONG $0x3274b60f; BYTE $0x1a // movzx esi, byte [rdx + rsi + 26] + LONG $0x0274b60f; BYTE $0x1a // movzx esi, byte [rdx + rax + 26] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] - QUAD $0x011a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 1 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] - QUAD $0x021a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 2 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] - QUAD $0x031a3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 26], 3 - QUAD $0x041a1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 26], 4 - QUAD $0x051a0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 26], 5 - QUAD $0x061a22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 26], 6 - QUAD $0x000000c8249c8b4c // mov r11, qword [rsp + 200] - QUAD $0x071a1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 26], 7 - QUAD $0x000000c0248c8b4c // mov r9, qword [rsp + 192] - QUAD $0x081a0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 26], 8 - QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] - QUAD $0x091a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 9 - QUAD $0x0a1a2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 26], 10 - QUAD $0x0b1a1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 26], 11 - QUAD $0x0c1a32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 26], 12 - QUAD $0x0d1a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e1a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 14 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] - QUAD $0x0f1a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 15 - QUAD $0x00000108249c8b48 // mov rbx, qword [rsp + 264] - LONG $0x1a74b60f; BYTE $0x1a // movzx esi, byte [rdx + rbx + 26] + QUAD $0x011a2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 26], 1 + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + QUAD $0x021a12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 26], 2 + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x031a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x041a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 4 + QUAD $0x051a1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 26], 5 + QUAD $0x00000090249c8b4c // mov r11, qword [rsp + 144] + QUAD $0x061a1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 26], 6 + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x071a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx 
+ 26], 7 + QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] + QUAD $0x081a32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 26], 8 + QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] + QUAD $0x091a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 10 + QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] + QUAD $0x0b1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 11 + LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] + QUAD $0x0c1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 12 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0d1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 14 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0f1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 15 + QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] + LONG $0x3a74b60f; BYTE $0x1a // movzx esi, byte [rdx + rdi + 26] LONG $0xce6ef9c5 // vmovd xmm1, esi - QUAD $0x011a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 1 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] - QUAD $0x021a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 2 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] - QUAD $0x031a224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 26], 3 - LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] - QUAD $0x041a2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 26], 4 - LONG $0x24748b4c; BYTE $0x60 // mov r14, qword [rsp + 96] - QUAD $0x051a324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 26], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] - QUAD $0x061a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 6 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] - QUAD $0x071a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 7 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x081a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 8 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] - QUAD $0x091a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 9 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] - QUAD $0x0a1a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 10 - QUAD $0x0b1a024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 26], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] - QUAD $0x0c1a024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 26], 12 - QUAD $0x0d1a124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 26], 13 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] - QUAD $0x0e1a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 14 - QUAD $0x0f1a024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 26], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x0274b60f; BYTE $0x1b // movzx esi, byte [rdx + rax + 27] + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] + QUAD $0x011a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 1 + LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] + QUAD $0x021a024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 26], 2 + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + QUAD $0x031a1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 26], 3 + QUAD 
$0x041a224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 26], 4 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x051a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 5 + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x061a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 6 + QUAD $0x000000f824a48b4c // mov r12, qword [rsp + 248] + QUAD $0x071a224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 26], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x081a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 8 + QUAD $0x091a3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 26], 9 + QUAD $0x0a1a0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 26], 10 + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] + QUAD $0x0b1a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 11 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x0c1a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 12 + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + QUAD $0x0d1a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 13 + QUAD $0x00000120248c8b4c // mov r9, qword [rsp + 288] + QUAD $0x0e1a0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 26], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + QUAD $0x0f1a324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 26], 15 + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x1b // movzx esi, byte [rdx + rsi + 27] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] - QUAD $0x011b02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 27], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] - QUAD $0x021b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 2 - QUAD $0x031b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 27], 3 - QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] - QUAD $0x041b12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 27], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] - QUAD $0x051b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 5 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x061b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 6 - QUAD $0x071b1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 27], 7 - QUAD $0x081b0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 27], 8 - QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] - QUAD $0x091b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 27], 9 - QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] - QUAD $0x0a1b0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 27], 10 - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 12 + QUAD $0x011b2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 27], 1 + QUAD $0x021b12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 27], 2 + WORD $0x894d; BYTE $0xd5 // mov r13, r10 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x031b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 27], 3 + QUAD $0x041b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 4 + QUAD $0x000000b824948b4c // mov r10, qword [rsp + 184] + QUAD $0x051b12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 27], 5 + QUAD $0x061b1a542069a3c4 // 
vpinsrb xmm2, xmm2, byte [rdx + r11 + 27], 6 QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x071b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 7 + QUAD $0x081b32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 27], 8 + QUAD $0x091b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 9 + LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + QUAD $0x0a1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 10 + QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] + QUAD $0x0b1b32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 27], 11 + LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] + QUAD $0x0c1b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0d1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0e1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0f1b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 15 - LONG $0x1a74b60f; BYTE $0x1b // movzx esi, byte [rdx + rbx + 27] + LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] + QUAD $0x0f1b1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 27], 15 + LONG $0x3a74b60f; BYTE $0x1b // movzx esi, byte [rdx + rdi + 27] LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x011b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 1 - QUAD $0x021b3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 27], 2 - QUAD $0x031b225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 27], 3 - QUAD $0x041b2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 27], 4 - QUAD $0x051b325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 27], 5 - QUAD $0x000000a024a48b4c // mov r12, qword [rsp + 160] - QUAD $0x061b225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 27], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] - QUAD $0x071b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] + QUAD $0x011b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 1 + QUAD $0x021b025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 27], 2 + QUAD $0x031b1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 27], 3 + QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] + QUAD $0x041b1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 27], 4 + QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] + QUAD $0x051b3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 27], 5 + QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] + QUAD $0x061b3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 27], 6 + QUAD $0x071b225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 27], 7 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x081b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 8 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] QUAD $0x091b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 9 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0a1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 10 - QUAD $0x000000d824b48b48 // mov 
rsi, qword [rsp + 216] + QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] QUAD $0x0b1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0c1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 12 LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0d1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 13 - QUAD $0x0e1b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 14 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] - QUAD $0x0f1b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 15 + QUAD $0x0e1b0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 27], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + QUAD $0x0f1b325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 27], 15 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00022024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 544], ymm0 LONG $0x3865e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm3, xmm2, 1 QUAD $0x00024024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 576], ymm0 - QUAD $0x0000010024ac8b4c // mov r13, qword [rsp + 256] - LONG $0x74b60f42; WORD $0x1c2a // movzx esi, byte [rdx + r13 + 28] + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x1c // movzx esi, byte [rdx + rsi + 28] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x011c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 1 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] - QUAD $0x021c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 28], 2 - LONG $0x245c8b4c; BYTE $0x70 // mov r11, qword [rsp + 112] - QUAD $0x031c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 28], 3 - QUAD $0x041c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 4 - QUAD $0x000000f824b48b4c // mov r14, qword [rsp + 248] - QUAD $0x051c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 28], 5 - LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x011c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 1 + QUAD $0x021c2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 28], 2 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x031c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 3 + LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] + QUAD $0x041c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 28], 4 + QUAD $0x051c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] QUAD $0x061c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 6 - QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] - QUAD $0x071c1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 28], 7 - QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] - QUAD $0x081c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 8 - QUAD $0x091c3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 28], 9 - QUAD $0x0a1c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 28], 10 - LONG $0x24548b4c; BYTE $0x58 // mov r10, qword [rsp + 88] - QUAD $0x0b1c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 11 - QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] - QUAD $0x0c1c3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 28], 12 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] - QUAD $0x0d1c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + 
rdi + 28], 13 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0e1c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 14 - LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] - QUAD $0x0f1c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 15 - QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] - LONG $0x3274b60f; BYTE $0x1c // movzx esi, byte [rdx + rsi + 28] + QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] + QUAD $0x071c2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 28], 7 + QUAD $0x000000e024a48b4c // mov r12, qword [rsp + 224] + QUAD $0x081c22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 28], 8 + QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] + QUAD $0x091c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 10 + QUAD $0x0b1c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 28], 11 + QUAD $0x0c1c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 28], 12 + LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] + QUAD $0x0d1c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 28], 13 + QUAD $0x0e1c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 28], 14 + QUAD $0x0f1c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 28], 15 + QUAD $0x00000100249c8b4c // mov r11, qword [rsp + 256] + LONG $0x74b60f42; WORD $0x1c1a // movzx esi, byte [rdx + r11 + 28] LONG $0xce6ef9c5 // vmovd xmm1, esi - LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] - QUAD $0x011c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 1 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x021c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 2 - QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] - QUAD $0x031c0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 28], 3 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] - QUAD $0x041c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] - QUAD $0x051c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 5 - QUAD $0x061c224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 28], 6 - QUAD $0x071c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 7 - QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] - QUAD $0x081c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x091c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] - QUAD $0x0a1c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] - QUAD $0x0b1c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 11 - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0c1c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 12 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x011c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x021c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x031c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 3 + QUAD $0x041c1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 28], 4 + QUAD $0x051c3a4c2071e3c4 // vpinsrb xmm1, xmm1, 
byte [rdx + rdi + 28], 5 + QUAD $0x061c3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 28], 6 + QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] + QUAD $0x071c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 7 + QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] + QUAD $0x081c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 8 + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] + QUAD $0x091c1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 28], 9 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + QUAD $0x0a1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 10 + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] + QUAD $0x0b1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 11 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x0c1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 12 LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0d1c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 13 - LONG $0x24648b4c; BYTE $0x48 // mov r12, qword [rsp + 72] - QUAD $0x0e1c224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 28], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0000012024948b4c // mov r10, qword [rsp + 288] + QUAD $0x0e1c124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 28], 14 + QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] QUAD $0x0f1c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 15 - LONG $0x74b60f42; WORD $0x1d2a // movzx esi, byte [rdx + r13 + 29] + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] + LONG $0x3274b60f; BYTE $0x1d // movzx esi, byte [rdx + rsi + 29] LONG $0xd66ef9c5 // vmovd xmm2, esi - QUAD $0x000000e824ac8b4c // mov r13, qword [rsp + 232] - QUAD $0x011d2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 29], 1 - QUAD $0x021d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 2 - QUAD $0x031d1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 29], 3 - QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] - QUAD $0x041d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 4 - QUAD $0x051d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 5 - LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] - QUAD $0x061d1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 29], 6 - QUAD $0x071d1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 29], 7 - QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] - QUAD $0x081d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 8 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x091d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 9 - QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] - QUAD $0x0a1d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 10 - QUAD $0x0b1d12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 29], 11 - QUAD $0x0c1d3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 29], 12 - QUAD $0x0d1d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 13 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] - QUAD $0x0e1d1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 29], 14 - QUAD $0x0f1d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 15 - QUAD $0x0000010824848b4c // mov r8, qword [rsp + 264] - LONG $0x74b60f42; WORD $0x1d02 // movzx esi, byte [rdx + r8 + 29] - LONG $0xde6ef9c5 // vmovd xmm3, esi - LONG $0x247c8b4c; BYTE $0x78 // mov r15, qword [rsp + 120] - QUAD $0x011d3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 29], 1 
- LONG $0x24548b4c; BYTE $0x40 // mov r10, qword [rsp + 64] - QUAD $0x021d125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 29], 2 - QUAD $0x031d0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 29], 3 - LONG $0x244c8b4c; BYTE $0x68 // mov r9, qword [rsp + 104] - QUAD $0x041d0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 29], 4 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] - QUAD $0x051d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 5 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] - QUAD $0x061d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 6 + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x011d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 1 + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] + QUAD $0x021d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 2 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x031d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 3 + QUAD $0x041d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 4 + QUAD $0x000000b8248c8b4c // mov r9, qword [rsp + 184] + QUAD $0x051d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 5 QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] - QUAD $0x071d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 7 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] - QUAD $0x081d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 8 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] - QUAD $0x091d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 9 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] - QUAD $0x0a1d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 10 - QUAD $0x0b1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] - QUAD $0x0c1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 12 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0d1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 13 - QUAD $0x0e1d22642061a3c4 // vpinsrb xmm4, xmm3, byte [rdx + r12 + 29], 14 + QUAD $0x061d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 6 + QUAD $0x071d2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 29], 7 + QUAD $0x081d22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 29], 8 + QUAD $0x091d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 9 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0a1d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 10 + QUAD $0x000000d824848b4c // mov r8, qword [rsp + 216] + QUAD $0x0b1d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 11 + LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] + QUAD $0x0c1d22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 29], 12 + QUAD $0x0d1d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 13 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x0e1d32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 29], 14 + LONG $0x24748b4c; BYTE $0x60 // mov r14, qword [rsp + 96] + QUAD $0x0f1d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 15 + LONG $0x74b60f42; WORD $0x1d1a // movzx esi, byte [rdx + r11 + 29] + LONG $0xde6ef9c5 // vmovd xmm3, esi + QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] + QUAD $0x011d1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 29], 1 + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + QUAD $0x021d325c2061e3c4 // vpinsrb 
xmm3, xmm3, byte [rdx + rsi + 29], 2 + QUAD $0x031d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 3 + QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] + QUAD $0x041d2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 29], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x051d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 5 + QUAD $0x061d3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 29], 6 + QUAD $0x071d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 7 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x081d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 8 + QUAD $0x091d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 29], 9 + WORD $0x8949; BYTE $0xdf // mov r15, rbx + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0a1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 10 + QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] + QUAD $0x0b1d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 11 + QUAD $0x0c1d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 12 + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x0d1d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 13 + QUAD $0x0e1d12642061a3c4 // vpinsrb xmm4, xmm3, byte [rdx + r10 + 29], 14 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00028024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 640], ymm0 - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x0f1d22442059a3c4 // vpinsrb xmm0, xmm4, byte [rdx + r12 + 29], 15 + QUAD $0x0000014024948b4c // mov r10, qword [rsp + 320] + QUAD $0x0f1d12442059a3c4 // vpinsrb xmm0, xmm4, byte [rdx + r10 + 29], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x00026024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 608], ymm0 - QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] - LONG $0x3a74b60f; BYTE $0x1e // movzx esi, byte [rdx + rdi + 30] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + LONG $0x0274b60f; BYTE $0x1e // movzx esi, byte [rdx + rax + 30] LONG $0xc66ef9c5 // vmovd xmm0, esi - QUAD $0x011e2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 30], 1 - LONG $0x3a74b60f; BYTE $0x1f // movzx esi, byte [rdx + rdi + 31] + LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + QUAD $0x011e1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 30], 1 + LONG $0x0274b60f; BYTE $0x1f // movzx esi, byte [rdx + rax + 31] LONG $0xce6ef9c5 // vmovd xmm1, esi - QUAD $0x011f2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 31], 1 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x011f1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 31], 1 + QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] QUAD $0x021e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 2 QUAD $0x021f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 2 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x031e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 3 QUAD $0x031f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 3 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x041e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 4 QUAD $0x041f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 4 - QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] - QUAD 
$0x051e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 5 - QUAD $0x051f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 5 - QUAD $0x061e1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 30], 6 - QUAD $0x061f1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 31], 6 - QUAD $0x0000011024bc8b48 // mov rdi, qword [rsp + 272] - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + WORD $0x894c; BYTE $0xc8 // mov rax, r9 + QUAD $0x051e0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 30], 5 + QUAD $0x051f0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 31], 5 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x061e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 6 + QUAD $0x061f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 6 + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] QUAD $0x071e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 7 QUAD $0x071f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 7 - QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x00000110249c8b48 // mov rbx, qword [rsp + 272] + QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] QUAD $0x081e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 8 QUAD $0x081f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 8 - QUAD $0x091e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 30], 9 - QUAD $0x091f0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 31], 9 - QUAD $0x0a1e32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 30], 10 - QUAD $0x0a1f324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 31], 10 + QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] + QUAD $0x091e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 9 + QUAD $0x091f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 9 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 11 - QUAD $0x0b1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 11 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x0c1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 12 - QUAD $0x0c1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 12 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x0a1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 10 + QUAD $0x0a1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 10 + WORD $0x894c; BYTE $0xc0 // mov rax, r8 + QUAD $0x0b1e02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 30], 11 + QUAD $0x0b1f024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 31], 11 + QUAD $0x0c1e22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 30], 12 + QUAD $0x0c1f224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 31], 12 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0d1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 13 QUAD $0x0d1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 13 - WORD $0x8948; BYTE $0xd8 // mov rax, rbx - QUAD $0x0e1e1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 30], 14 - QUAD $0x0e1f1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 31], 14 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0f1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 15 - QUAD $0x0f1f02542071e3c4 // vpinsrb xmm2, xmm1, byte [rdx + rax + 31], 15 - WORD $0x894c; BYTE $0xc6 // mov rsi, r8 - LONG $0x44b60f42; WORD $0x1e02 // movzx eax, byte [rdx + r8 + 30] + LONG $0x24448b48; BYTE 
$0x40 // mov rax, qword [rsp + 64] + QUAD $0x0e1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 14 + QUAD $0x0e1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 14 + QUAD $0x0f1e32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 30], 15 + QUAD $0x0f1f32542071a3c4 // vpinsrb xmm2, xmm1, byte [rdx + r14 + 31], 15 + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x3244b60f; BYTE $0x1e // movzx eax, byte [rdx + rsi + 30] LONG $0xc86ef9c5 // vmovd xmm1, eax - QUAD $0x011e3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 30], 1 - LONG $0x44b60f42; WORD $0x1f02 // movzx eax, byte [rdx + r8 + 31] + QUAD $0x011e1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 30], 1 + LONG $0x3244b60f; BYTE $0x1f // movzx eax, byte [rdx + rsi + 31] LONG $0xf86ef9c5 // vmovd xmm7, eax - QUAD $0x011f3a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 31], 1 - QUAD $0x021e124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 30], 2 - QUAD $0x021f127c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r10 + 31], 2 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x011f1a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r11 + 31], 1 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + QUAD $0x021e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 2 + QUAD $0x021f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 2 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x031e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 3 QUAD $0x031f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 3 - QUAD $0x041e0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 30], 4 - QUAD $0x041f0a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r9 + 31], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x041e2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 30], 4 + QUAD $0x041f2a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r13 + 31], 4 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x051e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 5 QUAD $0x051f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 5 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x061e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 6 QUAD $0x061f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] QUAD $0x071e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 7 QUAD $0x071f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 7 - QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x081e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 8 QUAD $0x081f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 8 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x091e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 9 - QUAD $0x091f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 9 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x091e3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 30], 9 + QUAD $0x091f3a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 31], 9 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 10 QUAD $0x0a1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 
10 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] - QUAD $0x0b1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 11 - QUAD $0x0b1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 11 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0b1e3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 30], 11 + QUAD $0x0b1f3a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rdi + 31], 11 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0c1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 12 QUAD $0x0c1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 12 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0d1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 13 - QUAD $0x0d1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 13 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + WORD $0x8948; BYTE $0xc8 // mov rax, rcx + QUAD $0x0d1e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 30], 13 + QUAD $0x0d1f0a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rcx + 31], 13 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0e1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 14 QUAD $0x0e1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 14 - WORD $0x894c; BYTE $0xe0 // mov rax, r12 - QUAD $0x0f1e224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 30], 15 - QUAD $0x0f1f227c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r12 + 31], 15 + QUAD $0x0f1e124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 30], 15 + QUAD $0x0f1f127c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r10 + 31], 15 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00014024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 320], ymm0 LONG $0x3845e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm7, xmm2, 1 @@ -16709,18 +17362,18 @@ LBB2_169: LONG $0x3865e3c4; WORD $0x01e0 // vinserti128 ymm4, ymm3, xmm0, 1 LONG $0x4665e3c4; WORD $0x31c0 // vperm2i128 ymm0, ymm3, ymm0, 49 QUAD $0x00000198248c8b48 // mov rcx, qword [rsp + 408] - LONG $0x447ffec5; WORD $0x608f // vmovdqu yword [rdi + 4*rcx + 96], ymm0 - LONG $0x547ffec5; WORD $0x408f // vmovdqu yword [rdi + 4*rcx + 64], ymm2 - LONG $0x647ffec5; WORD $0x208f // vmovdqu yword [rdi + 4*rcx + 32], ymm4 - LONG $0x0c7ffec5; BYTE $0x8f // vmovdqu yword [rdi + 4*rcx], ymm1 + LONG $0x447ffec5; WORD $0x608b // vmovdqu yword [rbx + 4*rcx + 96], ymm0 + LONG $0x547ffec5; WORD $0x408b // vmovdqu yword [rbx + 4*rcx + 64], ymm2 + LONG $0x647ffec5; WORD $0x208b // vmovdqu yword [rbx + 4*rcx + 32], ymm4 + LONG $0x0c7ffec5; BYTE $0x8b // vmovdqu yword [rbx + 4*rcx], ymm1 LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000180248c3b48 // cmp rcx, qword [rsp + 384] - JNE LBB2_169 + JNE LBB2_170 QUAD $0x0000018824bc8b4c // mov r15, qword [rsp + 392] QUAD $0x0000018024bc3b4c // cmp r15, qword [rsp + 384] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - LONG $0x24748b44; BYTE $0x1c // mov r14d, dword [rsp + 28] + LONG $0x245c8b44; BYTE $0x1c // mov r11d, dword [rsp + 28] QUAD $0x0000019024a48b4c // mov r12, qword [rsp + 400] JNE LBB2_114 JMP LBB2_133 @@ -16735,8 +17388,9 @@ TEXT ·_comparison_not_equal_arr_arr_avx2(SB), $80-48 MOVQ offset+40(FP), R9 ADDQ $8, SP - WORD $0x894d; BYTE $0xc3 // mov r11, r8 - WORD $0x8949; BYTE $0xce // mov r14, rcx + WORD $0x8944; BYTE $0xc8 // mov eax, r9d + WORD $0x894d; BYTE $0xc6 // mov r14, r8 + WORD $0x8949; BYTE $0xcc // mov r12, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 JG 
LBB3_29 WORD $0xff83; BYTE $0x03 // cmp edi, 3 @@ -16747,16 +17401,16 @@ TEXT ·_comparison_not_equal_arr_arr_avx2(SB), $80-48 JE LBB3_79 WORD $0xff83; BYTE $0x06 // cmp edi, 6 JNE LBB3_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_22 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_20: WORD $0x0e8b // mov ecx, dword [rsi] @@ -16769,7 +17423,7 @@ LBB3_20: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -16778,49 +17432,49 @@ LBB3_20: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_20 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_22: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_26 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_24: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x068b // mov eax, dword [rsi] WORD $0x4e8b; BYTE $0x04 // mov ecx, dword [rsi + 4] WORD $0x023b // cmp eax, dword [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] WORD $0x4a3b; BYTE $0x04 // cmp ecx, dword [rdx + 4] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] WORD $0x468b; BYTE $0x08 // mov eax, dword [rsi + 8] WORD $0x423b; BYTE $0x08 // cmp eax, dword [rdx + 8] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] WORD $0x468b; BYTE $0x0c // mov eax, dword [rsi + 12] WORD $0x423b; BYTE $0x0c // cmp eax, dword [rdx + 12] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] WORD $0x468b; BYTE $0x10 // mov eax, dword [rsi + 16] WORD $0x423b; BYTE $0x10 // cmp eax, dword [rdx + 16] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] WORD $0x468b; BYTE $0x14 // mov eax, dword [rsi + 20] WORD $0x423b; BYTE $0x14 // cmp eax, dword [rdx + 20] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 
3] WORD $0x468b; BYTE $0x18 // mov eax, dword [rsi + 24] WORD $0x423b; BYTE $0x18 // cmp eax, dword [rdx + 24] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] WORD $0x468b; BYTE $0x1c // mov eax, dword [rsi + 28] WORD $0x423b; BYTE $0x1c // cmp eax, dword [rdx + 28] LONG $0xd5950f41 // setne r13b WORD $0x468b; BYTE $0x20 // mov eax, dword [rsi + 32] WORD $0x423b; BYTE $0x20 // cmp eax, dword [rdx + 32] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] WORD $0x468b; BYTE $0x24 // mov eax, dword [rsi + 36] WORD $0x423b; BYTE $0x24 // cmp eax, dword [rdx + 36] LONG $0xd0950f41 // setne r8b @@ -16832,165 +17486,165 @@ LBB3_24: LONG $0xd7950f41 // setne r15b WORD $0x468b; BYTE $0x30 // mov eax, dword [rsi + 48] WORD $0x423b; BYTE $0x30 // cmp eax, dword [rdx + 48] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] WORD $0x468b; BYTE $0x34 // mov eax, dword [rsi + 52] WORD $0x423b; BYTE $0x34 // cmp eax, dword [rdx + 52] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] WORD $0x468b; BYTE $0x38 // mov eax, dword [rsi + 56] WORD $0x423b; BYTE $0x38 // cmp eax, dword [rdx + 56] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] WORD $0x468b; BYTE $0x3c // mov eax, dword [rsi + 60] WORD $0x423b; BYTE $0x3c // cmp eax, dword [rdx + 60] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil WORD $0x468b; BYTE $0x40 // mov eax, dword [rsi + 64] - WORD $0x4e8b; BYTE $0x44 // mov ecx, dword [rsi + 68] + WORD $0x5e8b; BYTE $0x44 // mov ebx, dword [rsi + 68] WORD $0x423b; BYTE $0x40 // cmp eax, dword [rdx + 64] WORD $0x468b; BYTE $0x48 // mov eax, dword [rsi + 72] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - WORD $0x4a3b; BYTE $0x44 // cmp ecx, dword [rdx + 68] - WORD $0x4e8b; BYTE $0x4c // mov ecx, dword [rsi + 76] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + WORD $0x5a3b; BYTE $0x44 // cmp ebx, dword [rdx + 68] + WORD $0x5e8b; BYTE $0x4c // mov ebx, dword [rsi + 76] LONG $0xd2950f41 // setne r10b WORD $0x423b; BYTE $0x48 // cmp eax, dword [rdx + 72] WORD $0x468b; BYTE $0x50 // mov eax, dword [rsi + 80] LONG $0xd6950f41 // setne r14b - WORD $0x4a3b; BYTE $0x4c // cmp ecx, dword [rdx + 76] - WORD $0x4e8b; BYTE $0x54 // mov ecx, dword [rsi + 84] + WORD $0x5a3b; BYTE $0x4c // cmp ebx, dword [rdx + 76] + WORD $0x5e8b; BYTE $0x54 // mov ebx, dword [rsi + 84] LONG $0xd4950f41 // setne r12b WORD $0x423b; BYTE $0x50 // cmp eax, dword [rdx + 80] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - WORD $0x4a3b; BYTE $0x54 // cmp ecx, dword [rdx + 84] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + WORD $0x5a3b; BYTE $0x54 // cmp ebx, dword [rdx + 84] WORD $0x468b; BYTE $0x58 // mov eax, dword [rsi + 88] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] WORD $0x423b; BYTE $0x58 // cmp eax, dword [rdx + 88] WORD $0x468b; BYTE $0x5c // mov eax, dword [rsi + 92] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] WORD $0x423b; BYTE $0x5c // cmp eax, dword [rdx + 92] WORD $0x468b; BYTE $0x60 // mov eax, dword [rsi + 96] LONG $0xd1950f41 // setne r9b WORD $0x423b; BYTE $0x60 // cmp eax, dword [rdx + 96] WORD $0x468b; BYTE $0x64 // mov eax, dword [rsi 
+ 100] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] WORD $0x423b; BYTE $0x64 // cmp eax, dword [rdx + 100] WORD $0x468b; BYTE $0x68 // mov eax, dword [rsi + 104] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] WORD $0x423b; BYTE $0x68 // cmp eax, dword [rdx + 104] WORD $0x468b; BYTE $0x6c // mov eax, dword [rsi + 108] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] WORD $0x423b; BYTE $0x6c // cmp eax, dword [rdx + 108] WORD $0x468b; BYTE $0x70 // mov eax, dword [rsi + 112] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] WORD $0x423b; BYTE $0x70 // cmp eax, dword [rdx + 112] WORD $0x468b; BYTE $0x74 // mov eax, dword [rsi + 116] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] WORD $0x423b; BYTE $0x74 // cmp eax, dword [rdx + 116] WORD $0x468b; BYTE $0x78 // mov eax, dword [rsi + 120] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] WORD $0x423b; BYTE $0x78 // cmp eax, dword [rdx + 120] WORD $0x468b; BYTE $0x7c // mov eax, dword [rsi + 124] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 WORD $0x423b; BYTE $0x7c // cmp eax, dword [rdx + 124] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE 
$0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG 
$0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_24 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_26: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_28: @@ -17001,16 +17655,16 @@ LBB3_28: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_28 JMP LBB3_123 @@ -17023,266 +17677,361 @@ LBB3_29: JE LBB3_112 WORD $0xff83; BYTE $0x0c // cmp edi, 12 JNE LBB3_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_50 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x634c; BYTE $0xd0 // movsxd r10, eax LBB3_48: LONG $0x0610fbc5 // vmovsd xmm0, qword [rsi] LONG $0x08c68348 // add rsi, 8 LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] LONG $0x08528d48 // lea rdx, [rdx + 8] - LONG $0xd2950f41 // setne r10b - WORD $0xf641; BYTE $0xda // neg r10b - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xcb08 // or bl, 
cl + WORD $0xdbf6 // neg bl + LONG $0x077a8d49 // lea rdi, [r10 + 7] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f49 // cmovns rdi, r10 LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] - WORD $0x3045; BYTE $0xc2 // xor r10b, r8b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc3 // xor bl, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] - WORD $0xc189 // mov ecx, eax + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0x2944; BYTE $0xc9 // sub ecx, r9d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0x2044; BYTE $0xd3 // and bl, r10b - WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x000001b8; BYTE $0x00 // mov eax, 1 + WORD $0xe0d3 // shl eax, cl + WORD $0xd820 // and al, bl + WORD $0x3044; BYTE $0xc0 // xor al, r8b + LONG $0x3c048841 // mov byte [r12 + rdi], al + LONG $0x01c28349 // add r10, 1 + LONG $0x08fa8349 // cmp r10, 8 JNE LBB3_48 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_50: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_54 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 + LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LBB3_52: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 LONG $0x0610fbc5 // vmovsd xmm0, qword [rsi] - LONG $0x4e10fbc5; BYTE $0x08 // vmovsd xmm1, qword [rsi + 8] LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] - LONG $0x4a2ef9c5; BYTE $0x08 // vucomisd xmm1, qword [rdx + 8] - WORD $0x950f; BYTE $0xd0 // setne al + LONG $0x4610fbc5; BYTE $0x08 // vmovsd xmm0, qword [rsi + 8] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x15244c88 // mov byte [rsp + 21], cl + LONG $0x422ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rdx + 8] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0d244c88 // mov byte [rsp + 13], cl LONG $0x4610fbc5; BYTE $0x10 // vmovsd xmm0, qword [rsi + 16] LONG $0x422ef9c5; BYTE $0x10 // vucomisd xmm0, qword [rdx + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x14244c88 // mov byte [rsp + 20], cl LONG $0x4610fbc5; BYTE $0x18 // vmovsd xmm0, qword [rsi + 24] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x422ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rdx + 24] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x17244c88 // mov byte [rsp + 23], cl LONG $0x4610fbc5; BYTE $0x20 // vmovsd xmm0, qword [rsi + 32] LONG $0x422ef9c5; BYTE $0x20 // vucomisd xmm0, qword [rdx + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x16244c88 // mov byte [rsp + 22], cl LONG $0x4610fbc5; BYTE $0x28 // vmovsd xmm0, qword [rsi + 40] - LONG $0x2454950f; BYTE $0x15 // 
setne byte [rsp + 21] LONG $0x422ef9c5; BYTE $0x28 // vucomisd xmm0, qword [rdx + 40] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al LONG $0x4610fbc5; BYTE $0x30 // vmovsd xmm0, qword [rsi + 48] LONG $0x422ef9c5; BYTE $0x30 // vucomisd xmm0, qword [rdx + 48] - LONG $0x4610fbc5; BYTE $0x38 // vmovsd xmm0, qword [rsi + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x4610fbc5; BYTE $0x38 // vmovsd xmm0, qword [rsi + 56] LONG $0x422ef9c5; BYTE $0x38 // vucomisd xmm0, qword [rdx + 56] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x03244c88 // mov byte [rsp + 3], cl LONG $0x4610fbc5; BYTE $0x40 // vmovsd xmm0, qword [rsi + 64] LONG $0x422ef9c5; BYTE $0x40 // vucomisd xmm0, qword [rdx + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x13244c88 // mov byte [rsp + 19], cl LONG $0x4610fbc5; BYTE $0x48 // vmovsd xmm0, qword [rsi + 72] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x422ef9c5; BYTE $0x48 // vucomisd xmm0, qword [rdx + 72] - WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al LONG $0x4610fbc5; BYTE $0x50 // vmovsd xmm0, qword [rsi + 80] LONG $0x422ef9c5; BYTE $0x50 // vucomisd xmm0, qword [rdx + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x12244c88 // mov byte [rsp + 18], cl LONG $0x4610fbc5; BYTE $0x58 // vmovsd xmm0, qword [rsi + 88] - LONG $0xd1950f41 // setne r9b LONG $0x422ef9c5; BYTE $0x58 // vucomisd xmm0, qword [rdx + 88] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x11244c88 // mov byte [rsp + 17], cl LONG $0x4610fbc5; BYTE $0x60 // vmovsd xmm0, qword [rsi + 96] LONG $0x422ef9c5; BYTE $0x60 // vucomisd xmm0, qword [rdx + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x4610fbc5; BYTE $0x68 // vmovsd xmm0, qword [rsi + 104] - LONG $0xd2950f41 // setne r10b LONG $0x422ef9c5; BYTE $0x68 // vucomisd xmm0, qword [rdx + 104] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0f244c88 // mov byte [rsp + 15], cl LONG $0x4610fbc5; BYTE $0x70 // vmovsd xmm0, qword [rsi + 112] LONG $0x422ef9c5; BYTE $0x70 // vucomisd xmm0, qword [rdx + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0c244c88 // mov byte [rsp + 12], cl LONG $0x4610fbc5; BYTE $0x78 // vmovsd xmm0, qword [rsi + 120] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x422ef9c5; BYTE $0x78 // vucomisd xmm0, qword [rdx + 120] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0b244c88 // mov byte [rsp + 11], cl QUAD $0x000000808610fbc5 // vmovsd xmm0, qword [rsi + 128] QUAD $0x00000080822ef9c5 // vucomisd xmm0, qword [rdx + 128] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD 
$0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0e244c88 // mov byte [rsp + 14], cl QUAD $0x000000888610fbc5 // vmovsd xmm0, qword [rsi + 136] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] QUAD $0x00000088822ef9c5 // vucomisd xmm0, qword [rdx + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0a244c88 // mov byte [rsp + 10], cl QUAD $0x000000908610fbc5 // vmovsd xmm0, qword [rsi + 144] - LONG $0xd6950f41 // setne r14b QUAD $0x00000090822ef9c5 // vucomisd xmm0, qword [rdx + 144] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x09244c88 // mov byte [rsp + 9], cl QUAD $0x000000988610fbc5 // vmovsd xmm0, qword [rsi + 152] - LONG $0xd4950f41 // setne r12b QUAD $0x00000098822ef9c5 // vucomisd xmm0, qword [rdx + 152] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al QUAD $0x000000a08610fbc5 // vmovsd xmm0, qword [rsi + 160] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] QUAD $0x000000a0822ef9c5 // vucomisd xmm0, qword [rdx + 160] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl QUAD $0x000000a88610fbc5 // vmovsd xmm0, qword [rsi + 168] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] QUAD $0x000000a8822ef9c5 // vucomisd xmm0, qword [rdx + 168] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x07244c88 // mov byte [rsp + 7], cl QUAD $0x000000b08610fbc5 // vmovsd xmm0, qword [rsi + 176] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] QUAD $0x000000b0822ef9c5 // vucomisd xmm0, qword [rdx + 176] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x05244c88 // mov byte [rsp + 5], cl QUAD $0x000000b88610fbc5 // vmovsd xmm0, qword [rsi + 184] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] QUAD $0x000000b8822ef9c5 // vucomisd xmm0, qword [rdx + 184] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al QUAD $0x000000c08610fbc5 // vmovsd xmm0, qword [rsi + 192] - LONG $0xd0950f41 // setne r8b QUAD $0x000000c0822ef9c5 // vucomisd xmm0, qword [rdx + 192] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x06244c88 // mov byte [rsp + 6], cl QUAD $0x000000c88610fbc5 // vmovsd xmm0, qword [rsi + 200] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] QUAD $0x000000c8822ef9c5 // vucomisd xmm0, qword [rdx + 200] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl QUAD $0x000000d08610fbc5 // vmovsd xmm0, qword [rsi + 208] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] QUAD $0x000000d0822ef9c5 // vucomisd xmm0, qword [rdx + 208] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al QUAD $0x000000d88610fbc5 // vmovsd xmm0, qword [rsi + 216] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] QUAD $0x000000d8822ef9c5 // vucomisd xmm0, qword [rdx + 216] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al QUAD $0x000000e08610fbc5 // vmovsd xmm0, qword [rsi + 224] - LONG $0x2454950f; 
BYTE $0x10 // setne byte [rsp + 16] QUAD $0x000000e0822ef9c5 // vucomisd xmm0, qword [rdx + 224] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl QUAD $0x000000e88610fbc5 // vmovsd xmm0, qword [rsi + 232] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] QUAD $0x000000e8822ef9c5 // vucomisd xmm0, qword [rdx + 232] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl QUAD $0x000000f08610fbc5 // vmovsd xmm0, qword [rsi + 240] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] QUAD $0x000000f0822ef9c5 // vucomisd xmm0, qword [rdx + 240] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al QUAD $0x000000f88610fbc5 // vmovsd xmm0, qword [rsi + 248] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 QUAD $0x000000f8822ef9c5 // vucomisd xmm0, qword [rdx + 248] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x04244402 // add al, byte [rsp + 4] + LONG $0x15244402 // add al, byte [rsp + 21] + LONG $0x05e7c040 // shl dil, 5 LONG $0x06e5c041 // shl r13b, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0845; BYTE $0xef // or r15b, r13b - LONG $0x6cb60f44; WORD $0x0524 // movzx r13d, byte [rsp + 5] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0841; BYTE $0xc5 // or r13b, al - WORD $0x8944; BYTE $0xe8 // mov eax, r13d - WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD $0x8944; BYTE $0xef // mov edi, r13d + LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xc108 // or cl, al + WORD $0xd889 // mov eax, ebx + WORD $0xd800 // add al, bl + LONG $0x13244402 // add al, byte [rsp + 19] + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x03 // movzx eax, byte [rsp + 3] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x03244488 // mov byte [rsp + 3], al + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x17 // movzx eax, byte [rsp + 23] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + LONG $0x245cb60f; BYTE $0x11 // movzx ebx, byte [rsp + 17] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] - LONG $0x03e5c041 // shl r13b, 3 + LONG $0x04e5c041 // shl r13b, 4 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - WORD $0x8941; BYTE $0xcd // mov r13d, ecx - LONG $0x03e3c041 // shl r11b, 3 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xda // or r10b, r11b - LONG 
$0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd0 // or al, r10b - LONG $0x4cb60f44; WORD $0x0624 // movzx r9d, byte [rsp + 6] - LONG $0x06e1c041 // shl r9b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xcb // or bl, r9b - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al - WORD $0x0045; BYTE $0xf6 // add r14b, r14b - LONG $0x24740244; BYTE $0x0e // add r14b, byte [rsp + 14] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x0f // movzx ebx, byte [rsp + 15] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x0a // movzx ebx, byte [rsp + 10] + WORD $0xdb00 // add bl, bl + LONG $0x0e245c02 // add bl, byte [rsp + 14] + LONG $0x244cb60f; BYTE $0x09 // movzx ecx, byte [rsp + 9] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0841; BYTE $0xce // or r14b, cl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x74b60f44; WORD $0x0324 // movzx r14d, byte [rsp + 3] + WORD $0x0845; BYTE $0xee // or r14b, r13b + LONG $0x6cb60f44; WORD $0x0724 // movzx r13d, byte [rsp + 7] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x05 // movzx ecx, byte [rsp + 5] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xcf // or r15b, cl + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xc900 // add cl, cl + LONG $0x06244c02 // add cl, byte [rsp + 6] + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0841; BYTE $0xcb // or r11b, cl + LONG $0x03e2c041 // shl r10b, 3 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xd1 // or cl, r10b + LONG $0x24348845 // mov byte [r12], r14b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448841; BYTE $0x01 // mov byte [r12 + 1], al LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl - 
WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x14244402 // add al, byte [rsp + 20] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x13 // movzx ecx, byte [rsp + 19] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xc7 // or dil, al - WORD $0x0840; BYTE $0xcf // or dil, cl - LONG $0x02468845 // mov byte [r14 + 2], r8b - LONG $0x037e8841 // mov byte [r14 + 3], dil + LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b + LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 JNE LBB3_52 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] LBB3_54: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_56: + LONG $0x01498d4c // lea r9, [rcx + 1] LONG $0x0410fbc5; BYTE $0xce // vmovsd xmm0, qword [rsi + 8*rcx] LONG $0x042ef9c5; BYTE $0xca // vucomisd xmm0, qword [rdx + 8*rcx] - LONG $0x01418d4c // lea r8, [rcx + 1] - WORD $0x950f; BYTE $0xd3 // setne bl - WORD $0xdbf6 // neg bl + WORD $0x9a0f; BYTE $0xd3 // setp bl + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd808 // or al, bl + WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc0 // xor al, r8b WORD $0xe180; BYTE $0x07 // and cl, 7 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd820 // and al, bl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xc3 // xor bl, r8b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + WORD $0x894c; BYTE $0xc9 // mov rcx, r9 + WORD $0x394d; BYTE $0xce // cmp r14, r9 JNE LBB3_56 JMP LBB3_123 @@ -17291,16 +18040,16 @@ LBB3_2: JE LBB3_57 WORD $0xff83; BYTE $0x03 // cmp edi, 3 JNE LBB3_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG 
$0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_8 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_6: WORD $0xb60f; BYTE $0x0e // movzx ecx, byte [rsi] @@ -17313,7 +18062,7 @@ LBB3_6: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -17322,49 +18071,49 @@ LBB3_6: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_6 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_8: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_12 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LBB3_10: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb60f; BYTE $0x06 // movzx eax, byte [rsi] LONG $0x014eb60f // movzx ecx, byte [rsi + 1] WORD $0x023a // cmp al, byte [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] WORD $0x4a3a; BYTE $0x01 // cmp cl, byte [rdx + 1] - WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x0246b60f // movzx eax, byte [rsi + 2] WORD $0x423a; BYTE $0x02 // cmp al, byte [rdx + 2] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x0346b60f // movzx eax, byte [rsi + 3] WORD $0x423a; BYTE $0x03 // cmp al, byte [rdx + 3] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x0446b60f // movzx eax, byte [rsi + 4] WORD $0x423a; BYTE $0x04 // cmp al, byte [rdx + 4] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x0546b60f // movzx eax, byte [rsi + 5] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x0646b60f // movzx eax, byte [rsi + 6] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0746b60f // movzx eax, byte [rsi + 7] WORD $0x423a; BYTE $0x07 // cmp al, byte [rdx + 7] LONG $0xd7950f41 // setne r15b LONG $0x0846b60f // movzx eax, byte [rsi + 8] WORD $0x423a; BYTE $0x08 // cmp al, byte [rdx + 8] - LONG $0x2454950f; BYTE $0x07 // 
setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x0946b60f // movzx eax, byte [rsi + 9] WORD $0x423a; BYTE $0x09 // cmp al, byte [rdx + 9] LONG $0xd7950f40 // setne dil @@ -17379,16 +18128,16 @@ LBB3_10: LONG $0xd6950f41 // setne r14b LONG $0x0d46b60f // movzx eax, byte [rsi + 13] WORD $0x423a; BYTE $0x0d // cmp al, byte [rdx + 13] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x0e46b60f // movzx eax, byte [rsi + 14] WORD $0x423a; BYTE $0x0e // cmp al, byte [rdx + 14] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x0f46b60f // movzx eax, byte [rsi + 15] WORD $0x423a; BYTE $0x0f // cmp al, byte [rdx + 15] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd0950f41 // setne r8b LONG $0x1046b60f // movzx eax, byte [rsi + 16] WORD $0x423a; BYTE $0x10 // cmp al, byte [rdx + 16] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x1146b60f // movzx eax, byte [rsi + 17] WORD $0x423a; BYTE $0x11 // cmp al, byte [rdx + 17] LONG $0xd4950f41 // setne r12b @@ -17397,144 +18146,144 @@ LBB3_10: LONG $0xd5950f41 // setne r13b LONG $0x1346b60f // movzx eax, byte [rsi + 19] WORD $0x423a; BYTE $0x13 // cmp al, byte [rdx + 19] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] LONG $0x1446b60f // movzx eax, byte [rsi + 20] WORD $0x423a; BYTE $0x14 // cmp al, byte [rdx + 20] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x1546b60f // movzx eax, byte [rsi + 21] WORD $0x423a; BYTE $0x15 // cmp al, byte [rdx + 21] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] LONG $0x1646b60f // movzx eax, byte [rsi + 22] WORD $0x423a; BYTE $0x16 // cmp al, byte [rdx + 22] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x1746b60f // movzx eax, byte [rsi + 23] WORD $0x423a; BYTE $0x17 // cmp al, byte [rdx + 23] LONG $0xd1950f41 // setne r9b LONG $0x1846b60f // movzx eax, byte [rsi + 24] WORD $0x423a; BYTE $0x18 // cmp al, byte [rdx + 24] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x1946b60f // movzx eax, byte [rsi + 25] WORD $0x423a; BYTE $0x19 // cmp al, byte [rdx + 25] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x1a46b60f // movzx eax, byte [rsi + 26] WORD $0x423a; BYTE $0x1a // cmp al, byte [rdx + 26] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x1b46b60f // movzx eax, byte [rsi + 27] WORD $0x423a; BYTE $0x1b // cmp al, byte [rdx + 27] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x1c46b60f // movzx eax, byte [rsi + 28] WORD $0x423a; BYTE $0x1c // cmp al, byte [rdx + 28] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x1d46b60f // movzx eax, byte [rsi + 29] WORD $0x423a; BYTE $0x1d // cmp al, byte [rdx + 29] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x1e46b60f // movzx eax, byte [rsi + 30] WORD 
$0x423a; BYTE $0x1e // cmp al, byte [rdx + 30] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x1f46b60f // movzx eax, byte [rsi + 31] LONG $0x20c68348 // add rsi, 32 WORD $0x423a; BYTE $0x1f // cmp al, byte [rdx + 31] - LONG $0xd0950f41 // setne r8b - WORD $0xc900 // add cl, cl - LONG $0x28244c02 // add cl, byte [rsp + 40] - WORD $0xc889 // mov eax, ecx - LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xdb00 // add bl, bl + LONG $0x04245c02 // add bl, byte [rsp + 4] + WORD $0xd889 // mov eax, ebx + LONG $0x245cb60f; BYTE $0x05 // movzx ebx, byte [rsp + 5] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xcf // or r15b, cl - LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x245cb60f; BYTE $0x15 // movzx ebx, byte [rsp + 21] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x07 // add dil, byte [rsp + 7] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x247c0240; BYTE $0x08 // add dil, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e2c041 // shl r10b, 2 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0xcf89 // mov edi, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx LONG $0x03e3c041 // shl r11b, 3 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xde // or r14b, r11b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x247cb60f; BYTE $0x06 // movzx edi, byte [rsp + 6] + LONG $0x247cb60f; BYTE $0x07 // movzx edi, byte [rsp + 7] LONG $0x06e7c040 // shl dil, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x0841; BYTE $0xc0 // or r8b, al WORD $0x0045; BYTE $0xe4 // add r12b, r12b - LONG $0x24640244; BYTE $0x0d // add r12b, byte [rsp + 13] + LONG $0x24640244; BYTE $0x0e // add r12b, byte [rsp + 14] LONG $0x02e5c041 // shl r13b, 2 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x24648b4c; BYTE $0x30 
// mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0b // movzx ecx, byte [rsp + 11] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x243c8845 // mov byte [r12], r15b + LONG $0x245cb60f; BYTE $0x0c // movzx ebx, byte [rsp + 12] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x12 // movzx ecx, byte [rsp + 18] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x13 // movzx ebx, byte [rsp + 19] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x20c28348 // add rdx, 32 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 JNE LBB3_10 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x18 // mov 
r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] LBB3_12: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_14: @@ -17545,16 +18294,16 @@ LBB3_14: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_14 JMP LBB3_123 @@ -17563,16 +18312,16 @@ LBB3_30: JE LBB3_90 WORD $0xff83; BYTE $0x08 // cmp edi, 8 JNE LBB3_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_36 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_34: WORD $0x8b48; BYTE $0x0e // mov rcx, qword [rsi] @@ -17585,7 +18334,7 @@ LBB3_34: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -17594,49 +18343,49 @@ LBB3_34: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_34 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_36: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_40 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_38: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x8b48; BYTE $0x06 // mov rax, qword [rsi] LONG $0x084e8b48 // mov rcx, qword [rsi + 8] WORD $0x3b48; BYTE $0x02 // cmp rax, qword [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] LONG $0x084a3b48 // cmp rcx, qword [rdx + 8] - LONG 
$0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x10468b48 // mov rax, qword [rsi + 16] LONG $0x10423b48 // cmp rax, qword [rdx + 16] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x18468b48 // mov rax, qword [rsi + 24] LONG $0x18423b48 // cmp rax, qword [rdx + 24] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x20468b48 // mov rax, qword [rsi + 32] LONG $0x20423b48 // cmp rax, qword [rdx + 32] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x28468b48 // mov rax, qword [rsi + 40] LONG $0x28423b48 // cmp rax, qword [rdx + 40] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x30468b48 // mov rax, qword [rsi + 48] LONG $0x30423b48 // cmp rax, qword [rdx + 48] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x38468b48 // mov rax, qword [rsi + 56] LONG $0x38423b48 // cmp rax, qword [rdx + 56] LONG $0xd5950f41 // setne r13b LONG $0x40468b48 // mov rax, qword [rsi + 64] LONG $0x40423b48 // cmp rax, qword [rdx + 64] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x48468b48 // mov rax, qword [rsi + 72] LONG $0x48423b48 // cmp rax, qword [rdx + 72] LONG $0xd0950f41 // setne r8b @@ -17648,165 +18397,165 @@ LBB3_38: LONG $0xd7950f41 // setne r15b LONG $0x60468b48 // mov rax, qword [rsi + 96] LONG $0x60423b48 // cmp rax, qword [rdx + 96] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x68468b48 // mov rax, qword [rsi + 104] LONG $0x68423b48 // cmp rax, qword [rdx + 104] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x70468b48 // mov rax, qword [rsi + 112] LONG $0x70423b48 // cmp rax, qword [rdx + 112] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x78468b48 // mov rax, qword [rsi + 120] LONG $0x78423b48 // cmp rax, qword [rdx + 120] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil LONG $0x80868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 128] - LONG $0x888e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 136] + LONG $0x889e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 136] LONG $0x80823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 128] LONG $0x90868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 144] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - LONG $0x888a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 136] - LONG $0x988e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 152] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x889a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 136] + LONG $0x989e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 152] LONG $0xd2950f41 // setne r10b LONG $0x90823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 144] LONG $0xa0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 160] LONG $0xd6950f41 // setne r14b - LONG $0x988a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 152] - LONG $0xa88e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 168] + 
LONG $0x989a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 152] + LONG $0xa89e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 168] LONG $0xd4950f41 // setne r12b LONG $0xa0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 160] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - LONG $0xa88a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 168] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0xa89a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 168] LONG $0xb0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 176] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0xb0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 176] LONG $0xb8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 184] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0xb8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 184] LONG $0xc0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 192] LONG $0xd1950f41 // setne r9b LONG $0xc0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 192] LONG $0xc8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 200] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0xc8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 200] LONG $0xd0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 208] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0xd0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 208] LONG $0xd8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 216] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0xd8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 216] LONG $0xe0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 224] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0xe0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 224] LONG $0xe8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 232] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0xe8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 232] LONG $0xf0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 240] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0xf0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 240] LONG $0xf8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 248] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0xf8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 248] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte 
[rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG 
$0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_38 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_40: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_42: @@ -17817,30 +18566,30 @@ LBB3_42: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + 
LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_42 JMP LBB3_123 LBB3_68: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_72 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_70: WORD $0xb70f; BYTE $0x0e // movzx ecx, word [rsi] @@ -17853,7 +18602,7 @@ LBB3_70: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -17862,49 +18611,49 @@ LBB3_70: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_70 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_72: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_76 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_74: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb70f; BYTE $0x06 // movzx eax, word [rsi] LONG $0x024eb70f // movzx ecx, word [rsi + 2] WORD $0x3b66; BYTE $0x02 // cmp ax, word [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] LONG $0x024a3b66 // cmp cx, word [rdx + 2] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x0446b70f // movzx eax, word [rsi + 4] LONG $0x04423b66 // cmp ax, word [rdx + 4] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x0646b70f // movzx eax, word [rsi + 6] LONG $0x06423b66 // cmp ax, word [rdx + 6] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x0846b70f // movzx eax, word [rsi + 8] LONG $0x08423b66 // cmp ax, word [rdx + 8] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x0a46b70f // movzx eax, word [rsi + 10] LONG $0x0a423b66 // cmp ax, word [rdx + 10] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x0c46b70f // movzx eax, word 
[rsi + 12] LONG $0x0c423b66 // cmp ax, word [rdx + 12] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0e46b70f // movzx eax, word [rsi + 14] LONG $0x0e423b66 // cmp ax, word [rdx + 14] LONG $0xd5950f41 // setne r13b LONG $0x1046b70f // movzx eax, word [rsi + 16] LONG $0x10423b66 // cmp ax, word [rdx + 16] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x1246b70f // movzx eax, word [rsi + 18] LONG $0x12423b66 // cmp ax, word [rdx + 18] LONG $0xd0950f41 // setne r8b @@ -17916,165 +18665,165 @@ LBB3_74: LONG $0xd7950f41 // setne r15b LONG $0x1846b70f // movzx eax, word [rsi + 24] LONG $0x18423b66 // cmp ax, word [rdx + 24] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x1a46b70f // movzx eax, word [rsi + 26] LONG $0x1a423b66 // cmp ax, word [rdx + 26] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x1c46b70f // movzx eax, word [rsi + 28] LONG $0x1c423b66 // cmp ax, word [rdx + 28] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x1e46b70f // movzx eax, word [rsi + 30] LONG $0x1e423b66 // cmp ax, word [rdx + 30] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil LONG $0x2046b70f // movzx eax, word [rsi + 32] - LONG $0x224eb70f // movzx ecx, word [rsi + 34] + LONG $0x225eb70f // movzx ebx, word [rsi + 34] LONG $0x20423b66 // cmp ax, word [rdx + 32] LONG $0x2446b70f // movzx eax, word [rsi + 36] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - LONG $0x224a3b66 // cmp cx, word [rdx + 34] - LONG $0x264eb70f // movzx ecx, word [rsi + 38] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x225a3b66 // cmp bx, word [rdx + 34] + LONG $0x265eb70f // movzx ebx, word [rsi + 38] LONG $0xd2950f41 // setne r10b LONG $0x24423b66 // cmp ax, word [rdx + 36] LONG $0x2846b70f // movzx eax, word [rsi + 40] LONG $0xd6950f41 // setne r14b - LONG $0x264a3b66 // cmp cx, word [rdx + 38] - LONG $0x2a4eb70f // movzx ecx, word [rsi + 42] + LONG $0x265a3b66 // cmp bx, word [rdx + 38] + LONG $0x2a5eb70f // movzx ebx, word [rsi + 42] LONG $0xd4950f41 // setne r12b LONG $0x28423b66 // cmp ax, word [rdx + 40] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - LONG $0x2a4a3b66 // cmp cx, word [rdx + 42] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2a5a3b66 // cmp bx, word [rdx + 42] LONG $0x2c46b70f // movzx eax, word [rsi + 44] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x2c423b66 // cmp ax, word [rdx + 44] LONG $0x2e46b70f // movzx eax, word [rsi + 46] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x2e423b66 // cmp ax, word [rdx + 46] LONG $0x3046b70f // movzx eax, word [rsi + 48] LONG $0xd1950f41 // setne r9b LONG $0x30423b66 // cmp ax, word [rdx + 48] LONG $0x3246b70f // movzx eax, word [rsi + 50] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x32423b66 // cmp ax, word [rdx + 50] LONG $0x3446b70f // movzx eax, word [rsi + 52] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x34423b66 // cmp ax, word [rdx + 52] LONG $0x3646b70f // 
movzx eax, word [rsi + 54] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x36423b66 // cmp ax, word [rdx + 54] LONG $0x3846b70f // movzx eax, word [rsi + 56] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x38423b66 // cmp ax, word [rdx + 56] LONG $0x3a46b70f // movzx eax, word [rsi + 58] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x3a423b66 // cmp ax, word [rdx + 58] LONG $0x3c46b70f // movzx eax, word [rsi + 60] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x3c423b66 // cmp ax, word [rdx + 60] LONG $0x3e46b70f // movzx eax, word [rsi + 62] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x40c68348 // add rsi, 64 LONG $0x3e423b66 // cmp ax, word [rdx + 62] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; 
WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + 
WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x40c28348 // add rdx, 64 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_74 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_76: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_78: @@ -18085,30 +18834,30 @@ LBB3_78: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_78 JMP LBB3_123 LBB3_79: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_83 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_81: WORD $0xb70f; BYTE $0x0e // movzx ecx, word [rsi] @@ -18121,7 +18870,7 @@ LBB3_81: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -18130,49 +18879,49 @@ LBB3_81: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_81 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_83: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_87 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG 
$0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_85: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb70f; BYTE $0x06 // movzx eax, word [rsi] LONG $0x024eb70f // movzx ecx, word [rsi + 2] WORD $0x3b66; BYTE $0x02 // cmp ax, word [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] LONG $0x024a3b66 // cmp cx, word [rdx + 2] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x0446b70f // movzx eax, word [rsi + 4] LONG $0x04423b66 // cmp ax, word [rdx + 4] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x0646b70f // movzx eax, word [rsi + 6] LONG $0x06423b66 // cmp ax, word [rdx + 6] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x0846b70f // movzx eax, word [rsi + 8] LONG $0x08423b66 // cmp ax, word [rdx + 8] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x0a46b70f // movzx eax, word [rsi + 10] LONG $0x0a423b66 // cmp ax, word [rdx + 10] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x0c46b70f // movzx eax, word [rsi + 12] LONG $0x0c423b66 // cmp ax, word [rdx + 12] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0e46b70f // movzx eax, word [rsi + 14] LONG $0x0e423b66 // cmp ax, word [rdx + 14] LONG $0xd5950f41 // setne r13b LONG $0x1046b70f // movzx eax, word [rsi + 16] LONG $0x10423b66 // cmp ax, word [rdx + 16] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x1246b70f // movzx eax, word [rsi + 18] LONG $0x12423b66 // cmp ax, word [rdx + 18] LONG $0xd0950f41 // setne r8b @@ -18184,165 +18933,165 @@ LBB3_85: LONG $0xd7950f41 // setne r15b LONG $0x1846b70f // movzx eax, word [rsi + 24] LONG $0x18423b66 // cmp ax, word [rdx + 24] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x1a46b70f // movzx eax, word [rsi + 26] LONG $0x1a423b66 // cmp ax, word [rdx + 26] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x1c46b70f // movzx eax, word [rsi + 28] LONG $0x1c423b66 // cmp ax, word [rdx + 28] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x1e46b70f // movzx eax, word [rsi + 30] LONG $0x1e423b66 // cmp ax, word [rdx + 30] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil LONG $0x2046b70f // movzx eax, word [rsi + 32] - LONG $0x224eb70f // movzx ecx, word [rsi + 34] + LONG $0x225eb70f // movzx ebx, word [rsi + 34] LONG $0x20423b66 // cmp ax, word [rdx + 32] LONG $0x2446b70f // movzx eax, word [rsi + 36] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - LONG $0x224a3b66 // cmp cx, word [rdx + 34] - LONG $0x264eb70f // movzx ecx, word [rsi + 38] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x225a3b66 // cmp bx, word [rdx + 34] + LONG $0x265eb70f // movzx ebx, word [rsi + 38] LONG $0xd2950f41 // setne r10b LONG $0x24423b66 // cmp ax, word [rdx + 36] LONG 
$0x2846b70f // movzx eax, word [rsi + 40] LONG $0xd6950f41 // setne r14b - LONG $0x264a3b66 // cmp cx, word [rdx + 38] - LONG $0x2a4eb70f // movzx ecx, word [rsi + 42] + LONG $0x265a3b66 // cmp bx, word [rdx + 38] + LONG $0x2a5eb70f // movzx ebx, word [rsi + 42] LONG $0xd4950f41 // setne r12b LONG $0x28423b66 // cmp ax, word [rdx + 40] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - LONG $0x2a4a3b66 // cmp cx, word [rdx + 42] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2a5a3b66 // cmp bx, word [rdx + 42] LONG $0x2c46b70f // movzx eax, word [rsi + 44] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x2c423b66 // cmp ax, word [rdx + 44] LONG $0x2e46b70f // movzx eax, word [rsi + 46] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x2e423b66 // cmp ax, word [rdx + 46] LONG $0x3046b70f // movzx eax, word [rsi + 48] LONG $0xd1950f41 // setne r9b LONG $0x30423b66 // cmp ax, word [rdx + 48] LONG $0x3246b70f // movzx eax, word [rsi + 50] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x32423b66 // cmp ax, word [rdx + 50] LONG $0x3446b70f // movzx eax, word [rsi + 52] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x34423b66 // cmp ax, word [rdx + 52] LONG $0x3646b70f // movzx eax, word [rsi + 54] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x36423b66 // cmp ax, word [rdx + 54] LONG $0x3846b70f // movzx eax, word [rsi + 56] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x38423b66 // cmp ax, word [rdx + 56] LONG $0x3a46b70f // movzx eax, word [rsi + 58] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x3a423b66 // cmp ax, word [rdx + 58] LONG $0x3c46b70f // movzx eax, word [rsi + 60] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x3c423b66 // cmp ax, word [rdx + 60] LONG $0x3e46b70f // movzx eax, word [rsi + 62] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x40c68348 // add rsi, 64 LONG $0x3e423b66 // cmp ax, word [rdx + 62] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 
// or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov 
ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x40c28348 // add rdx, 64 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_85 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_87: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_89: @@ -18353,30 +19102,30 @@ LBB3_89: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_89 JMP LBB3_123 LBB3_101: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] 
+ WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_105 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_103: WORD $0x8b48; BYTE $0x0e // mov rcx, qword [rsi] @@ -18389,7 +19138,7 @@ LBB3_103: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -18398,49 +19147,49 @@ LBB3_103: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_103 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_105: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_109 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_107: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x8b48; BYTE $0x06 // mov rax, qword [rsi] LONG $0x084e8b48 // mov rcx, qword [rsi + 8] WORD $0x3b48; BYTE $0x02 // cmp rax, qword [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] LONG $0x084a3b48 // cmp rcx, qword [rdx + 8] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x10468b48 // mov rax, qword [rsi + 16] LONG $0x10423b48 // cmp rax, qword [rdx + 16] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x18468b48 // mov rax, qword [rsi + 24] LONG $0x18423b48 // cmp rax, qword [rdx + 24] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x20468b48 // mov rax, qword [rsi + 32] LONG $0x20423b48 // cmp rax, qword [rdx + 32] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x28468b48 // mov rax, qword [rsi + 40] LONG $0x28423b48 // cmp rax, qword [rdx + 40] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x30468b48 // mov rax, qword [rsi + 48] LONG $0x30423b48 // cmp rax, qword [rdx + 48] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x38468b48 // mov rax, qword [rsi + 56] LONG $0x38423b48 // cmp rax, qword [rdx + 56] LONG $0xd5950f41 // setne r13b LONG $0x40468b48 // mov rax, qword [rsi + 64] LONG $0x40423b48 // cmp rax, qword [rdx + 64] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x48468b48 // mov rax, qword [rsi + 72] LONG $0x48423b48 // 
cmp rax, qword [rdx + 72] LONG $0xd0950f41 // setne r8b @@ -18452,165 +19201,165 @@ LBB3_107: LONG $0xd7950f41 // setne r15b LONG $0x60468b48 // mov rax, qword [rsi + 96] LONG $0x60423b48 // cmp rax, qword [rdx + 96] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x68468b48 // mov rax, qword [rsi + 104] LONG $0x68423b48 // cmp rax, qword [rdx + 104] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x70468b48 // mov rax, qword [rsi + 112] LONG $0x70423b48 // cmp rax, qword [rdx + 112] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x78468b48 // mov rax, qword [rsi + 120] LONG $0x78423b48 // cmp rax, qword [rdx + 120] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil LONG $0x80868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 128] - LONG $0x888e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 136] + LONG $0x889e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 136] LONG $0x80823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 128] LONG $0x90868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 144] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - LONG $0x888a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 136] - LONG $0x988e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 152] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x889a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 136] + LONG $0x989e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 152] LONG $0xd2950f41 // setne r10b LONG $0x90823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 144] LONG $0xa0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 160] LONG $0xd6950f41 // setne r14b - LONG $0x988a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 152] - LONG $0xa88e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 168] + LONG $0x989a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 152] + LONG $0xa89e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 168] LONG $0xd4950f41 // setne r12b LONG $0xa0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 160] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - LONG $0xa88a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 168] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0xa89a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 168] LONG $0xb0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 176] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0xb0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 176] LONG $0xb8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 184] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0xb8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 184] LONG $0xc0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 192] LONG $0xd1950f41 // setne r9b LONG $0xc0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 192] LONG $0xc8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 200] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0xc8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 200] LONG $0xd0868b48; WORD $0x0000; BYTE $0x00 // mov 
rax, qword [rsi + 208] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0xd0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 208] LONG $0xd8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 216] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0xd8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 216] LONG $0xe0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 224] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0xe0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 224] LONG $0xe8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 232] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0xe8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 232] LONG $0xf0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 240] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0xf0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 240] LONG $0xf8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 248] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0xf8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 248] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG 
$0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, 
eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_107 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_109: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_111: @@ -18621,294 +19370,389 @@ LBB3_111: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_111 JMP LBB3_123 LBB3_112: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_116 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x634c; BYTE $0xd0 // movsxd r10, eax LBB3_114: LONG $0x0610fac5 // vmovss xmm0, dword [rsi] LONG $0x04c68348 // add rsi, 4 LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] LONG $0x04528d48 // lea rdx, [rdx + 4] - LONG $0xd2950f41 // setne r10b - WORD $0xf641; BYTE $0xda // neg r10b - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xcb08 // or bl, cl + WORD $0xdbf6 // neg bl + LONG $0x077a8d49 // lea rdi, [r10 + 7] + 
WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f49 // cmovns rdi, r10 LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] - WORD $0x3045; BYTE $0xc2 // xor r10b, r8b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc3 // xor bl, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] - WORD $0xc189 // mov ecx, eax + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0x2944; BYTE $0xc9 // sub ecx, r9d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0x2044; BYTE $0xd3 // and bl, r10b - WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x000001b8; BYTE $0x00 // mov eax, 1 + WORD $0xe0d3 // shl eax, cl + WORD $0xd820 // and al, bl + WORD $0x3044; BYTE $0xc0 // xor al, r8b + LONG $0x3c048841 // mov byte [r12 + rdi], al + LONG $0x01c28349 // add r10, 1 + LONG $0x08fa8349 // cmp r10, 8 JNE LBB3_114 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_116: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_120 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 + LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LBB3_118: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 LONG $0x0610fac5 // vmovss xmm0, dword [rsi] - LONG $0x4e10fac5; BYTE $0x04 // vmovss xmm1, dword [rsi + 4] LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] - LONG $0x4a2ef8c5; BYTE $0x04 // vucomiss xmm1, dword [rdx + 4] - WORD $0x950f; BYTE $0xd0 // setne al + LONG $0x4610fac5; BYTE $0x04 // vmovss xmm0, dword [rsi + 4] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x15244c88 // mov byte [rsp + 21], cl + LONG $0x422ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rdx + 4] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0d244c88 // mov byte [rsp + 13], cl LONG $0x4610fac5; BYTE $0x08 // vmovss xmm0, dword [rsi + 8] LONG $0x422ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rdx + 8] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x14244c88 // mov byte [rsp + 20], cl LONG $0x4610fac5; BYTE $0x0c // vmovss xmm0, dword [rsi + 12] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x422ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rdx + 12] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x17244c88 // mov byte [rsp + 23], cl LONG $0x4610fac5; BYTE $0x10 // vmovss xmm0, dword [rsi + 16] LONG $0x422ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rdx + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x16244c88 // mov byte [rsp + 22], cl LONG $0x4610fac5; BYTE $0x14 // vmovss xmm0, dword [rsi + 20] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x422ef8c5; BYTE $0x14 // vucomiss xmm0, 
dword [rdx + 20] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al LONG $0x4610fac5; BYTE $0x18 // vmovss xmm0, dword [rsi + 24] LONG $0x422ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rdx + 24] - LONG $0x4610fac5; BYTE $0x1c // vmovss xmm0, dword [rsi + 28] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x4610fac5; BYTE $0x1c // vmovss xmm0, dword [rsi + 28] LONG $0x422ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rdx + 28] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x03244c88 // mov byte [rsp + 3], cl LONG $0x4610fac5; BYTE $0x20 // vmovss xmm0, dword [rsi + 32] LONG $0x422ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rdx + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x13244c88 // mov byte [rsp + 19], cl LONG $0x4610fac5; BYTE $0x24 // vmovss xmm0, dword [rsi + 36] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x422ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rdx + 36] - WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al LONG $0x4610fac5; BYTE $0x28 // vmovss xmm0, dword [rsi + 40] LONG $0x422ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rdx + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x12244c88 // mov byte [rsp + 18], cl LONG $0x4610fac5; BYTE $0x2c // vmovss xmm0, dword [rsi + 44] - LONG $0xd1950f41 // setne r9b LONG $0x422ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rdx + 44] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x11244c88 // mov byte [rsp + 17], cl LONG $0x4610fac5; BYTE $0x30 // vmovss xmm0, dword [rsi + 48] LONG $0x422ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rdx + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x4610fac5; BYTE $0x34 // vmovss xmm0, dword [rsi + 52] - LONG $0xd2950f41 // setne r10b LONG $0x422ef8c5; BYTE $0x34 // vucomiss xmm0, dword [rdx + 52] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0f244c88 // mov byte [rsp + 15], cl LONG $0x4610fac5; BYTE $0x38 // vmovss xmm0, dword [rsi + 56] LONG $0x422ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rdx + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0c244c88 // mov byte [rsp + 12], cl LONG $0x4610fac5; BYTE $0x3c // vmovss xmm0, dword [rsi + 60] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x422ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rdx + 60] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0b244c88 // mov byte [rsp + 11], cl LONG $0x4610fac5; BYTE $0x40 // vmovss xmm0, dword [rsi + 64] LONG $0x422ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rdx + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG 
$0x0e244c88 // mov byte [rsp + 14], cl LONG $0x4610fac5; BYTE $0x44 // vmovss xmm0, dword [rsi + 68] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x422ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rdx + 68] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0a244c88 // mov byte [rsp + 10], cl LONG $0x4610fac5; BYTE $0x48 // vmovss xmm0, dword [rsi + 72] - LONG $0xd6950f41 // setne r14b LONG $0x422ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rdx + 72] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x09244c88 // mov byte [rsp + 9], cl LONG $0x4610fac5; BYTE $0x4c // vmovss xmm0, dword [rsi + 76] - LONG $0xd4950f41 // setne r12b LONG $0x422ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rdx + 76] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al LONG $0x4610fac5; BYTE $0x50 // vmovss xmm0, dword [rsi + 80] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] LONG $0x422ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rdx + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x4610fac5; BYTE $0x54 // vmovss xmm0, dword [rsi + 84] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x422ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rdx + 84] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x07244c88 // mov byte [rsp + 7], cl LONG $0x4610fac5; BYTE $0x58 // vmovss xmm0, dword [rsi + 88] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] LONG $0x422ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rdx + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x05244c88 // mov byte [rsp + 5], cl LONG $0x4610fac5; BYTE $0x5c // vmovss xmm0, dword [rsi + 92] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x422ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rdx + 92] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al LONG $0x4610fac5; BYTE $0x60 // vmovss xmm0, dword [rsi + 96] - LONG $0xd0950f41 // setne r8b LONG $0x422ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rdx + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x06244c88 // mov byte [rsp + 6], cl LONG $0x4610fac5; BYTE $0x64 // vmovss xmm0, dword [rsi + 100] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x422ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rdx + 100] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x4610fac5; BYTE $0x68 // vmovss xmm0, dword [rsi + 104] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x422ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rdx + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al LONG $0x4610fac5; BYTE $0x6c // vmovss xmm0, dword [rsi + 108] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x422ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rdx + 108] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al LONG $0x4610fac5; BYTE $0x70 // vmovss xmm0, dword [rsi + 112] - LONG 
$0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x422ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rdx + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl LONG $0x4610fac5; BYTE $0x74 // vmovss xmm0, dword [rsi + 116] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x422ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rdx + 116] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x4610fac5; BYTE $0x78 // vmovss xmm0, dword [rsi + 120] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x422ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rdx + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x4610fac5; BYTE $0x7c // vmovss xmm0, dword [rsi + 124] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 LONG $0x422ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rdx + 124] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x04244402 // add al, byte [rsp + 4] + LONG $0x15244402 // add al, byte [rsp + 21] + LONG $0x05e7c040 // shl dil, 5 LONG $0x06e5c041 // shl r13b, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0845; BYTE $0xef // or r15b, r13b - LONG $0x6cb60f44; WORD $0x0524 // movzx r13d, byte [rsp + 5] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0841; BYTE $0xc5 // or r13b, al - WORD $0x8944; BYTE $0xe8 // mov eax, r13d - WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD $0x8944; BYTE $0xef // mov edi, r13d + LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xc108 // or cl, al + WORD $0xd889 // mov eax, ebx + WORD $0xd800 // add al, bl + LONG $0x13244402 // add al, byte [rsp + 19] + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x03 // movzx eax, byte [rsp + 3] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x03244488 // mov byte [rsp + 3], al + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x17 // movzx eax, byte [rsp + 23] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + LONG $0x245cb60f; BYTE $0x11 // movzx ebx, byte [rsp + 17] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] - LONG $0x03e5c041 // shl r13b, 3 + LONG $0x04e5c041 // shl r13b, 4 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - WORD $0x8941; BYTE $0xcd // mov r13d, ecx - LONG $0x03e3c041 // shl r11b, 3 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xda // or r10b, 
r11b - LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd0 // or al, r10b - LONG $0x4cb60f44; WORD $0x0624 // movzx r9d, byte [rsp + 6] - LONG $0x06e1c041 // shl r9b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xcb // or bl, r9b - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al - WORD $0x0045; BYTE $0xf6 // add r14b, r14b - LONG $0x24740244; BYTE $0x0e // add r14b, byte [rsp + 14] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x0f // movzx ebx, byte [rsp + 15] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x0a // movzx ebx, byte [rsp + 10] + WORD $0xdb00 // add bl, bl + LONG $0x0e245c02 // add bl, byte [rsp + 14] + LONG $0x244cb60f; BYTE $0x09 // movzx ecx, byte [rsp + 9] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0841; BYTE $0xcb // or r11b, cl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x5cb60f44; WORD $0x0324 // movzx r11d, byte [rsp + 3] + WORD $0x0845; BYTE $0xeb // or r11b, r13b + LONG $0x6cb60f44; WORD $0x0724 // movzx r13d, byte [rsp + 7] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x05 // movzx ecx, byte [rsp + 5] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x07e2c041 // shl r10b, 7 + WORD $0x0841; BYTE $0xca // or r10b, cl + WORD $0x0841; BYTE $0xda // or r10b, bl + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xc900 // add cl, cl + LONG $0x06244c02 // add cl, byte [rsp + 6] + LONG $0x02e7c041 // shl r15b, 2 + WORD $0x0841; BYTE $0xcf // or r15b, cl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xf1 // or cl, r14b + LONG $0x241c8845 // mov byte [r12], r11b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448841; BYTE $0x01 // mov byte [r12 + 1], al LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x015e8841 // mov byte [r14 
+ 1], bl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x14244402 // add al, byte [rsp + 20] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x13 // movzx ecx, byte [rsp + 19] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xc7 // or dil, al - WORD $0x0840; BYTE $0xcf // or dil, cl - LONG $0x02468845 // mov byte [r14 + 2], r8b - LONG $0x037e8841 // mov byte [r14 + 3], dil + LONG $0x24548845; BYTE $0x02 // mov byte [r12 + 2], r10b + LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 JNE LBB3_118 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] LBB3_120: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_122: + LONG $0x01498d4c // lea r9, [rcx + 1] LONG $0x0410fac5; BYTE $0x8e // vmovss xmm0, dword [rsi + 4*rcx] LONG $0x042ef8c5; BYTE $0x8a // vucomiss xmm0, dword [rdx + 4*rcx] - LONG $0x01418d4c // lea r8, [rcx + 1] - WORD $0x950f; BYTE $0xd3 // setne bl - WORD $0xdbf6 // neg bl + WORD $0x9a0f; BYTE $0xd3 // setp bl + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd808 // or al, bl + WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc0 // xor al, r8b WORD $0xe180; BYTE $0x07 // and cl, 7 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd820 // and al, bl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xc3 // xor bl, r8b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + WORD $0x894c; BYTE $0xc9 // mov rcx, r9 + WORD $0x394d; BYTE $0xce // cmp r14, r9 JNE LBB3_122 JMP LBB3_123 LBB3_57: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - 
WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_61 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_59: WORD $0xb60f; BYTE $0x0e // movzx ecx, byte [rsi] @@ -18921,7 +19765,7 @@ LBB3_59: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -18930,49 +19774,49 @@ LBB3_59: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_59 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_61: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_65 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LBB3_63: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb60f; BYTE $0x06 // movzx eax, byte [rsi] LONG $0x014eb60f // movzx ecx, byte [rsi + 1] WORD $0x023a // cmp al, byte [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] WORD $0x4a3a; BYTE $0x01 // cmp cl, byte [rdx + 1] - WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x0246b60f // movzx eax, byte [rsi + 2] WORD $0x423a; BYTE $0x02 // cmp al, byte [rdx + 2] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x0346b60f // movzx eax, byte [rsi + 3] WORD $0x423a; BYTE $0x03 // cmp al, byte [rdx + 3] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x0446b60f // movzx eax, byte [rsi + 4] WORD $0x423a; BYTE $0x04 // cmp al, byte [rdx + 4] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x0546b60f // movzx eax, byte [rsi + 5] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x0646b60f // movzx eax, byte [rsi + 6] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0746b60f // movzx eax, byte [rsi + 7] WORD $0x423a; BYTE $0x07 // cmp al, byte [rdx + 7] LONG $0xd7950f41 // setne r15b LONG $0x0846b60f // movzx eax, byte [rsi + 8] WORD $0x423a; BYTE $0x08 // cmp al, byte [rdx + 8] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 
8] LONG $0x0946b60f // movzx eax, byte [rsi + 9] WORD $0x423a; BYTE $0x09 // cmp al, byte [rdx + 9] LONG $0xd7950f40 // setne dil @@ -18987,16 +19831,16 @@ LBB3_63: LONG $0xd6950f41 // setne r14b LONG $0x0d46b60f // movzx eax, byte [rsi + 13] WORD $0x423a; BYTE $0x0d // cmp al, byte [rdx + 13] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x0e46b60f // movzx eax, byte [rsi + 14] WORD $0x423a; BYTE $0x0e // cmp al, byte [rdx + 14] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x0f46b60f // movzx eax, byte [rsi + 15] WORD $0x423a; BYTE $0x0f // cmp al, byte [rdx + 15] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd0950f41 // setne r8b LONG $0x1046b60f // movzx eax, byte [rsi + 16] WORD $0x423a; BYTE $0x10 // cmp al, byte [rdx + 16] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x1146b60f // movzx eax, byte [rsi + 17] WORD $0x423a; BYTE $0x11 // cmp al, byte [rdx + 17] LONG $0xd4950f41 // setne r12b @@ -19005,144 +19849,144 @@ LBB3_63: LONG $0xd5950f41 // setne r13b LONG $0x1346b60f // movzx eax, byte [rsi + 19] WORD $0x423a; BYTE $0x13 // cmp al, byte [rdx + 19] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] LONG $0x1446b60f // movzx eax, byte [rsi + 20] WORD $0x423a; BYTE $0x14 // cmp al, byte [rdx + 20] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x1546b60f // movzx eax, byte [rsi + 21] WORD $0x423a; BYTE $0x15 // cmp al, byte [rdx + 21] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] LONG $0x1646b60f // movzx eax, byte [rsi + 22] WORD $0x423a; BYTE $0x16 // cmp al, byte [rdx + 22] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x1746b60f // movzx eax, byte [rsi + 23] WORD $0x423a; BYTE $0x17 // cmp al, byte [rdx + 23] LONG $0xd1950f41 // setne r9b LONG $0x1846b60f // movzx eax, byte [rsi + 24] WORD $0x423a; BYTE $0x18 // cmp al, byte [rdx + 24] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x1946b60f // movzx eax, byte [rsi + 25] WORD $0x423a; BYTE $0x19 // cmp al, byte [rdx + 25] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x1a46b60f // movzx eax, byte [rsi + 26] WORD $0x423a; BYTE $0x1a // cmp al, byte [rdx + 26] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x1b46b60f // movzx eax, byte [rsi + 27] WORD $0x423a; BYTE $0x1b // cmp al, byte [rdx + 27] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x1c46b60f // movzx eax, byte [rsi + 28] WORD $0x423a; BYTE $0x1c // cmp al, byte [rdx + 28] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x1d46b60f // movzx eax, byte [rsi + 29] WORD $0x423a; BYTE $0x1d // cmp al, byte [rdx + 29] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x1e46b60f // movzx eax, byte [rsi + 30] WORD $0x423a; BYTE $0x1e // cmp al, byte [rdx + 30] - LONG $0x2454950f; BYTE 
$0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x1f46b60f // movzx eax, byte [rsi + 31] LONG $0x20c68348 // add rsi, 32 WORD $0x423a; BYTE $0x1f // cmp al, byte [rdx + 31] - LONG $0xd0950f41 // setne r8b - WORD $0xc900 // add cl, cl - LONG $0x28244c02 // add cl, byte [rsp + 40] - WORD $0xc889 // mov eax, ecx - LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xdb00 // add bl, bl + LONG $0x04245c02 // add bl, byte [rsp + 4] + WORD $0xd889 // mov eax, ebx + LONG $0x245cb60f; BYTE $0x05 // movzx ebx, byte [rsp + 5] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xcf // or r15b, cl - LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x245cb60f; BYTE $0x15 // movzx ebx, byte [rsp + 21] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x07 // add dil, byte [rsp + 7] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x247c0240; BYTE $0x08 // add dil, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e2c041 // shl r10b, 2 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0xcf89 // mov edi, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx LONG $0x03e3c041 // shl r11b, 3 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xde // or r14b, r11b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x247cb60f; BYTE $0x06 // movzx edi, byte [rsp + 6] + LONG $0x247cb60f; BYTE $0x07 // movzx edi, byte [rsp + 7] LONG $0x06e7c040 // shl dil, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x0841; BYTE $0xc0 // or r8b, al WORD $0x0045; BYTE $0xe4 // add r12b, r12b - LONG $0x24640244; BYTE $0x0d // add r12b, byte [rsp + 13] + LONG $0x24640244; BYTE $0x0e // add r12b, byte [rsp + 14] LONG $0x02e5c041 // shl r13b, 2 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x09 // movzx 
eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0b // movzx ecx, byte [rsp + 11] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x243c8845 // mov byte [r12], r15b + LONG $0x245cb60f; BYTE $0x0c // movzx ebx, byte [rsp + 12] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x12 // movzx ecx, byte [rsp + 18] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x13 // movzx ebx, byte [rsp + 19] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x20c28348 // add rdx, 32 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 JNE LBB3_63 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG 
$0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] LBB3_65: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_67: @@ -19153,30 +19997,30 @@ LBB3_67: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_67 JMP LBB3_123 LBB3_90: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_94 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_92: WORD $0x0e8b // mov ecx, dword [rsi] @@ -19189,7 +20033,7 @@ LBB3_92: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -19198,49 +20042,49 @@ LBB3_92: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_92 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_94: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_98 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_96: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x068b // mov eax, dword [rsi] WORD $0x4e8b; BYTE $0x04 // mov ecx, dword [rsi + 4] WORD $0x023b // cmp eax, dword [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] WORD $0x4a3b; BYTE $0x04 // cmp ecx, dword [rdx + 4] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] WORD $0x468b; BYTE $0x08 // mov eax, dword 
[rsi + 8] WORD $0x423b; BYTE $0x08 // cmp eax, dword [rdx + 8] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] WORD $0x468b; BYTE $0x0c // mov eax, dword [rsi + 12] WORD $0x423b; BYTE $0x0c // cmp eax, dword [rdx + 12] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] WORD $0x468b; BYTE $0x10 // mov eax, dword [rsi + 16] WORD $0x423b; BYTE $0x10 // cmp eax, dword [rdx + 16] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] WORD $0x468b; BYTE $0x14 // mov eax, dword [rsi + 20] WORD $0x423b; BYTE $0x14 // cmp eax, dword [rdx + 20] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] WORD $0x468b; BYTE $0x18 // mov eax, dword [rsi + 24] WORD $0x423b; BYTE $0x18 // cmp eax, dword [rdx + 24] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] WORD $0x468b; BYTE $0x1c // mov eax, dword [rsi + 28] WORD $0x423b; BYTE $0x1c // cmp eax, dword [rdx + 28] LONG $0xd5950f41 // setne r13b WORD $0x468b; BYTE $0x20 // mov eax, dword [rsi + 32] WORD $0x423b; BYTE $0x20 // cmp eax, dword [rdx + 32] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] WORD $0x468b; BYTE $0x24 // mov eax, dword [rsi + 36] WORD $0x423b; BYTE $0x24 // cmp eax, dword [rdx + 36] LONG $0xd0950f41 // setne r8b @@ -19252,165 +20096,165 @@ LBB3_96: LONG $0xd7950f41 // setne r15b WORD $0x468b; BYTE $0x30 // mov eax, dword [rsi + 48] WORD $0x423b; BYTE $0x30 // cmp eax, dword [rdx + 48] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] WORD $0x468b; BYTE $0x34 // mov eax, dword [rsi + 52] WORD $0x423b; BYTE $0x34 // cmp eax, dword [rdx + 52] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] WORD $0x468b; BYTE $0x38 // mov eax, dword [rsi + 56] WORD $0x423b; BYTE $0x38 // cmp eax, dword [rdx + 56] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] WORD $0x468b; BYTE $0x3c // mov eax, dword [rsi + 60] WORD $0x423b; BYTE $0x3c // cmp eax, dword [rdx + 60] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil WORD $0x468b; BYTE $0x40 // mov eax, dword [rsi + 64] - WORD $0x4e8b; BYTE $0x44 // mov ecx, dword [rsi + 68] + WORD $0x5e8b; BYTE $0x44 // mov ebx, dword [rsi + 68] WORD $0x423b; BYTE $0x40 // cmp eax, dword [rdx + 64] WORD $0x468b; BYTE $0x48 // mov eax, dword [rsi + 72] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - WORD $0x4a3b; BYTE $0x44 // cmp ecx, dword [rdx + 68] - WORD $0x4e8b; BYTE $0x4c // mov ecx, dword [rsi + 76] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + WORD $0x5a3b; BYTE $0x44 // cmp ebx, dword [rdx + 68] + WORD $0x5e8b; BYTE $0x4c // mov ebx, dword [rsi + 76] LONG $0xd2950f41 // setne r10b WORD $0x423b; BYTE $0x48 // cmp eax, dword [rdx + 72] WORD $0x468b; BYTE $0x50 // mov eax, dword [rsi + 80] LONG $0xd6950f41 // setne r14b - WORD $0x4a3b; BYTE $0x4c // cmp ecx, dword [rdx + 76] - WORD $0x4e8b; BYTE $0x54 // mov ecx, dword [rsi + 84] + WORD $0x5a3b; BYTE $0x4c // cmp ebx, dword [rdx + 76] + WORD $0x5e8b; BYTE $0x54 // mov ebx, dword [rsi + 84] LONG $0xd4950f41 // setne r12b WORD $0x423b; BYTE $0x50 // cmp eax, dword [rdx + 80] - LONG 
$0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - WORD $0x4a3b; BYTE $0x54 // cmp ecx, dword [rdx + 84] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + WORD $0x5a3b; BYTE $0x54 // cmp ebx, dword [rdx + 84] WORD $0x468b; BYTE $0x58 // mov eax, dword [rsi + 88] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] WORD $0x423b; BYTE $0x58 // cmp eax, dword [rdx + 88] WORD $0x468b; BYTE $0x5c // mov eax, dword [rsi + 92] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] WORD $0x423b; BYTE $0x5c // cmp eax, dword [rdx + 92] WORD $0x468b; BYTE $0x60 // mov eax, dword [rsi + 96] LONG $0xd1950f41 // setne r9b WORD $0x423b; BYTE $0x60 // cmp eax, dword [rdx + 96] WORD $0x468b; BYTE $0x64 // mov eax, dword [rsi + 100] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] WORD $0x423b; BYTE $0x64 // cmp eax, dword [rdx + 100] WORD $0x468b; BYTE $0x68 // mov eax, dword [rsi + 104] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] WORD $0x423b; BYTE $0x68 // cmp eax, dword [rdx + 104] WORD $0x468b; BYTE $0x6c // mov eax, dword [rsi + 108] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] WORD $0x423b; BYTE $0x6c // cmp eax, dword [rdx + 108] WORD $0x468b; BYTE $0x70 // mov eax, dword [rsi + 112] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] WORD $0x423b; BYTE $0x70 // cmp eax, dword [rdx + 112] WORD $0x468b; BYTE $0x74 // mov eax, dword [rsi + 116] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] WORD $0x423b; BYTE $0x74 // cmp eax, dword [rdx + 116] WORD $0x468b; BYTE $0x78 // mov eax, dword [rsi + 120] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] WORD $0x423b; BYTE $0x78 // cmp eax, dword [rdx + 120] WORD $0x468b; BYTE $0x7c // mov eax, dword [rsi + 124] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 WORD $0x423b; BYTE $0x7c // cmp eax, dword [rdx + 124] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // 
movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - 
WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_96 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_98: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_100: @@ -19421,16 +20265,16 @@ LBB3_100: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_100 LBB3_123: @@ -19492,7 +20336,7 @@ TEXT ·_comparison_not_equal_arr_scalar_avx2(SB), $1320-48 WORD $0xff83; BYTE $0x05 // cmp edi, 5 JE LBB4_56 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB4_159 + JNE LBB4_165 WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -19542,7 +20386,7 @@ LBB4_11: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x046e3944 // cmp 
dword [rsi + 4], r13d - LONG $0xd7950f40 // setne dil + LONG $0xd2950f41 // setne r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6950f41 // setne r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d @@ -19558,11 +20402,11 @@ LBB4_11: LONG $0x206e3944 // cmp dword [rsi + 32], r13d QUAD $0x000000a02494950f // setne byte [rsp + 160] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x950f; BYTE $0xd2 // setne dl + LONG $0xd7950f40 // setne dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1950f41 // setne r9b + LONG $0xd0950f41 // setne r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2950f41 // setne r10b + LONG $0xd1950f41 // setne r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3950f41 // setne r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d @@ -19602,67 +20446,68 @@ LBB4_11: LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009024bc0240 // add dil, byte [rsp + 144] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009024940244 // add r10b, byte [rsp + 144] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD 
$0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xc900 // add cl, cl @@ -19685,14 +20530,14 @@ LBB4_11: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 JNE LBB4_11 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] @@ -19701,7 +20546,7 @@ LBB4_11: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB4_101 - JMP LBB4_159 + JMP LBB4_165 LBB4_13: WORD $0xff83; BYTE $0x08 // cmp edi, 8 @@ -19711,7 +20556,7 @@ LBB4_13: WORD $0xff83; BYTE $0x0b // cmp edi, 11 JE LBB4_72 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB4_159 + JNE LBB4_165 LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xfa490f4d // cmovns r15, r10 @@ -19727,7 +20572,9 @@ LBB4_13: LBB4_19: LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] LONG $0x08768d48 // lea rsi, [rsi + 8] + WORD $0x9a0f; BYTE $0xd1 // 
setp cl WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xca08 // or dl, cl WORD $0xdaf6 // neg dl LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax @@ -19753,178 +20600,263 @@ LBB4_21: LONG $0x20fa8349 // cmp r10, 32 JL LBB4_104 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 LBB4_23: LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - QUAD $0x000000982494950f // setne byte [rsp + 152] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x462ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rsi + 8] - LONG $0xd1950f41 // setne r9b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al LONG $0x462ef9c5; BYTE $0x10 // vucomisd xmm0, qword [rsi + 16] - LONG $0xd6950f41 // setne r14b - LONG $0x462ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rsi + 24] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x462ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rsi + 24] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], cl LONG $0x462ef9c5; BYTE $0x20 // vucomisd xmm0, qword [rsi + 32] - QUAD $0x000000882494950f // setne byte [rsp + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 320], cl LONG $0x462ef9c5; BYTE $0x28 // vucomisd xmm0, qword [rsi + 40] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al LONG $0x462ef9c5; BYTE $0x30 // vucomisd xmm0, qword [rsi + 48] - WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x462ef9c5; BYTE $0x38 // vucomisd xmm0, qword [rsi + 56] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x1c244c88 // mov byte [rsp + 28], cl LONG $0x462ef9c5; BYTE $0x40 // vucomisd xmm0, qword [rsi + 64] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x462ef9c5; BYTE $0x48 // vucomisd xmm0, qword [rsi + 72] - WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x462ef9c5; BYTE $0x50 // vucomisd xmm0, qword [rsi + 80] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x462ef9c5; BYTE $0x58 // vucomisd xmm0, qword [rsi + 88] - LONG $0xd2950f41 // setne r10b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x68244c88 // mov byte 
[rsp + 104], cl LONG $0x462ef9c5; BYTE $0x60 // vucomisd xmm0, qword [rsi + 96] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x462ef9c5; BYTE $0x68 // vucomisd xmm0, qword [rsi + 104] - LONG $0xd4950f41 // setne r12b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x462ef9c5; BYTE $0x70 // vucomisd xmm0, qword [rsi + 112] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al LONG $0x462ef9c5; BYTE $0x78 // vucomisd xmm0, qword [rsi + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl QUAD $0x00000080862ef9c5 // vucomisd xmm0, qword [rsi + 128] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl QUAD $0x00000088862ef9c5 // vucomisd xmm0, qword [rsi + 136] - QUAD $0x000000a02494950f // setne byte [rsp + 160] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl QUAD $0x00000090862ef9c5 // vucomisd xmm0, qword [rsi + 144] - QUAD $0x000000802494950f // setne byte [rsp + 128] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al QUAD $0x00000098862ef9c5 // vucomisd xmm0, qword [rsi + 152] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x80248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], al QUAD $0x000000a0862ef9c5 // vucomisd xmm0, qword [rsi + 160] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x78244488 // mov byte [rsp + 120], al QUAD $0x000000a8862ef9c5 // vucomisd xmm0, qword [rsi + 168] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0xa0248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], al QUAD $0x000000b0862ef9c5 // vucomisd xmm0, qword [rsi + 176] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd4950f41 // setne r12b + WORD $0x0841; BYTE $0xc4 // or r12b, al QUAD $0x000000b8862ef9c5 // vucomisd xmm0, qword [rsi + 184] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al QUAD $0x000000c0862ef9c5 // vucomisd xmm0, qword [rsi + 192] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x70244488 // mov byte [rsp + 112], al QUAD $0x000000c8862ef9c5 // vucomisd xmm0, qword [rsi + 200] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al 
QUAD $0x000000d0862ef9c5 // vucomisd xmm0, qword [rsi + 208] - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al QUAD $0x000000d8862ef9c5 // vucomisd xmm0, qword [rsi + 216] - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al QUAD $0x000000e0862ef9c5 // vucomisd xmm0, qword [rsi + 224] - QUAD $0x000001202494950f // setne byte [rsp + 288] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x98248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], al QUAD $0x000000e8862ef9c5 // vucomisd xmm0, qword [rsi + 232] - QUAD $0x000001402494950f // setne byte [rsp + 320] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x90248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], al QUAD $0x000000f0862ef9c5 // vucomisd xmm0, qword [rsi + 240] - LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al QUAD $0x000000f8862ef9c5 // vucomisd xmm0, qword [rsi + 248] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd0950f41 // setne r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x00000098248c0244 // add r9b, byte [rsp + 152] + WORD $0x0841; BYTE $0xc0 // or r8b, al + WORD $0xdb00 // add bl, bl + LONG $0x20245c02 // add bl, byte [rsp + 32] + LONG $0x05e7c040 // shl dil, 5 + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x50244402 // add al, byte [rsp + 80] + LONG $0x38244402 // add al, byte [rsp + 56] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x1c244488 // mov byte [rsp + 28], al + LONG 
$0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xfb // or bl, dil + LONG $0x247cb60f; BYTE $0x58 // movzx edi, byte [rsp + 88] + LONG $0x05e7c040 // shl dil, 5 + WORD $0xe2c0; BYTE $0x06 // shl dl, 6 + WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x6cb60f44; WORD $0x5024 // movzx r13d, byte [rsp + 80] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xd5 // or r13b, dl + QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] + WORD $0xd200 // add dl, dl + LONG $0x48245402 // add dl, byte [rsp + 72] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd108 // or cl, dl QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] + WORD $0xca08 // or dl, cl + WORD $0xd189 // mov ecx, edx + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xca08 // or dl, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xc108 // or cl, al + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + LONG 
$0x06e4c041 // shl r12b, 6 + WORD $0x0841; BYTE $0xc4 // or r12b, al + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x07e2c041 // shl r10b, 7 + WORD $0x0845; BYTE $0xe2 // or r10b, r12b + WORD $0x0841; BYTE $0xd2 // or r10b, dl + WORD $0x0045; BYTE $0xff // add r15b, r15b + LONG $0x247c0244; BYTE $0x70 // add r15b, byte [rsp + 112] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xf3 // or r11b, r14b + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd8 // or al, r11b + WORD $0xc389 // mov ebx, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x0888 // mov byte [rax], cl + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] + WORD $0xe2c0; BYTE $0x05 // shl dl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd1 // or r9b, dl + LONG $0x01688844 // mov byte [rax + 1], r13b LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + LONG $0x02508844 // mov byte [rax + 2], r10b + LONG $0x03408844 // mov byte [rax + 3], r8b LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x0000009024848348; BYTE $0xff // add qword [rsp + 144], -1 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 JNE LBB4_23 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB4_105 - JMP LBB4_159 + JMP LBB4_165 LBB4_25: WORD $0xff83; BYTE $0x02 // cmp edi, 2 JE LBB4_80 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB4_159 + JNE LBB4_165 WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -19975,11 +20907,11 @@ LBB4_31: LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc5 // cmp r13, rax - JAE LBB4_165 + JAE LBB4_166 QUAD $0x00000000bd048d4a // lea rax, [4*r15] WORD $0x014c; BYTE $0xe8 // add rax, r13 WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB4_165 + JAE LBB4_166 LBB4_34: WORD $0xc031 // xor eax, eax @@ -20164,7 +21096,7 @@ LBB4_38: WORD $0xff83; BYTE $0x07 // cmp edi, 7 JE LBB4_92 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB4_159 + JNE LBB4_165 WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -20214,7 +21146,7 @@ LBB4_46: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7950f40 // setne dil + LONG $0xd2950f41 // setne r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6950f41 // setne r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 @@ -20230,11 +21162,11 @@ LBB4_46: LONG $0x406e394c // cmp qword [rsi + 64], r13 QUAD $0x000000a02494950f // setne byte [rsp + 160] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x950f; BYTE $0xd2 // setne dl + LONG 
$0xd7950f40 // setne dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1950f41 // setne r9b + LONG $0xd0950f41 // setne r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2950f41 // setne r10b + LONG $0xd1950f41 // setne r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3950f41 // setne r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 @@ -20274,32 +21206,33 @@ LBB4_46: LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009024bc0240 // add dil, byte [rsp + 144] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009024940244 // add r10b, byte [rsp + 144] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] @@ -20307,60 +21240,60 @@ LBB4_46: LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov 
edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1b // mov byte [r11], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x1c // movzx edx, byte [rsp + 28] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xc000 // add al, al + LONG $0x20244402 // add al, byte [rsp + 32] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x03438845 // mov byte [r11 + 3], r8b + LONG $0x03538841 // mov byte [r11 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c38349 // add r11, 4 QUAD $0x000000a824848348; BYTE $0xff // add qword 
[rsp + 168], -1 @@ -20371,7 +21304,7 @@ LBB4_46: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB4_108 - JMP LBB4_159 + JMP LBB4_165 LBB4_48: LONG $0x2ab70f44 // movzx r13d, word [rdx] @@ -20414,387 +21347,389 @@ LBB4_52: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB4_111 - QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 - QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 - -LBB4_54: - LONG $0x2e394466 // cmp word [rsi], r13w - WORD $0x950f; BYTE $0xd0 // setne al - LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0xd7950f40 // setne dil - LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w - LONG $0xd6950f41 // setne r14b - LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w - QUAD $0x000000982494950f // setne byte [rsp + 152] - LONG $0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w - QUAD $0x000000882494950f // setne byte [rsp + 136] - LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w - QUAD $0x000000902494950f // setne byte [rsp + 144] - LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w - WORD $0x950f; BYTE $0xd3 // setne bl - LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w - QUAD $0x000000a02494950f // setne byte [rsp + 160] - LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w - WORD $0x950f; BYTE $0xd2 // setne dl - LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w - LONG $0xd1950f41 // setne r9b - LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w - LONG $0xd2950f41 // setne r10b - LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w - LONG $0xd3950f41 // setne r11b - LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w - LONG $0xd4950f41 // setne r12b - LONG $0x6e394466; BYTE $0x1c // cmp word [rsi + 28], r13w - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] - LONG $0x6e394466; BYTE $0x1e // cmp word [rsi + 30], r13w - WORD $0x950f; BYTE $0xd1 // setne cl - LONG $0x6e394466; BYTE $0x20 // cmp word [rsi + 32], r13w - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x6e394466; BYTE $0x22 // cmp word [rsi + 34], r13w - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] - LONG $0x6e394466; BYTE $0x24 // cmp word [rsi + 36], r13w - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x6e394466; BYTE $0x26 // cmp word [rsi + 38], r13w - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] - LONG $0x6e394466; BYTE $0x28 // cmp word [rsi + 40], r13w - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x6e394466; BYTE $0x2a // cmp word [rsi + 42], r13w - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0x6e394466; BYTE $0x2c // cmp word [rsi + 44], r13w - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] - LONG $0x6e394466; BYTE $0x2e // cmp word [rsi + 46], r13w - LONG $0xd7950f41 // setne r15b - LONG $0x6e394466; BYTE $0x30 // cmp word [rsi + 48], r13w - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x6e394466; BYTE $0x32 // cmp word [rsi + 50], r13w - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x6e394466; BYTE $0x34 // cmp word [rsi + 52], r13w - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] - LONG $0x6e394466; BYTE $0x36 // cmp word [rsi + 54], r13w - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - 
LONG $0x6e394466; BYTE $0x38 // cmp word [rsi + 56], r13w - QUAD $0x000001202494950f // setne byte [rsp + 288] - LONG $0x6e394466; BYTE $0x3a // cmp word [rsi + 58], r13w - QUAD $0x000001402494950f // setne byte [rsp + 320] - LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w - LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] - LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - WORD $0x0840; BYTE $0xc7 // or dil, al - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // 
mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x40c68348 // add rsi, 64 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB4_54 - QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] - QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] - LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB4_112 - JMP LBB4_159 - -LBB4_56: - LONG $0x2ab70f44 // movzx r13d, word [rdx] - LONG $0x1f7a8d4d // lea r15, [r10 + 31] - WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xfa490f4d // cmovns r15, r10 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB4_60 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d - -LBB4_58: - LONG $0x2e394466 // cmp word [rsi], r13w - LONG $0x02768d48 // lea rsi, [rsi + 2] - WORD $0x950f; BYTE $0xd2 // setne dl - WORD $0xdaf6 // neg dl - LONG $0x07588d48 // lea rbx, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xd8490f48 // cmovns rbx, rax - LONG $0x03fbc148 // sar rbx, 3 - LONG $0x04b60f45; BYTE $0x1b // movzx r8d, byte [r11 + rbx] - WORD $0x3044; BYTE $0xc2 // xor dl, r8b - LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] - WORD $0xc189 // mov ecx, eax - WORD $0xf929 // sub ecx, edi - LONG $0x000001bf; BYTE $0x00 // mov edi, 1 - WORD $0xe7d3 // shl edi, cl - WORD $0x2040; BYTE $0xd7 // and dil, dl - WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1b3c8841 // mov byte [r11 + rbx], dil - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 - JNE LBB4_58 - LONG $0x01c38349 // add r11, 1 - -LBB4_60: - LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fa8349 // cmp r10, 32 - JL LBB4_115 - QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 - QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 - -LBB4_62: - LONG $0x2e394466 // cmp word [rsi], r13w - QUAD $0x000000902494950f // setne byte [rsp + 144] - LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0xd7950f40 // setne dil - LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w - LONG $0xd6950f41 // setne r14b - LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w - QUAD $0x000000982494950f // setne byte [rsp + 152] - LONG $0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w - QUAD $0x000000882494950f // setne byte [rsp + 136] - LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w - WORD $0x950f; BYTE $0xd0 // setne al - LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w - WORD $0x950f; BYTE $0xd3 // setne bl - 
LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w - QUAD $0x000000a02494950f // setne byte [rsp + 160] - LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w - WORD $0x950f; BYTE $0xd2 // setne dl - LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w - LONG $0xd1950f41 // setne r9b - LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w - LONG $0xd2950f41 // setne r10b - LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w - LONG $0xd3950f41 // setne r11b - LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w - LONG $0xd4950f41 // setne r12b - LONG $0x6e394466; BYTE $0x1c // cmp word [rsi + 28], r13w - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] - LONG $0x6e394466; BYTE $0x1e // cmp word [rsi + 30], r13w - WORD $0x950f; BYTE $0xd1 // setne cl - LONG $0x6e394466; BYTE $0x20 // cmp word [rsi + 32], r13w - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x6e394466; BYTE $0x22 // cmp word [rsi + 34], r13w - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] - LONG $0x6e394466; BYTE $0x24 // cmp word [rsi + 36], r13w - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x6e394466; BYTE $0x26 // cmp word [rsi + 38], r13w - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] - LONG $0x6e394466; BYTE $0x28 // cmp word [rsi + 40], r13w - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x6e394466; BYTE $0x2a // cmp word [rsi + 42], r13w - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0x6e394466; BYTE $0x2c // cmp word [rsi + 44], r13w - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] - LONG $0x6e394466; BYTE $0x2e // cmp word [rsi + 46], r13w - LONG $0xd7950f41 // setne r15b - LONG $0x6e394466; BYTE $0x30 // cmp word [rsi + 48], r13w - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x6e394466; BYTE $0x32 // cmp word [rsi + 50], r13w - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x6e394466; BYTE $0x34 // cmp word [rsi + 52], r13w - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] - LONG $0x6e394466; BYTE $0x36 // cmp word [rsi + 54], r13w - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0x6e394466; BYTE $0x38 // cmp word [rsi + 56], r13w - QUAD $0x000001202494950f // setne byte [rsp + 288] - LONG $0x6e394466; BYTE $0x3a // cmp word [rsi + 58], r13w - QUAD $0x000001402494950f // setne byte [rsp + 320] - LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w - LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] - LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009024bc0240 // add dil, byte [rsp + 144] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE 
$0xfa // or dl, dil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x40c68348 // add rsi, 64 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + QUAD $0x000001182494894c // mov qword [rsp + 280], r10 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 + QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + +LBB4_54: + LONG $0x2e394466 // cmp word [rsi], r13w + WORD $0x950f; BYTE $0xd0 // setne al + LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w + LONG $0xd2950f41 // setne r10b + LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w + LONG $0xd6950f41 // setne r14b + LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w + QUAD $0x000000982494950f // setne byte [rsp + 152] + LONG 
$0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w + QUAD $0x000000882494950f // setne byte [rsp + 136] + LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w + QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w + WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w + QUAD $0x000000a02494950f // setne byte [rsp + 160] + LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w + LONG $0xd7950f40 // setne dil + LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w + LONG $0xd0950f41 // setne r8b + LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w + LONG $0xd1950f41 // setne r9b + LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w + LONG $0xd3950f41 // setne r11b + LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w + LONG $0xd4950f41 // setne r12b + LONG $0x6e394466; BYTE $0x1c // cmp word [rsi + 28], r13w + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x6e394466; BYTE $0x1e // cmp word [rsi + 30], r13w + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x6e394466; BYTE $0x20 // cmp word [rsi + 32], r13w + LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + LONG $0x6e394466; BYTE $0x22 // cmp word [rsi + 34], r13w + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x6e394466; BYTE $0x24 // cmp word [rsi + 36], r13w + QUAD $0x000000802494950f // setne byte [rsp + 128] + LONG $0x6e394466; BYTE $0x26 // cmp word [rsi + 38], r13w + LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x6e394466; BYTE $0x28 // cmp word [rsi + 40], r13w + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x6e394466; BYTE $0x2a // cmp word [rsi + 42], r13w + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x6e394466; BYTE $0x2c // cmp word [rsi + 44], r13w + LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + LONG $0x6e394466; BYTE $0x2e // cmp word [rsi + 46], r13w + LONG $0xd7950f41 // setne r15b + LONG $0x6e394466; BYTE $0x30 // cmp word [rsi + 48], r13w + LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x6e394466; BYTE $0x32 // cmp word [rsi + 50], r13w + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x6e394466; BYTE $0x34 // cmp word [rsi + 52], r13w + LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0x6e394466; BYTE $0x36 // cmp word [rsi + 54], r13w + LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] + LONG $0x6e394466; BYTE $0x38 // cmp word [rsi + 56], r13w + QUAD $0x000001202494950f // setne byte [rsp + 288] + LONG $0x6e394466; BYTE $0x3a // cmp word [rsi + 58], r13w + QUAD $0x000001402494950f // setne byte [rsp + 320] + LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w + LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] + LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + WORD $0xc308 // or bl, al + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] + WORD $0xe0c0; BYTE $0x03 // shl 
al, 3 + WORD $0x0844; BYTE $0xf0 // or al, r14b + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b + LONG $0x05e4c041 // shl r12b, 5 + WORD $0x0845; BYTE $0xdc // or r12b, r11b + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] + LONG $0x06e7c040 // shl dil, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0x0840; BYTE $0xf9 // or cl, dil + WORD $0xc308 // or bl, al + WORD $0x0844; BYTE $0xe1 // or cl, r12b + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl + WORD $0x0841; BYTE $0xff // or r15b, dil + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + WORD $0xc900 // add cl, cl + LONG $0x20244c02 // add cl, byte [rsp + 32] + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xd908 // or cl, bl + LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl + LONG $0x40c68348 // add rsi, 64 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + JNE LBB4_54 + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] + QUAD $0x0000011824948b4c // mov r10, 
qword [rsp + 280] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + LONG $0x05e7c149 // shl r15, 5 + WORD $0x394d; BYTE $0xd7 // cmp r15, r10 + JL LBB4_112 + JMP LBB4_165 + +LBB4_56: + LONG $0x2ab70f44 // movzx r13d, word [rdx] + LONG $0x1f7a8d4d // lea r15, [r10 + 31] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f4d // cmovns r15, r10 + LONG $0x07418d41 // lea eax, [r9 + 7] + WORD $0x8545; BYTE $0xc9 // test r9d, r9d + LONG $0xc1490f41 // cmovns eax, r9d + WORD $0xe083; BYTE $0xf8 // and eax, -8 + WORD $0x2941; BYTE $0xc1 // sub r9d, eax + JE LBB4_60 + WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + +LBB4_58: + LONG $0x2e394466 // cmp word [rsi], r13w + LONG $0x02768d48 // lea rsi, [rsi + 2] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xdaf6 // neg dl + LONG $0x07588d48 // lea rbx, [rax + 7] + WORD $0x8548; BYTE $0xc0 // test rax, rax + LONG $0xd8490f48 // cmovns rbx, rax + LONG $0x03fbc148 // sar rbx, 3 + LONG $0x04b60f45; BYTE $0x1b // movzx r8d, byte [r11 + rbx] + WORD $0x3044; BYTE $0xc2 // xor dl, r8b + LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] + WORD $0xc189 // mov ecx, eax + WORD $0xf929 // sub ecx, edi + LONG $0x000001bf; BYTE $0x00 // mov edi, 1 + WORD $0xe7d3 // shl edi, cl + WORD $0x2040; BYTE $0xd7 // and dil, dl + WORD $0x3044; BYTE $0xc7 // xor dil, r8b + LONG $0x1b3c8841 // mov byte [r11 + rbx], dil + LONG $0x01c08348 // add rax, 1 + LONG $0x08f88348 // cmp rax, 8 + JNE LBB4_58 + LONG $0x01c38349 // add r11, 1 + +LBB4_60: + LONG $0x05ffc149 // sar r15, 5 + LONG $0x20fa8349 // cmp r10, 32 + JL LBB4_115 + QUAD $0x000001182494894c // mov qword [rsp + 280], r10 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 + QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + +LBB4_62: + LONG $0x2e394466 // cmp word [rsi], r13w + QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w + LONG $0xd2950f41 // setne r10b + LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w + LONG $0xd6950f41 // setne r14b + LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w + QUAD $0x000000982494950f // setne byte [rsp + 152] + LONG $0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w + QUAD $0x000000882494950f // setne byte [rsp + 136] + LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w + WORD $0x950f; BYTE $0xd0 // setne al + LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w + WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w + QUAD $0x000000a02494950f // setne byte [rsp + 160] + LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w + LONG $0xd7950f40 // setne dil + LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w + LONG $0xd0950f41 // setne r8b + LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w + LONG $0xd1950f41 // setne r9b + LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w + LONG $0xd3950f41 // setne r11b + LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w + LONG $0xd4950f41 // setne r12b + LONG $0x6e394466; BYTE $0x1c // cmp word [rsi + 28], r13w + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x6e394466; BYTE $0x1e // cmp word [rsi + 30], r13w + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x6e394466; BYTE $0x20 // cmp word [rsi + 32], r13w + LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + 
LONG $0x6e394466; BYTE $0x22 // cmp word [rsi + 34], r13w + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x6e394466; BYTE $0x24 // cmp word [rsi + 36], r13w + QUAD $0x000000802494950f // setne byte [rsp + 128] + LONG $0x6e394466; BYTE $0x26 // cmp word [rsi + 38], r13w + LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x6e394466; BYTE $0x28 // cmp word [rsi + 40], r13w + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x6e394466; BYTE $0x2a // cmp word [rsi + 42], r13w + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x6e394466; BYTE $0x2c // cmp word [rsi + 44], r13w + LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + LONG $0x6e394466; BYTE $0x2e // cmp word [rsi + 46], r13w + LONG $0xd7950f41 // setne r15b + LONG $0x6e394466; BYTE $0x30 // cmp word [rsi + 48], r13w + LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x6e394466; BYTE $0x32 // cmp word [rsi + 50], r13w + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x6e394466; BYTE $0x34 // cmp word [rsi + 52], r13w + LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0x6e394466; BYTE $0x36 // cmp word [rsi + 54], r13w + LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] + LONG $0x6e394466; BYTE $0x38 // cmp word [rsi + 56], r13w + QUAD $0x000001202494950f // setne byte [rsp + 288] + LONG $0x6e394466; BYTE $0x3a // cmp word [rsi + 58], r13w + QUAD $0x000001402494950f // setne byte [rsp + 320] + LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w + LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] + LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009024940244 // add r10b, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + WORD $0xc308 // or bl, al + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xf0 // or al, r14b + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b + LONG $0x05e4c041 // shl r12b, 5 + WORD $0x0845; BYTE $0xdc // or r12b, r11b + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] + LONG $0x06e7c040 // shl dil, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0x0840; BYTE $0xf9 // or cl, dil + WORD $0xc308 // or bl, al + WORD $0x0844; BYTE $0xe1 // or cl, r12b + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // 
shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl + WORD $0x0841; BYTE $0xff // or r15b, dil + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + WORD $0xc900 // add cl, cl + LONG $0x20244c02 // add cl, byte [rsp + 32] + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xd908 // or cl, bl + LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl + LONG $0x40c68348 // add rsi, 64 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 JNE LBB4_62 - QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] - QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] - LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xd7 // cmp r15, r10 + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] + QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + LONG $0x05e7c149 // shl r15, 5 + WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB4_116 - JMP LBB4_159 + JMP LBB4_165 LBB4_64: WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] @@ -20846,7 +21781,7 @@ LBB4_70: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7950f40 // setne dil + LONG $0xd2950f41 // setne r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6950f41 // setne r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 @@ -20862,11 +21797,11 @@ LBB4_70: LONG $0x406e394c // cmp qword [rsi + 64], r13 QUAD $0x000000a02494950f // setne byte [rsp + 160] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x950f; BYTE $0xd2 // setne dl + LONG $0xd7950f40 // setne dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1950f41 // setne r9b + LONG $0xd0950f41 // setne r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2950f41 // setne r10b + LONG $0xd1950f41 // setne r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG 
$0xd3950f41 // setne r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 @@ -20906,276 +21841,68 @@ LBB4_70: LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009024bc0240 // add dil, byte [rsp + 144] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009024940244 // add r10b, byte [rsp + 144] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; 
BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB4_70 - QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] - QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] - LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB4_119 - JMP LBB4_159 - -LBB4_72: - LONG $0x1f7a8d4d // lea r15, [r10 + 31] - WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xfa490f4d // cmovns r15, r10 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - LONG $0x0210fac5 // vmovss xmm0, dword [rdx] - WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB4_76 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d - -LBB4_74: - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - LONG $0x04768d48 // lea rsi, [rsi + 4] - WORD $0x950f; BYTE $0xd2 // setne dl - WORD $0xdaf6 // neg dl - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax - LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3b // movzx r9d, byte [r11 + rdi] - WORD $0x3044; BYTE $0xca // xor dl, r9b - QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] - WORD $0xc189 // mov ecx, eax - WORD $0x2944; BYTE $0xc1 // sub ecx, r8d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0xd320 // and bl, dl - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3b1c8841 // mov byte [r11 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 - JNE LBB4_74 - LONG $0x01c38349 // add r11, 1 - -LBB4_76: - LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fa8349 // cmp r10, 32 - JL LBB4_121 - QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 - -LBB4_78: - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - QUAD $0x000000982494950f // setne byte [rsp + 152] - LONG $0x462ef8c5; BYTE $0x04 // 
vucomiss xmm0, dword [rsi + 4] - LONG $0xd1950f41 // setne r9b - LONG $0x462ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rsi + 8] - LONG $0xd6950f41 // setne r14b - LONG $0x462ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rsi + 12] - LONG $0xd5950f41 // setne r13b - LONG $0x462ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rsi + 16] - QUAD $0x000000882494950f // setne byte [rsp + 136] - LONG $0x462ef8c5; BYTE $0x14 // vucomiss xmm0, dword [rsi + 20] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x462ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rsi + 24] - WORD $0x950f; BYTE $0xd0 // setne al - LONG $0x462ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rsi + 28] - WORD $0x950f; BYTE $0xd3 // setne bl - LONG $0x462ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rsi + 32] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] - LONG $0x462ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rsi + 36] - WORD $0x950f; BYTE $0xd2 // setne dl - LONG $0x462ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rsi + 40] - LONG $0xd7950f40 // setne dil - LONG $0x462ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rsi + 44] - LONG $0xd2950f41 // setne r10b - LONG $0x462ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rsi + 48] - LONG $0xd3950f41 // setne r11b - LONG $0x462ef8c5; BYTE $0x34 // vucomiss xmm0, dword [rsi + 52] - LONG $0xd4950f41 // setne r12b - LONG $0x462ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rsi + 56] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] - LONG $0x462ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rsi + 60] - WORD $0x950f; BYTE $0xd1 // setne cl - LONG $0x462ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rsi + 64] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x462ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rsi + 68] - QUAD $0x000000a02494950f // setne byte [rsp + 160] - LONG $0x462ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rsi + 72] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x462ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rsi + 76] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] - LONG $0x462ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rsi + 80] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x462ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rsi + 84] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0x462ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rsi + 88] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] - LONG $0x462ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rsi + 92] - LONG $0xd7950f41 // setne r15b - LONG $0x462ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rsi + 96] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x462ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rsi + 100] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x462ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rsi + 104] - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] - LONG $0x462ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rsi + 108] - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0x462ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rsi + 112] - QUAD $0x000001202494950f // setne byte [rsp + 288] - LONG $0x462ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rsi + 116] - QUAD $0x000001402494950f // setne byte [rsp + 320] - LONG $0x462ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rsi + 120] - LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] - LONG $0x462ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rsi + 124] - LONG $0xd0950f41 // setne r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x00000098248c0244 // add r9b, byte [rsp 
+ 152] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al LONG $0x50244402 // add al, byte [rsp + 80] - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xc900 // add cl, cl @@ -21198,23 +21925,319 @@ LBB4_78: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl 
bl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl + LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + JNE LBB4_70 + QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] + QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + LONG $0x05e7c149 // shl r15, 5 + WORD $0x394d; BYTE $0xd7 // cmp r15, r10 + JL LBB4_119 + JMP LBB4_165 + +LBB4_72: + LONG $0x1f7a8d4d // lea r15, [r10 + 31] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f4d // cmovns r15, r10 + LONG $0x07418d41 // lea eax, [r9 + 7] + WORD $0x8545; BYTE $0xc9 // test r9d, r9d + LONG $0xc1490f41 // cmovns eax, r9d + WORD $0xe083; BYTE $0xf8 // and eax, -8 + LONG $0x0210fac5 // vmovss xmm0, dword [rdx] + WORD $0x2941; BYTE $0xc1 // sub r9d, eax + JE LBB4_76 + WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + +LBB4_74: + LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] + LONG $0x04768d48 // lea rsi, [rsi + 4] + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xca08 // or dl, cl + WORD $0xdaf6 // neg dl + LONG $0x07788d48 // lea rdi, [rax + 7] + WORD $0x8548; BYTE $0xc0 // test rax, rax + LONG $0xf8490f48 // cmovns rdi, rax + LONG $0x03ffc148 // sar rdi, 3 + LONG $0x0cb60f45; BYTE $0x3b // movzx r9d, byte [r11 + rdi] + WORD $0x3044; BYTE $0xca // xor dl, r9b + QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] + WORD $0xc189 // mov ecx, eax + WORD $0x2944; BYTE $0xc1 // sub ecx, r8d + LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 + WORD $0xe3d3 // shl ebx, cl + WORD $0xd320 // and bl, dl + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x3b1c8841 // mov byte [r11 + rdi], bl + LONG $0x01c08348 // add rax, 1 + LONG $0x08f88348 // cmp rax, 8 + JNE LBB4_74 + LONG $0x01c38349 // add r11, 1 + +LBB4_76: + LONG $0x05ffc149 // sar r15, 5 + LONG $0x20fa8349 // cmp r10, 32 + JL LBB4_121 + QUAD $0x000001182494894c // mov qword [rsp + 280], r10 + QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 + QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 + QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + +LBB4_78: + LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl + LONG $0x462ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rsi + 4] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al + LONG $0x462ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rsi + 8] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x462ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rsi + 12] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], cl + LONG $0x462ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rsi + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 320], cl + LONG $0x462ef8c5; BYTE $0x14 // vucomiss xmm0, dword [rsi + 20] + WORD $0x9a0f; BYTE $0xd0 // setp al + 
LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al + LONG $0x462ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rsi + 24] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl + LONG $0x462ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rsi + 28] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x1c244c88 // mov byte [rsp + 28], cl + LONG $0x462ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rsi + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl + LONG $0x462ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rsi + 36] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl + LONG $0x462ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rsi + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl + LONG $0x462ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rsi + 44] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl + LONG $0x462ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rsi + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl + LONG $0x462ef8c5; BYTE $0x34 // vucomiss xmm0, dword [rsi + 52] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl + LONG $0x462ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rsi + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al + LONG $0x462ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rsi + 60] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl + LONG $0x462ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rsi + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl + LONG $0x462ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rsi + 68] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl + LONG $0x462ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rsi + 72] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x462ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rsi + 76] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x80248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], al + LONG $0x462ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rsi + 80] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x78244488 // mov byte [rsp + 120], al + LONG $0x462ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rsi + 84] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0xa0248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], al + 
LONG $0x462ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rsi + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd4950f41 // setne r12b + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x462ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rsi + 92] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al + LONG $0x462ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rsi + 96] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x70244488 // mov byte [rsp + 112], al + LONG $0x462ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rsi + 100] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x462ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rsi + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al + LONG $0x462ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rsi + 108] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al + LONG $0x462ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rsi + 112] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x98248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], al + LONG $0x462ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rsi + 116] + LONG $0xd09a0f41 // setp r8b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xc0 // or al, r8b + LONG $0x90248488; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], al + LONG $0x462ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rsi + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al + LONG $0x462ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rsi + 124] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al + WORD $0xdb00 // add bl, bl + LONG $0x20245c02 // add bl, byte [rsp + 32] + LONG $0x05e7c040 // shl dil, 5 + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xc000 // add al, al + LONG $0x38244402 // add al, byte [rsp + 56] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x1c244488 // mov byte [rsp + 28], al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xfb // or bl, dil + LONG $0x247cb60f; BYTE $0x58 // movzx edi, byte [rsp + 88] + LONG 
$0x05e7c040 // shl dil, 5 + WORD $0xe2c0; BYTE $0x06 // shl dl, 6 + WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x6cb60f44; WORD $0x5024 // movzx r13d, byte [rsp + 80] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xd5 // or r13b, dl + QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] + WORD $0xd200 // add dl, dl + LONG $0x48245402 // add dl, byte [rsp + 72] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd108 // or cl, dl + QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] + WORD $0xe2c0; BYTE $0x03 // shl dl, 3 + WORD $0xca08 // or dl, cl + WORD $0xd189 // mov ecx, edx + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] + WORD $0xe2c0; BYTE $0x04 // shl dl, 4 + WORD $0xca08 // or dl, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xc108 // or cl, al + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + LONG $0x06e4c041 // shl r12b, 6 + WORD $0x0841; BYTE $0xc4 // or r12b, al + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x07e2c041 // shl r10b, 7 + WORD $0x0845; BYTE $0xe2 // or r10b, r12b + WORD $0x0841; BYTE $0xd2 // or r10b, dl + WORD $0x0045; BYTE $0xff // add r15b, r15b + LONG $0x247c0244; BYTE $0x70 // add r15b, byte [rsp + 112] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xf3 // or r11b, r14b + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd8 // or al, r11b + WORD $0xc389 // mov ebx, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x0888 // mov byte [rax], cl + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] + WORD $0xe2c0; BYTE $0x05 // shl dl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd1 // or r9b, dl + LONG $0x01688844 // mov byte [rax + 1], r13b LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + LONG $0x02508844 // mov byte [rax + 2], r10b + LONG $0x03408844 // mov byte [rax + 3], r8b LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx - QUAD $0x0000009024848348; BYTE $0xff // add qword [rsp + 144], -1 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 JNE LBB4_78 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB4_122 - JMP LBB4_159 + JMP LBB4_165 LBB4_80: WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] @@ -21266,10 +22289,10 @@ LBB4_84: LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc3 // cmp r11, rax - JAE LBB4_168 + JAE LBB4_169 LONG $0xbb048d4b // lea rax, [r11 + 4*r15] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB4_168 + JAE LBB4_169 LBB4_88: WORD $0xc031 // xor eax, eax @@ -21499,7 +22522,7 @@ LBB4_98: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG 
$0xd7950f40 // setne dil + LONG $0xd2950f41 // setne r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6950f41 // setne r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d @@ -21515,11 +22538,11 @@ LBB4_98: LONG $0x206e3944 // cmp dword [rsi + 32], r13d QUAD $0x000000a02494950f // setne byte [rsp + 160] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x950f; BYTE $0xd2 // setne dl + LONG $0xd7950f40 // setne dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1950f41 // setne r9b + LONG $0xd0950f41 // setne r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2950f41 // setne r10b + LONG $0xd1950f41 // setne r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3950f41 // setne r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d @@ -21559,32 +22582,33 @@ LBB4_98: LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009024bc0240 // add dil, byte [rsp + 144] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009024940244 // add r10b, byte [rsp + 144] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] @@ -21592,60 +22616,60 @@ LBB4_98: LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD 
$0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1b // mov byte [r11], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x1c // movzx edx, byte [rsp + 28] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xc000 // add al, al + LONG $0x20244402 // add al, byte [rsp + 32] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD 
$0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x03438845 // mov byte [r11 + 3], r8b + LONG $0x03538841 // mov byte [r11 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c38349 // add r11, 4 QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 @@ -21656,13 +22680,13 @@ LBB4_98: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 JL LBB4_129 - JMP LBB4_159 + JMP LBB4_165 LBB4_100: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 LBB4_101: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -21704,13 +22728,13 @@ LBB4_103: LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 JNE LBB4_103 - JMP LBB4_156 + JMP LBB4_157 LBB4_104: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 LBB4_105: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -21725,7 +22749,7 @@ LBB4_107: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 LBB4_108: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -21773,7 +22797,7 @@ LBB4_111: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 LBB4_112: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -21821,7 +22845,7 @@ LBB4_115: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 LBB4_116: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -21838,7 +22862,7 @@ LBB4_118: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 LBB4_119: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -21855,7 +22879,7 @@ LBB4_121: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 LBB4_122: WORD $0x894d; BYTE $0xd0 // mov r8, r10 @@ -21873,7 +22897,7 @@ LBB4_124: LBB4_125: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 @@ -21884,7 +22908,7 @@ LBB4_125: WORD $0xf631 // xor esi, esi QUAD $0x00000178249c8b4c // mov r11, qword [rsp + 376] -LBB4_153: +LBB4_154: LONG $0x34343845 // cmp byte [r12 + rsi], r14b WORD $0x950f; BYTE $0xd3 // setne bl WORD $0xdbf6 // neg bl @@ -21911,25 +22935,25 @@ LBB4_153: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB4_153 + JNE LBB4_154 JMP LBB4_162 LBB4_128: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + JGE LBB4_165 LBB4_129: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB4_154 + JNE LBB4_155 LBB4_130: WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB4_156 + JMP LBB4_157 LBB4_131: WORD $0x894d; BYTE $0xdd // mov r13, r11 @@ -21944,7 +22968,7 @@ LBB4_132: LBB4_133: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB4_159 + 
JGE LBB4_165 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 @@ -21956,30 +22980,34 @@ LBB4_127: JMP LBB4_163 LBB4_136: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 LBB4_137: LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xc808 // or al, cl WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3f // movzx r9d, byte [r15 + rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + LONG $0x14b60f45; BYTE $0x3f // movzx r10d, byte [r15 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b + WORD $0x3044; BYTE $0xd3 // xor bl, r10b LONG $0x3f1c8841 // mov byte [r15 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x462ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rsi + 8] LONG $0x10768d48 // lea rsi, [rsi + 16] + LONG $0xd29a0f41 // setp r10b WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xd0 // or al, r10b WORD $0xd8f6 // neg al WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 @@ -21988,14 +23016,14 @@ LBB4_137: WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 + WORD $0x394d; BYTE $0xd9 // cmp r9, r11 JNE LBB4_137 LBB4_138: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_159 + JE LBB4_165 LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - JMP LBB4_158 + JMP LBB4_152 LBB4_140: WORD $0x894d; BYTE $0xc2 // mov r10, r8 @@ -22035,9 +23063,9 @@ LBB4_141: LBB4_142: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_159 + JE LBB4_165 LONG $0x2e394466 // cmp word [rsi], r13w - JMP LBB4_158 + JMP LBB4_159 LBB4_144: WORD $0x894d; BYTE $0xc2 // mov r10, r8 @@ -22077,35 +23105,39 @@ LBB4_145: LBB4_146: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_159 + JE LBB4_165 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - JMP LBB4_158 + JMP LBB4_159 LBB4_148: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 LBB4_149: LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xc808 // or al, cl WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3f // movzx r9d, byte [r15 + rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + LONG $0x14b60f45; BYTE $0x3f // movzx r10d, byte [r15 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b + WORD $0x3044; BYTE $0xd3 // xor bl, r10b LONG $0x3f1c8841 // mov byte [r15 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x462ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rsi + 4] LONG $0x08768d48 // lea rsi, [rsi + 8] + LONG $0xd29a0f41 // setp r10b WORD $0x950f; BYTE $0xd0 // setne al + 
WORD $0x0844; BYTE $0xd0 // or al, r10b WORD $0xd8f6 // neg al WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 @@ -22114,22 +23146,39 @@ LBB4_149: WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 + WORD $0x394d; BYTE $0xd9 // cmp r9, r11 JNE LBB4_149 LBB4_150: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_159 + JE LBB4_165 LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - JMP LBB4_158 -LBB4_154: +LBB4_152: + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al + WORD $0xdaf6 // neg dl + WORD $0x894c; BYTE $0xd8 // mov rax, r11 + LONG $0x03e8c148 // shr rax, 3 + LONG $0x06348a41 // mov sil, byte [r14 + rax] + LONG $0x07e38041 // and r11b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf2 // xor dl, sil + WORD $0xd320 // and bl, dl + WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x061c8841 // mov byte [r14 + rax], bl + JMP LBB4_165 + +LBB4_155: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB4_155: +LBB4_156: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -22157,14 +23206,14 @@ LBB4_155: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB4_155 + JNE LBB4_156 -LBB4_156: +LBB4_157: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_159 + JE LBB4_165 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d -LBB4_158: +LBB4_159: WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xda // mov rdx, r11 @@ -22178,11 +23227,7 @@ LBB4_158: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xf3 // xor bl, sil LONG $0x161c8841 // mov byte [r14 + rdx], bl - -LBB4_159: - MOVQ 1280(SP), SP - VZEROUPPER - RET + JMP LBB4_165 LBB4_160: WORD $0x894d; BYTE $0xc2 // mov r10, r8 @@ -22224,7 +23269,7 @@ LBB4_162: LBB4_163: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_159 + JE LBB4_165 LONG $0x24343845 // cmp byte [r12], r14b WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -22240,9 +23285,13 @@ LBB4_163: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB4_159 LBB4_165: + MOVQ 1280(SP), SP + VZEROUPPER + RET + +LBB4_166: LONG $0xe0e78349 // and r15, -32 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 @@ -22258,7 +23307,7 @@ LBB4_165: WORD $0xc031 // xor eax, eax QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 -LBB4_166: +LBB4_167: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000019824848948 // mov qword [rsp + 408], rax LONG $0x05e3c148 // shl rbx, 5 @@ -24350,7 +25399,7 @@ LBB4_166: LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000180248c3b48 // cmp rcx, qword [rsp + 384] - JNE LBB4_166 + JNE LBB4_167 QUAD $0x0000018824bc8b4c // mov r15, qword [rsp + 392] QUAD $0x0000018024bc3b4c // cmp r15, qword [rsp + 384] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] @@ -24359,7 +25408,7 @@ LBB4_166: JNE LBB4_35 JMP LBB4_133 -LBB4_168: +LBB4_169: LONG $0xe0e78349 // and r15, -32 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 @@ -24374,7 +25423,7 @@ LBB4_168: WORD $0xc031 // xor eax, eax QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB4_169: 
+LBB4_170: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000019824848948 // mov qword [rsp + 408], rax LONG $0x05e3c148 // shl rbx, 5 @@ -26471,7 +27520,7 @@ LBB4_169: LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000180248c3b48 // cmp rcx, qword [rsp + 384] - JNE LBB4_169 + JNE LBB4_170 QUAD $0x0000018824bc8b4c // mov r15, qword [rsp + 392] QUAD $0x0000018024bc3b4c // cmp r15, qword [rsp + 384] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] @@ -26525,17 +27574,17 @@ TEXT ·_comparison_not_equal_scalar_arr_avx2(SB), $1320-48 LEAQ LCDATA4<>(SB), BP WORD $0x894d; BYTE $0xc2 // mov r10, r8 - WORD $0x8949; BYTE $0xcf // mov r15, rcx + WORD $0x8949; BYTE $0xcd // mov r13, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 JG LBB5_17 WORD $0xff83; BYTE $0x03 // cmp edi, 3 JLE LBB5_32 WORD $0xff83; BYTE $0x04 // cmp edi, 4 - JE LBB5_60 + JE LBB5_63 WORD $0xff83; BYTE $0x05 // cmp edi, 5 - JE LBB5_72 + JE LBB5_75 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB5_157 + JNE LBB5_164 WORD $0x8b44; BYTE $0x36 // mov r14d, dword [rsi] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -26557,7 +27606,8 @@ LBB5_7: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - LONG $0x04b60f45; BYTE $0x37 // movzx r8d, byte [r15 + rsi] + WORD $0x894d; BYTE $0xe9 // mov r9, r13 + LONG $0x44b60f45; WORD $0x0035 // movzx r8d, byte [r13 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -26566,22 +27616,22 @@ LBB5_7: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x373c8841 // mov byte [r15 + rsi], dil + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB5_7 - LONG $0x01c78349 // add r15, 1 + LONG $0x01c58349 // add r13, 1 LBB5_9: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB5_13 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 LBB5_11: - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LONG $0x7c723b44 // cmp r14d, dword [rdx + 124] LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0x78723b44 // cmp r14d, dword [rdx + 120] @@ -26591,31 +27641,31 @@ LBB5_11: LONG $0x70723b44 // cmp r14d, dword [rdx + 112] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x6c723b44 // cmp r14d, dword [rdx + 108] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x68723b44 // cmp r14d, dword [rdx + 104] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0x64723b44 // cmp r14d, dword [rdx + 100] + LONG $0x68723b44 // cmp r14d, dword [rdx + 104] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0x64723b44 // cmp r14d, dword [rdx + 100] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x5c723b44 // cmp r14d, dword [rdx + 92] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x58723b44 // cmp r14d, dword [rdx + 88] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x54723b44 // cmp r14d, dword [rdx + 84] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0x50723b44 // cmp r14d, dword [rdx + 80] LONG $0x2454950f; BYTE 
$0x60 // setne byte [rsp + 96] - LONG $0x4c723b44 // cmp r14d, dword [rdx + 76] + LONG $0x50723b44 // cmp r14d, dword [rdx + 80] LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x48723b44 // cmp r14d, dword [rdx + 72] + LONG $0x4c723b44 // cmp r14d, dword [rdx + 76] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x44723b44 // cmp r14d, dword [rdx + 68] + LONG $0x48723b44 // cmp r14d, dword [rdx + 72] QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x44723b44 // cmp r14d, dword [rdx + 68] + QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x3c723b44 // cmp r14d, dword [rdx + 60] LONG $0xd0950f41 // setne r8b LONG $0x38723b44 // cmp r14d, dword [rdx + 56] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x34723b44 // cmp r14d, dword [rdx + 52] LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x34723b44 // cmp r14d, dword [rdx + 52] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x30723b44 // cmp r14d, dword [rdx + 48] LONG $0xd3950f41 // setne r11b LONG $0x2c723b44 // cmp r14d, dword [rdx + 44] @@ -26633,28 +27683,28 @@ LBB5_11: LONG $0x10723b44 // cmp r14d, dword [rdx + 16] WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x0c723b44 // cmp r14d, dword [rdx + 12] - LONG $0xd5950f41 // setne r13b - LONG $0x08723b44 // cmp r14d, dword [rdx + 8] LONG $0xd4950f41 // setne r12b + LONG $0x08723b44 // cmp r14d, dword [rdx + 8] + LONG $0xd5950f41 // setne r13b WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] - QUAD $0x000000a82494950f // setne byte [rsp + 168] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x04723b44 // cmp r14d, dword [rdx + 4] LONG $0xd7950f41 // setne r15b LONG $0x20723b44 // cmp r14d, dword [rdx + 32] QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x40723b44 // cmp r14d, dword [rdx + 64] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x60723b44 // cmp r14d, dword [rdx + 96] - QUAD $0x000000882494950f // setne byte [rsp + 136] + QUAD $0x000000802494950f // setne byte [rsp + 128] WORD $0x0045; BYTE $0xff // add r15b, r15b - QUAD $0x000000a824bc0244 // add r15b, byte [rsp + 168] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xfc // or r12b, r15b + QUAD $0x000000b024bc0244 // add r15b, byte [rsp + 176] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0845; BYTE $0xfd // or r13b, r15b QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0844; BYTE $0xe1 // or cl, r12b LONG $0x05e6c040 // shl sil, 5 WORD $0x0840; BYTE $0xce // or sil, cl WORD $0xe3c0; BYTE $0x06 // shl bl, 6 @@ -26670,32 +27720,32 @@ LBB5_11: WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e3c041 // shl r11b, 4 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xd8 // or al, r11b - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x01478845 // mov byte [r15 + 1], r8b - QUAD $0x000000902484b60f // movzx 
eax, byte [rsp + 144] + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xc000 // add al, al - LONG $0x70244402 // add al, byte [rsp + 112] + LONG $0x68244402 // add al, byte [rsp + 104] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -26706,15 +27756,15 @@ LBB5_11: WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl LONG $0x02478841 // mov byte [r15 + 2], al - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x88248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 136] + LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -26735,64 +27785,66 @@ LBB5_11: LONG $0x03478841 // mov byte [r15 + 3], al LONG $0x80ea8348 // sub rdx, -128 LONG $0x04c78349 // add r15, 4 - QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 + WORD $0x894d; BYTE $0xfd // mov r13, r15 + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 JNE LBB5_11 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] LBB5_13: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB5_157 + JGE LBB5_164 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JE LBB5_127 + JE LBB5_133 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xff31 // xor edi, edi LBB5_16: - WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0xf989 // mov ecx, edi - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x371c8841 // mov byte [r15 + rsi], bl - LONG $0x02c78348 // add rdi, 2 - LONG $0x04723b44 // cmp r14d, dword [rdx + 4] - LONG $0x08528d48 // lea rdx, [rdx + 8] - LONG 
$0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x37048841 // mov byte [r15 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi + WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + LONG $0x03eec148 // shr rsi, 3 + WORD $0x894d; BYTE $0xeb // mov r11, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0xf989 // mov ecx, edi + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x355c8841; BYTE $0x00 // mov byte [r13 + rsi], bl + LONG $0x02c78348 // add rdi, 2 + LONG $0x04723b44 // cmp r14d, dword [rdx + 4] + LONG $0x08528d48 // lea rdx, [rdx + 8] + LONG $0xd1950f41 // setne r9b + WORD $0xf641; BYTE $0xd9 // neg r9b + WORD $0x3041; BYTE $0xd9 // xor r9b, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0x2044; BYTE $0xc8 // and al, r9b + WORD $0xd830 // xor al, bl + LONG $0x35448841; BYTE $0x00 // mov byte [r13 + rsi], al + WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB5_16 - JMP LBB5_154 + JMP LBB5_160 LBB5_17: WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JLE LBB5_46 + JLE LBB5_49 WORD $0xff83; BYTE $0x09 // cmp edi, 9 - JE LBB5_83 + JE LBB5_86 WORD $0xff83; BYTE $0x0b // cmp edi, 11 - JE LBB5_94 + JE LBB5_97 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB5_157 + JNE LBB5_164 LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xda490f4d // cmovns r11, r10 @@ -26806,212 +27858,304 @@ LBB5_17: WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LBB5_23: - LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] - LONG $0x08528d48 // lea rdx, [rdx + 8] - WORD $0x950f; BYTE $0xd3 // setne bl - WORD $0xdbf6 // neg bl - LONG $0x07708d48 // lea rsi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf0490f48 // cmovns rsi, rax - LONG $0x03fec148 // sar rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b - QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] - WORD $0xc189 // mov ecx, eax - WORD $0x2944; BYTE $0xc1 // sub ecx, r8d - LONG $0x000001bf; BYTE $0x00 // mov edi, 1 - WORD $0xe7d3 // shl edi, cl - WORD $0x2040; BYTE $0xdf // and dil, bl - WORD $0x3044; BYTE $0xcf // xor dil, r9b - LONG $0x373c8841 // mov byte [r15 + rsi], dil - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] + LONG $0x08528d48 // lea rdx, [rdx + 8] + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xcb08 // or bl, cl + WORD $0xdbf6 // neg bl + LONG $0x07708d48 // lea rsi, [rax + 7] + WORD $0x8548; BYTE $0xc0 // test rax, rax + LONG $0xf0490f48 // cmovns rsi, rax + LONG $0x03fec148 // sar rsi, 3 + WORD $0x894d; BYTE $0xee // mov r14, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0x3044; BYTE $0xcb // xor bl, r9b + QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] + WORD $0xc189 // mov ecx, eax + WORD $0x2944; BYTE $0xc1 // sub ecx, r8d + LONG $0x000001bf; BYTE $0x00 // mov edi, 1 + WORD $0xe7d3 // shl edi, cl + WORD $0x2040; BYTE $0xdf // and 
dil, bl + WORD $0x3044; BYTE $0xcf // xor dil, r9b + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil + LONG $0x01c08348 // add rax, 1 + LONG $0x08f88348 // cmp rax, 8 JNE LBB5_23 - LONG $0x01c78349 // add r15, 1 + LONG $0x01c58349 // add r13, 1 LBB5_25: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 JL LBB5_29 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 - QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 + QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 + QUAD $0x000000b8249c894c // mov qword [rsp + 184], r11 LBB5_27: - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] - QUAD $0x000000982494950f // setne byte [rsp + 152] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x422ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rdx + 8] - LONG $0xd1950f41 // setne r9b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al LONG $0x422ef9c5; BYTE $0x10 // vucomisd xmm0, qword [rdx + 16] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x422ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rdx + 24] - LONG $0xd5950f41 // setne r13b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 320], cl LONG $0x422ef9c5; BYTE $0x20 // vucomisd xmm0, qword [rdx + 32] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x422ef9c5; BYTE $0x28 // vucomisd xmm0, qword [rdx + 40] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd4950f41 // setne r12b + WORD $0x0841; BYTE $0xc4 // or r12b, al LONG $0x422ef9c5; BYTE $0x30 // vucomisd xmm0, qword [rdx + 48] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x422ef9c5; BYTE $0x38 // vucomisd xmm0, qword [rdx + 56] - LONG $0xd4950f41 // setne r12b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x1c244c88 // mov byte [rsp + 28], cl LONG $0x422ef9c5; BYTE $0x40 // vucomisd xmm0, qword [rdx + 64] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x422ef9c5; BYTE $0x48 // vucomisd xmm0, qword [rdx + 72] - LONG $0xd6950f40 // setne sil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x422ef9c5; BYTE $0x50 // vucomisd xmm0, qword [rdx + 80] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x422ef9c5; BYTE $0x58 // vucomisd xmm0, qword 
[rdx + 88] - LONG $0xd0950f41 // setne r8b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al LONG $0x422ef9c5; BYTE $0x60 // vucomisd xmm0, qword [rdx + 96] - LONG $0xd2950f41 // setne r10b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], cl LONG $0x422ef9c5; BYTE $0x68 // vucomisd xmm0, qword [rdx + 104] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x422ef9c5; BYTE $0x70 // vucomisd xmm0, qword [rdx + 112] - QUAD $0x000000802494950f // setne byte [rsp + 128] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x90248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], cl LONG $0x422ef9c5; BYTE $0x78 // vucomisd xmm0, qword [rdx + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl QUAD $0x00000080822ef9c5 // vucomisd xmm0, qword [rdx + 128] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl QUAD $0x00000088822ef9c5 // vucomisd xmm0, qword [rdx + 136] - QUAD $0x000000882494950f // setne byte [rsp + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl QUAD $0x00000090822ef9c5 // vucomisd xmm0, qword [rdx + 144] - QUAD $0x000000902494950f // setne byte [rsp + 144] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl QUAD $0x00000098822ef9c5 // vucomisd xmm0, qword [rdx + 152] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl QUAD $0x000000a0822ef9c5 // vucomisd xmm0, qword [rdx + 160] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl QUAD $0x000000a8822ef9c5 // vucomisd xmm0, qword [rdx + 168] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl QUAD $0x000000b0822ef9c5 // vucomisd xmm0, qword [rdx + 176] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al QUAD $0x000000b8822ef9c5 // vucomisd xmm0, qword [rdx + 184] - LONG $0xd6950f41 // setne r14b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al QUAD $0x000000c0822ef9c5 // vucomisd xmm0, qword [rdx + 192] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x98248c88; WORD 
$0x0000; BYTE $0x00 // mov byte [rsp + 152], cl QUAD $0x000000c8822ef9c5 // vucomisd xmm0, qword [rdx + 200] - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al QUAD $0x000000d0822ef9c5 // vucomisd xmm0, qword [rdx + 208] - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al QUAD $0x000000d8822ef9c5 // vucomisd xmm0, qword [rdx + 216] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al QUAD $0x000000e0822ef9c5 // vucomisd xmm0, qword [rdx + 224] - QUAD $0x000001402494950f // setne byte [rsp + 320] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl QUAD $0x000000e8822ef9c5 // vucomisd xmm0, qword [rdx + 232] - QUAD $0x000001202494950f // setne byte [rsp + 288] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xa8248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 168], cl QUAD $0x000000f0822ef9c5 // vucomisd xmm0, qword [rdx + 240] - LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al QUAD $0x000000f8822ef9c5 // vucomisd xmm0, qword [rdx + 248] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x00000098248c0244 // add r9b, byte [rsp + 152] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e4c041 // shl r12b, 7 - WORD $0x0841; BYTE $0xdc // or r12b, bl - LONG $0x02e3c041 // shl r11b, 2 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x78 // add sil, byte [rsp + 120] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xdd // or r13b, r11b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] - WORD $0xe3c0; BYTE $0x04 // shl bl, 4 - WORD $0x0844; BYTE $0xeb // or bl, r13b - WORD $0xde89 // mov esi, ebx - LONG $0x03e0c041 // shl r8b, 3 - WORD $0x0841; BYTE $0xf8 // or r8b, dil - LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] - WORD $0xe3c0; BYTE $0x05 // shl bl, 5 - WORD $0x0840; BYTE $0xf3 // or bl, sil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xc2 // or r10b, r8b - LONG $0x05e7c041 // shl r15b, 5 - WORD $0x0845; BYTE $0xd7 // or r15b, r10b - QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xdc // or r12b, bl - WORD $0x0844; BYTE $0xf9 // or cl, r15b - QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] - WORD $0xdb00 // add bl, bl - LONG $0x58245c02 // add bl, byte [rsp + 88] - WORD $0xde89 // mov esi, ebx - QUAD $0x00000090249cb60f // movzx ebx, byte [rsp + 144] - WORD $0xe3c0; BYTE $0x02 // shl bl, 2 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f40 // setne sil + WORD $0x0840; BYTE $0xc6 // or 
sil, al + WORD $0x0045; BYTE $0xed // add r13b, r13b + LONG $0x246c0244; BYTE $0x30 // add r13b, byte [rsp + 48] + WORD $0x8944; BYTE $0xe8 // mov eax, r13d + LONG $0x05e4c041 // shl r12b, 5 + LONG $0x6cb60f44; WORD $0x4824 // movzx r13d, byte [rsp + 72] + LONG $0x06e5c041 // shl r13b, 6 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x64b60f44; WORD $0x3824 // movzx r12d, byte [rsp + 56] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + WORD $0xc900 // add cl, cl + LONG $0x28244c02 // add cl, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x1c244488 // mov byte [rsp + 28], al + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe0 // or al, r12b WORD $0xe3c0; BYTE $0x03 // shl bl, 3 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] - WORD $0xe3c0; BYTE $0x04 // shl bl, 4 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x05 // shl bl, 5 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0x8845; BYTE $0x27 // mov byte [r15], r12b - LONG $0x2474b60f; BYTE $0x48 // movzx esi, byte [rsp + 72] - LONG $0x06e6c040 // shl sil, 6 + WORD $0xcb08 // or bl, cl + LONG $0x6cb60f44; WORD $0x2024 // movzx r13d, byte [rsp + 32] + LONG $0x04e5c041 // shl r13b, 4 + WORD $0x0841; BYTE $0xc5 // or r13b, al + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + LONG $0x20248488; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], al + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x00008824b4b60f44; BYTE $0x00 // movzx r14d, byte [rsp + 136] LONG $0x07e6c041 // shl r14b, 7 - WORD $0x0841; BYTE $0xf6 // or r14b, sil - LONG $0x014f8841 // mov byte [r15 + 1], cl - WORD $0x0841; BYTE $0xde // or r14b, bl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + WORD $0x0841; BYTE $0xc6 // or r14b, al + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + LONG $0x58244c02 // add cl, byte [rsp + 88] + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xcb08 // or bl, cl + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 
288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - WORD $0xe0c0; BYTE $0x07 // shl al, 7 - WORD $0xd808 // or al, bl - WORD $0xc808 // or al, cl - LONG $0x02778845 // mov byte [r15 + 2], r14b - LONG $0x03478841 // mov byte [r15 + 3], al + WORD $0x8941; BYTE $0xcc // mov r12d, ecx + QUAD $0x00000110248c8b48 // mov rcx, qword [rsp + 272] + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0x0844; BYTE $0xe8 // or al, r13b + QUAD $0x0000b024a4b60f44; BYTE $0x00 // movzx r12d, byte [rsp + 176] + LONG $0x05e4c041 // shl r12b, 5 + LONG $0x06e7c041 // shl r15b, 6 + WORD $0x0845; BYTE $0xe7 // or r15b, r12b + QUAD $0x0000012024b40a44 // or r14b, byte [rsp + 288] + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xf8 // or r8b, r15b + WORD $0x0841; BYTE $0xd8 // or r8b, bl + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x00000098249c0244 // add r11b, byte [rsp + 152] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + QUAD $0x000000a0249cb60f // movzx ebx, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xcb // or bl, r9b + WORD $0x8941; BYTE $0xd9 // mov r9d, ebx + WORD $0x0188 // mov byte [rcx], al + QUAD $0x000000a8249cb60f // movzx ebx, byte [rsp + 168] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x01718844 // mov byte [rcx + 1], r14b + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0844; BYTE $0xce // or sil, r9b + LONG $0x02418844 // mov byte [rcx + 2], r8b + LONG $0x03718840 // mov byte [rcx + 3], sil LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c78349 // add r15, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + LONG $0x04c18348 // add rcx, 4 + WORD $0x8949; BYTE $0xcd // mov r13, rcx + QUAD $0x000000b824848348; BYTE $0xff // add qword [rsp + 184], -1 JNE LBB5_27 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] + QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] LBB5_29: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB5_157 + JGE LBB5_164 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB5_136 + JNE LBB5_134 WORD $0xff31 // xor edi, edi - JMP LBB5_138 + JMP LBB5_136 LBB5_32: WORD $0xff83; BYTE $0x02 // cmp edi, 2 - JE LBB5_105 + JE LBB5_108 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB5_157 + JNE LBB5_164 WORD $0x8a44; BYTE $0x1e // mov r11b, byte [rsi] LONG $0x1f728d4d // lea r14, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -27025,33 +28169,34 @@ LBB5_32: WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LBB5_36: - WORD $0x3a44; BYTE $0x1a // cmp r11b, byte [rdx] - LONG $0x01528d48 // lea rdx, [rdx + 1] - WORD $0x950f; BYTE $0xd3 // setne bl - WORD $0xdbf6 // neg bl - LONG $0x07708d48 // lea rsi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf0490f48 // cmovns rsi, rax - LONG $0x03fec148 // sar rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - 
WORD $0x3044; BYTE $0xcb // xor bl, r9b - QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] - WORD $0xc189 // mov ecx, eax - WORD $0x2944; BYTE $0xc1 // sub ecx, r8d - LONG $0x000001bf; BYTE $0x00 // mov edi, 1 - WORD $0xe7d3 // shl edi, cl - WORD $0x2040; BYTE $0xdf // and dil, bl - WORD $0x3044; BYTE $0xcf // xor dil, r9b - LONG $0x373c8841 // mov byte [r15 + rsi], dil - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + WORD $0x3a44; BYTE $0x1a // cmp r11b, byte [rdx] + LONG $0x01528d48 // lea rdx, [rdx + 1] + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xdbf6 // neg bl + LONG $0x07708d48 // lea rsi, [rax + 7] + WORD $0x8548; BYTE $0xc0 // test rax, rax + LONG $0xf0490f48 // cmovns rsi, rax + LONG $0x03fec148 // sar rsi, 3 + WORD $0x894d; BYTE $0xef // mov r15, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0x3044; BYTE $0xcb // xor bl, r9b + QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] + WORD $0xc189 // mov ecx, eax + WORD $0x2944; BYTE $0xc1 // sub ecx, r8d + LONG $0x000001bf; BYTE $0x00 // mov edi, 1 + WORD $0xe7d3 // shl edi, cl + WORD $0x2040; BYTE $0xdf // and dil, bl + WORD $0x3044; BYTE $0xcf // xor dil, r9b + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil + LONG $0x01c08348 // add rax, 1 + LONG $0x08f88348 // cmp rax, 8 JNE LBB5_36 - LONG $0x01c78349 // add r15, 1 + LONG $0x01c58349 // add r13, 1 LBB5_38: LONG $0x05fec149 // sar r14, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB5_128 + JL LBB5_46 LONG $0x20fe8349 // cmp r14, 32 LONG $0x245c8944; BYTE $0x1c // mov dword [rsp + 28], r11d QUAD $0x000001182494894c // mov qword [rsp + 280], r10 @@ -27060,20 +28205,20 @@ LBB5_38: WORD $0x894c; BYTE $0xf0 // mov rax, r14 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx - WORD $0x3949; BYTE $0xc7 // cmp r15, rax + WORD $0x3949; BYTE $0xc5 // cmp r13, rax JAE LBB5_165 - LONG $0xb7048d4b // lea rax, [r15 + 4*r14] + QUAD $0x00000000b5048d4a // lea rax, [4*r14] + WORD $0x014c; BYTE $0xe8 // add rax, r13 WORD $0x3948; BYTE $0xc2 // cmp rdx, rax JAE LBB5_165 LBB5_42: WORD $0xc031 // xor eax, eax QUAD $0x0000017824848948 // mov qword [rsp + 376], rax - WORD $0x894d; BYTE $0xfd // mov r13, r15 LBB5_43: QUAD $0x0000017824b42b4c // sub r14, qword [rsp + 376] - QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 + QUAD $0x000000a024b4894c // mov qword [rsp + 160], r14 LBB5_44: LONG $0x1f5a3a44 // cmp r11b, byte [rdx + 31] @@ -27085,31 +28230,31 @@ LBB5_44: LONG $0x1c5a3a44 // cmp r11b, byte [rdx + 28] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x1b5a3a44 // cmp r11b, byte [rdx + 27] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x1a5a3a44 // cmp r11b, byte [rdx + 26] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0x195a3a44 // cmp r11b, byte [rdx + 25] + LONG $0x1a5a3a44 // cmp r11b, byte [rdx + 26] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0x195a3a44 // cmp r11b, byte [rdx + 25] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x175a3a44 // cmp r11b, byte [rdx + 23] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x165a3a44 // cmp r11b, byte [rdx + 22] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0x155a3a44 // cmp r11b, byte [rdx + 21] LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x145a3a44 // cmp r11b, byte [rdx + 20] + LONG $0x155a3a44 // cmp r11b, byte [rdx + 21] LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x145a3a44 // cmp r11b, byte [rdx + 20] + LONG $0x2454950f; BYTE 
$0x50 // setne byte [rsp + 80] LONG $0x135a3a44 // cmp r11b, byte [rdx + 19] - QUAD $0x000000902494950f // setne byte [rsp + 144] - LONG $0x125a3a44 // cmp r11b, byte [rdx + 18] QUAD $0x000000882494950f // setne byte [rsp + 136] - LONG $0x115a3a44 // cmp r11b, byte [rdx + 17] + LONG $0x125a3a44 // cmp r11b, byte [rdx + 18] QUAD $0x000000802494950f // setne byte [rsp + 128] + LONG $0x115a3a44 // cmp r11b, byte [rdx + 17] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x0f5a3a44 // cmp r11b, byte [rdx + 15] LONG $0xd6950f41 // setne r14b LONG $0x0e5a3a44 // cmp r11b, byte [rdx + 14] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] - LONG $0x0d5a3a44 // cmp r11b, byte [rdx + 13] LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x0d5a3a44 // cmp r11b, byte [rdx + 13] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x0c5a3a44 // cmp r11b, byte [rdx + 12] LONG $0xd4950f41 // setne r12b LONG $0x0b5a3a44 // cmp r11b, byte [rdx + 11] @@ -27124,7 +28269,7 @@ LBB5_44: LONG $0xd7950f40 // setne dil LONG $0x1c24448b // mov eax, dword [rsp + 28] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - QUAD $0x000000a82494950f // setne byte [rsp + 168] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x1c24448b // mov eax, dword [rsp + 28] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] LONG $0xd1950f41 // setne r9b @@ -27139,7 +28284,7 @@ LBB5_44: WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x1c24448b // mov eax, dword [rsp + 28] WORD $0x023a // cmp al, byte [rdx] - QUAD $0x000000a02494950f // setne byte [rsp + 160] + QUAD $0x000000a82494950f // setne byte [rsp + 168] LONG $0x1c24448b // mov eax, dword [rsp + 28] WORD $0x423a; BYTE $0x01 // cmp al, byte [rdx + 1] WORD $0x950f; BYTE $0xd0 // setne al @@ -27150,12 +28295,12 @@ LBB5_44: QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x1c245c8b // mov ebx, dword [rsp + 28] WORD $0x5a3a; BYTE $0x10 // cmp bl, byte [rdx + 16] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x1c245c8b // mov ebx, dword [rsp + 28] WORD $0x5a3a; BYTE $0x18 // cmp bl, byte [rdx + 24] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] WORD $0xc000 // add al, al - LONG $0xa0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 160] + LONG $0xa8248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 168] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xc108 // or cl, al LONG $0x03e6c040 // shl sil, 3 @@ -27164,7 +28309,7 @@ LBB5_44: WORD $0x0841; BYTE $0xf0 // or r8b, sil LONG $0x05e1c041 // shl r9b, 5 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000a82484b60f // movzx eax, byte [rsp + 168] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e7c040 // shl dil, 7 WORD $0x0840; BYTE $0xc7 // or dil, al @@ -27179,51 +28324,51 @@ LBB5_44: LONG $0x245c8b44; BYTE $0x1c // mov r11d, dword [rsp + 28] LONG $0x04e4c041 // shl r12b, 4 WORD $0x0845; BYTE $0xfc // or r12b, r15b - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xe0 // or al, r12b - LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e6c041 // shl r14b, 7 WORD $0x0841; BYTE $0xce // or r14b, cl WORD $0x0841; BYTE $0xc6 // or r14b, al LONG $0x01758845 // mov byte [r13 + 1], r14b - QUAD 
$0x000000802484b60f // movzx eax, byte [rsp + 128] + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al - LONG $0x50244402 // add al, byte [rsp + 80] + LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] WORD $0xc189 // mov ecx, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl LONG $0x02458841 // mov byte [r13 + 2], al - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al LONG $0x40244402 // add al, byte [rsp + 64] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -27244,17 +28389,59 @@ LBB5_44: LONG $0x03458841 // mov byte [r13 + 3], al LONG $0x20c28348 // add rdx, 32 LONG $0x04c58349 // add r13, 4 - QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 JNE LBB5_44 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x0000018024b48b4c // mov r14, qword [rsp + 384] - JMP LBB5_129 LBB5_46: + LONG $0x05e6c149 // shl r14, 5 + WORD $0x394d; BYTE $0xd6 // cmp r14, r10 + JGE LBB5_164 + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0x294d; BYTE $0xf0 // sub r8, r14 + WORD $0xf749; BYTE $0xd6 // not r14 + WORD $0x014d; BYTE $0xd6 // add r14, r10 + JE LBB5_122 + WORD $0x894d; BYTE $0xc2 // mov r10, r8 + LONG $0xfee28349 // and r10, -2 + WORD $0xf631 // xor esi, esi + +LBB5_139: + LONG $0x321c3a44 // cmp r11b, byte [rdx + rsi] + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xf7 // mov rdi, rsi + LONG $0x03efc148 // shr rdi, 3 + WORD $0xf189 // mov ecx, esi + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + LONG $0x4cb60f45; WORD $0x003d // movzx r9d, byte [r13 + rdi] + WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x3d5c8841; BYTE $0x00 // mov byte [r13 + rdi], bl + LONG $0x325c3a44; BYTE $0x01 // cmp r11b, 
byte [rdx + rsi + 1] + LONG $0x02768d48 // lea rsi, [rsi + 2] + LONG $0xd1950f41 // setne r9b + WORD $0xf641; BYTE $0xd9 // neg r9b + WORD $0x3041; BYTE $0xd9 // xor r9b, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0x2044; BYTE $0xc8 // and al, r9b + WORD $0xd830 // xor al, bl + LONG $0x3d448841; BYTE $0x00 // mov byte [r13 + rdi], al + WORD $0x3949; BYTE $0xf2 // cmp r10, rsi + JNE LBB5_139 + JMP LBB5_155 + +LBB5_49: WORD $0xff83; BYTE $0x07 // cmp edi, 7 - JE LBB5_117 + JE LBB5_123 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB5_157 + JNE LBB5_164 WORD $0x8b4c; BYTE $0x36 // mov r14, qword [rsi] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -27264,10 +28451,10 @@ LBB5_46: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_52 + JE LBB5_55 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB5_50: +LBB5_53: WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] LONG $0x08528d48 // lea rdx, [rdx + 8] WORD $0x950f; BYTE $0xd3 // setne bl @@ -27276,7 +28463,8 @@ LBB5_50: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - LONG $0x04b60f45; BYTE $0x37 // movzx r8d, byte [r15 + rsi] + WORD $0x894d; BYTE $0xe9 // mov r9, r13 + LONG $0x44b60f45; WORD $0x0035 // movzx r8d, byte [r13 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -27285,22 +28473,22 @@ LBB5_50: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x373c8841 // mov byte [r15 + rsi], dil + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_50 - LONG $0x01c78349 // add r15, 1 + JNE LBB5_53 + LONG $0x01c58349 // add r13, 1 -LBB5_52: +LBB5_55: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB5_56 + JL LBB5_59 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 -LBB5_54: - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 +LBB5_57: + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LONG $0xf8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 248] LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0xf0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 240] @@ -27310,31 +28498,31 @@ LBB5_54: LONG $0xe0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 224] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0xd8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 216] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0xd0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 208] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0xc8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 200] + LONG $0xd0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 208] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0xc8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 200] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0xb8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 184] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0xb0b23b4c; WORD $0x0000; BYTE 
$0x00 // cmp r14, qword [rdx + 176] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0xa8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 168] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0xa0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 160] LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x98b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 152] + LONG $0xa0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 160] LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x90b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 144] + LONG $0x98b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 152] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x88b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 136] + LONG $0x90b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 144] QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x88b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 136] + QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x78723b4c // cmp r14, qword [rdx + 120] LONG $0xd0950f41 // setne r8b LONG $0x70723b4c // cmp r14, qword [rdx + 112] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x68723b4c // cmp r14, qword [rdx + 104] LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x68723b4c // cmp r14, qword [rdx + 104] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x60723b4c // cmp r14, qword [rdx + 96] LONG $0xd3950f41 // setne r11b LONG $0x58723b4c // cmp r14, qword [rdx + 88] @@ -27352,28 +28540,28 @@ LBB5_54: LONG $0x20723b4c // cmp r14, qword [rdx + 32] WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x18723b4c // cmp r14, qword [rdx + 24] - LONG $0xd5950f41 // setne r13b - LONG $0x10723b4c // cmp r14, qword [rdx + 16] LONG $0xd4950f41 // setne r12b + LONG $0x10723b4c // cmp r14, qword [rdx + 16] + LONG $0xd5950f41 // setne r13b WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] - QUAD $0x000000a82494950f // setne byte [rsp + 168] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x08723b4c // cmp r14, qword [rdx + 8] LONG $0xd7950f41 // setne r15b LONG $0x40723b4c // cmp r14, qword [rdx + 64] QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x80b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 128] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0xc0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 192] - QUAD $0x000000882494950f // setne byte [rsp + 136] + QUAD $0x000000802494950f // setne byte [rsp + 128] WORD $0x0045; BYTE $0xff // add r15b, r15b - QUAD $0x000000a824bc0244 // add r15b, byte [rsp + 168] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xfc // or r12b, r15b + QUAD $0x000000b024bc0244 // add r15b, byte [rsp + 176] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0845; BYTE $0xfd // or r13b, r15b QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0844; BYTE $0xe1 // or cl, r12b LONG $0x05e6c040 // shl sil, 5 WORD $0x0840; BYTE $0xce // or sil, cl WORD $0xe3c0; BYTE $0x06 // shl bl, 6 @@ -27389,32 +28577,32 @@ LBB5_54: WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e3c041 // shl r11b, 4 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - 
LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xd8 // or al, r11b - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x01478845 // mov byte [r15 + 1], r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xc000 // add al, al - LONG $0x70244402 // add al, byte [rsp + 112] + LONG $0x68244402 // add al, byte [rsp + 104] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -27425,15 +28613,15 @@ LBB5_54: WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl LONG $0x02478841 // mov byte [r15 + 2], al - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x88248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 136] + LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -27454,56 +28642,58 @@ LBB5_54: LONG $0x03478841 // mov byte [r15 + 3], al LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 LONG $0x04c78349 // add r15, 4 - QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 - JNE LBB5_54 + WORD $0x894d; BYTE $0xfd // mov r13, r15 + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + JNE LBB5_57 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] -LBB5_56: +LBB5_59: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB5_157 + JGE LBB5_164 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JE LBB5_93 + JE LBB5_96 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xff31 // xor edi, edi -LBB5_59: - WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] - WORD $0x950f; BYTE 
$0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0xf989 // mov ecx, edi - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x371c8841 // mov byte [r15 + rsi], bl - LONG $0x02c78348 // add rdi, 2 - LONG $0x08723b4c // cmp r14, qword [rdx + 8] - LONG $0x10528d48 // lea rdx, [rdx + 16] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x37048841 // mov byte [r15 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi - JNE LBB5_59 +LBB5_62: + WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + LONG $0x03eec148 // shr rsi, 3 + WORD $0x894d; BYTE $0xeb // mov r11, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0xf989 // mov ecx, edi + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x355c8841; BYTE $0x00 // mov byte [r13 + rsi], bl + LONG $0x02c78348 // add rdi, 2 + LONG $0x08723b4c // cmp r14, qword [rdx + 8] + LONG $0x10528d48 // lea rdx, [rdx + 16] + LONG $0xd1950f41 // setne r9b + WORD $0xf641; BYTE $0xd9 // neg r9b + WORD $0x3041; BYTE $0xd9 // xor r9b, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0x2044; BYTE $0xc8 // and al, r9b + WORD $0xd830 // xor al, bl + LONG $0x35448841; BYTE $0x00 // mov byte [r13 + rsi], al + WORD $0x3949; BYTE $0xfa // cmp r10, rdi + JNE LBB5_62 JMP LBB5_146 -LBB5_60: +LBB5_63: LONG $0x36b70f44 // movzx r14d, word [rsi] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -27513,10 +28703,10 @@ LBB5_60: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_64 + JE LBB5_67 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB5_62: +LBB5_65: LONG $0x323b4466 // cmp r14w, word [rdx] LONG $0x02528d48 // lea rdx, [rdx + 2] WORD $0x950f; BYTE $0xd3 // setne bl @@ -27525,7 +28715,8 @@ LBB5_62: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - LONG $0x04b60f45; BYTE $0x37 // movzx r8d, byte [r15 + rsi] + WORD $0x894d; BYTE $0xe9 // mov r9, r13 + LONG $0x44b60f45; WORD $0x0035 // movzx r8d, byte [r13 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -27534,22 +28725,22 @@ LBB5_62: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x373c8841 // mov byte [r15 + rsi], dil + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_62 - LONG $0x01c78349 // add r15, 1 + JNE LBB5_65 + LONG $0x01c58349 // add r13, 1 -LBB5_64: +LBB5_67: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL 
LBB5_68 + JL LBB5_71 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 -LBB5_66: - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 +LBB5_69: + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LONG $0x723b4466; BYTE $0x3e // cmp r14w, word [rdx + 62] LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0x723b4466; BYTE $0x3c // cmp r14w, word [rdx + 60] @@ -27559,31 +28750,31 @@ LBB5_66: LONG $0x723b4466; BYTE $0x38 // cmp r14w, word [rdx + 56] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x723b4466; BYTE $0x36 // cmp r14w, word [rdx + 54] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x723b4466; BYTE $0x34 // cmp r14w, word [rdx + 52] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0x723b4466; BYTE $0x32 // cmp r14w, word [rdx + 50] + LONG $0x723b4466; BYTE $0x34 // cmp r14w, word [rdx + 52] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0x723b4466; BYTE $0x32 // cmp r14w, word [rdx + 50] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x723b4466; BYTE $0x2e // cmp r14w, word [rdx + 46] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x723b4466; BYTE $0x2c // cmp r14w, word [rdx + 44] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x723b4466; BYTE $0x2a // cmp r14w, word [rdx + 42] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0x723b4466; BYTE $0x28 // cmp r14w, word [rdx + 40] LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x723b4466; BYTE $0x26 // cmp r14w, word [rdx + 38] + LONG $0x723b4466; BYTE $0x28 // cmp r14w, word [rdx + 40] LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x723b4466; BYTE $0x24 // cmp r14w, word [rdx + 36] + LONG $0x723b4466; BYTE $0x26 // cmp r14w, word [rdx + 38] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x723b4466; BYTE $0x22 // cmp r14w, word [rdx + 34] + LONG $0x723b4466; BYTE $0x24 // cmp r14w, word [rdx + 36] QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x723b4466; BYTE $0x22 // cmp r14w, word [rdx + 34] + QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x723b4466; BYTE $0x1e // cmp r14w, word [rdx + 30] LONG $0xd0950f41 // setne r8b LONG $0x723b4466; BYTE $0x1c // cmp r14w, word [rdx + 28] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x723b4466; BYTE $0x1a // cmp r14w, word [rdx + 26] LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x723b4466; BYTE $0x1a // cmp r14w, word [rdx + 26] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x723b4466; BYTE $0x18 // cmp r14w, word [rdx + 24] LONG $0xd3950f41 // setne r11b LONG $0x723b4466; BYTE $0x16 // cmp r14w, word [rdx + 22] @@ -27601,28 +28792,28 @@ LBB5_66: LONG $0x723b4466; BYTE $0x08 // cmp r14w, word [rdx + 8] WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x723b4466; BYTE $0x06 // cmp r14w, word [rdx + 6] - LONG $0xd5950f41 // setne r13b - LONG $0x723b4466; BYTE $0x04 // cmp r14w, word [rdx + 4] LONG $0xd4950f41 // setne r12b + LONG $0x723b4466; BYTE $0x04 // cmp r14w, word [rdx + 4] + LONG $0xd5950f41 // setne r13b LONG $0x323b4466 // cmp r14w, word [rdx] - QUAD $0x000000a82494950f // setne byte [rsp + 168] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x723b4466; BYTE $0x02 // cmp r14w, word [rdx + 2] LONG $0xd7950f41 // setne r15b LONG $0x723b4466; BYTE $0x10 // cmp r14w, 
word [rdx + 16] QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x723b4466; BYTE $0x20 // cmp r14w, word [rdx + 32] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x723b4466; BYTE $0x30 // cmp r14w, word [rdx + 48] - QUAD $0x000000882494950f // setne byte [rsp + 136] + QUAD $0x000000802494950f // setne byte [rsp + 128] WORD $0x0045; BYTE $0xff // add r15b, r15b - QUAD $0x000000a824bc0244 // add r15b, byte [rsp + 168] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xfc // or r12b, r15b + QUAD $0x000000b024bc0244 // add r15b, byte [rsp + 176] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0845; BYTE $0xfd // or r13b, r15b QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0844; BYTE $0xe1 // or cl, r12b LONG $0x05e6c040 // shl sil, 5 WORD $0x0840; BYTE $0xce // or sil, cl WORD $0xe3c0; BYTE $0x06 // shl bl, 6 @@ -27638,32 +28829,32 @@ LBB5_66: WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e3c041 // shl r11b, 4 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xd8 // or al, r11b - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x01478845 // mov byte [r15 + 1], r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xc000 // add al, al - LONG $0x70244402 // add al, byte [rsp + 112] + LONG $0x68244402 // add al, byte [rsp + 104] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -27674,15 +28865,15 @@ LBB5_66: WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl LONG $0x02478841 // mov byte [r15 + 2], al - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x88248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 136] + LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl 
al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -27703,56 +28894,58 @@ LBB5_66: LONG $0x03478841 // mov byte [r15 + 3], al LONG $0x40c28348 // add rdx, 64 LONG $0x04c78349 // add r15, 4 - QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 - JNE LBB5_66 + WORD $0x894d; BYTE $0xfd // mov r13, r15 + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + JNE LBB5_69 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] -LBB5_68: +LBB5_71: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB5_157 + JGE LBB5_164 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JE LBB5_82 + JE LBB5_85 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xff31 // xor edi, edi -LBB5_71: - LONG $0x323b4466 // cmp r14w, word [rdx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0xf989 // mov ecx, edi - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x371c8841 // mov byte [r15 + rsi], bl - LONG $0x02c78348 // add rdi, 2 - LONG $0x723b4466; BYTE $0x02 // cmp r14w, word [rdx + 2] - LONG $0x04528d48 // lea rdx, [rdx + 4] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x37048841 // mov byte [r15 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi - JNE LBB5_71 +LBB5_74: + LONG $0x323b4466 // cmp r14w, word [rdx] + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + LONG $0x03eec148 // shr rsi, 3 + WORD $0x894d; BYTE $0xeb // mov r11, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0xf989 // mov ecx, edi + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x355c8841; BYTE $0x00 // mov byte [r13 + rsi], bl + LONG $0x02c78348 // add rdi, 2 + LONG $0x723b4466; BYTE $0x02 // cmp r14w, word [rdx + 2] + LONG $0x04528d48 // lea rdx, [rdx + 4] + LONG $0xd1950f41 // setne r9b + WORD $0xf641; BYTE $0xd9 // neg r9b + WORD $0x3041; BYTE $0xd9 // xor r9b, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0x2044; BYTE $0xc8 // and al, r9b + WORD $0xd830 // xor al, bl + LONG $0x35448841; BYTE $0x00 // mov byte [r13 + rsi], al + WORD $0x3949; BYTE $0xfa // cmp r10, rdi + JNE LBB5_74 JMP LBB5_142 -LBB5_72: +LBB5_75: LONG $0x36b70f44 // movzx r14d, word [rsi] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ 
-27762,10 +28955,10 @@ LBB5_72: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_76 + JE LBB5_79 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB5_74: +LBB5_77: LONG $0x323b4466 // cmp r14w, word [rdx] LONG $0x02528d48 // lea rdx, [rdx + 2] WORD $0x950f; BYTE $0xd3 // setne bl @@ -27774,7 +28967,8 @@ LBB5_74: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - LONG $0x04b60f45; BYTE $0x37 // movzx r8d, byte [r15 + rsi] + WORD $0x894d; BYTE $0xe9 // mov r9, r13 + LONG $0x44b60f45; WORD $0x0035 // movzx r8d, byte [r13 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -27783,22 +28977,22 @@ LBB5_74: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x373c8841 // mov byte [r15 + rsi], dil + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_74 - LONG $0x01c78349 // add r15, 1 + JNE LBB5_77 + LONG $0x01c58349 // add r13, 1 -LBB5_76: +LBB5_79: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB5_80 + JL LBB5_83 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 -LBB5_78: - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 +LBB5_81: + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LONG $0x723b4466; BYTE $0x3e // cmp r14w, word [rdx + 62] LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0x723b4466; BYTE $0x3c // cmp r14w, word [rdx + 60] @@ -27808,31 +29002,31 @@ LBB5_78: LONG $0x723b4466; BYTE $0x38 // cmp r14w, word [rdx + 56] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x723b4466; BYTE $0x36 // cmp r14w, word [rdx + 54] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x723b4466; BYTE $0x34 // cmp r14w, word [rdx + 52] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0x723b4466; BYTE $0x32 // cmp r14w, word [rdx + 50] + LONG $0x723b4466; BYTE $0x34 // cmp r14w, word [rdx + 52] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0x723b4466; BYTE $0x32 // cmp r14w, word [rdx + 50] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x723b4466; BYTE $0x2e // cmp r14w, word [rdx + 46] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x723b4466; BYTE $0x2c // cmp r14w, word [rdx + 44] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x723b4466; BYTE $0x2a // cmp r14w, word [rdx + 42] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0x723b4466; BYTE $0x28 // cmp r14w, word [rdx + 40] LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x723b4466; BYTE $0x26 // cmp r14w, word [rdx + 38] + LONG $0x723b4466; BYTE $0x28 // cmp r14w, word [rdx + 40] LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x723b4466; BYTE $0x24 // cmp r14w, word [rdx + 36] + LONG $0x723b4466; BYTE $0x26 // cmp r14w, word [rdx + 38] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x723b4466; BYTE $0x22 // cmp r14w, word [rdx + 34] + LONG $0x723b4466; BYTE $0x24 // cmp r14w, word [rdx + 36] QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x723b4466; BYTE $0x22 // cmp r14w, word [rdx + 34] + QUAD 
$0x000000882494950f // setne byte [rsp + 136] LONG $0x723b4466; BYTE $0x1e // cmp r14w, word [rdx + 30] LONG $0xd0950f41 // setne r8b LONG $0x723b4466; BYTE $0x1c // cmp r14w, word [rdx + 28] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x723b4466; BYTE $0x1a // cmp r14w, word [rdx + 26] LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x723b4466; BYTE $0x1a // cmp r14w, word [rdx + 26] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x723b4466; BYTE $0x18 // cmp r14w, word [rdx + 24] LONG $0xd3950f41 // setne r11b LONG $0x723b4466; BYTE $0x16 // cmp r14w, word [rdx + 22] @@ -27850,28 +29044,28 @@ LBB5_78: LONG $0x723b4466; BYTE $0x08 // cmp r14w, word [rdx + 8] WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x723b4466; BYTE $0x06 // cmp r14w, word [rdx + 6] - LONG $0xd5950f41 // setne r13b - LONG $0x723b4466; BYTE $0x04 // cmp r14w, word [rdx + 4] LONG $0xd4950f41 // setne r12b + LONG $0x723b4466; BYTE $0x04 // cmp r14w, word [rdx + 4] + LONG $0xd5950f41 // setne r13b LONG $0x323b4466 // cmp r14w, word [rdx] - QUAD $0x000000a82494950f // setne byte [rsp + 168] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x723b4466; BYTE $0x02 // cmp r14w, word [rdx + 2] LONG $0xd7950f41 // setne r15b LONG $0x723b4466; BYTE $0x10 // cmp r14w, word [rdx + 16] QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x723b4466; BYTE $0x20 // cmp r14w, word [rdx + 32] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x723b4466; BYTE $0x30 // cmp r14w, word [rdx + 48] - QUAD $0x000000882494950f // setne byte [rsp + 136] + QUAD $0x000000802494950f // setne byte [rsp + 128] WORD $0x0045; BYTE $0xff // add r15b, r15b - QUAD $0x000000a824bc0244 // add r15b, byte [rsp + 168] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xfc // or r12b, r15b + QUAD $0x000000b024bc0244 // add r15b, byte [rsp + 176] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0845; BYTE $0xfd // or r13b, r15b QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0844; BYTE $0xe1 // or cl, r12b LONG $0x05e6c040 // shl sil, 5 WORD $0x0840; BYTE $0xce // or sil, cl WORD $0xe3c0; BYTE $0x06 // shl bl, 6 @@ -27887,32 +29081,32 @@ LBB5_78: WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e3c041 // shl r11b, 4 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xd8 // or al, r11b - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x01478845 // mov byte [r15 + 1], r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xc000 // add al, al - LONG $0x70244402 // add al, byte [rsp + 112] + LONG $0x68244402 // add al, byte [rsp + 104] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD 
$0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -27923,15 +29117,15 @@ LBB5_78: WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl LONG $0x02478841 // mov byte [r15 + 2], al - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x88248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 136] + LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -27952,26 +29146,27 @@ LBB5_78: LONG $0x03478841 // mov byte [r15 + 3], al LONG $0x40c28348 // add rdx, 64 LONG $0x04c78349 // add r15, 4 - QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 - JNE LBB5_78 + WORD $0x894d; BYTE $0xfd // mov r13, r15 + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + JNE LBB5_81 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] -LBB5_80: +LBB5_83: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB5_157 + JGE LBB5_164 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 JNE LBB5_140 -LBB5_82: +LBB5_85: WORD $0xff31 // xor edi, edi JMP LBB5_142 -LBB5_83: +LBB5_86: WORD $0x8b4c; BYTE $0x36 // mov r14, qword [rsi] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -27981,10 +29176,10 @@ LBB5_83: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_87 + JE LBB5_90 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB5_85: +LBB5_88: WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] LONG $0x08528d48 // lea rdx, [rdx + 8] WORD $0x950f; BYTE $0xd3 // setne bl @@ -27993,7 +29188,8 @@ LBB5_85: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - LONG $0x04b60f45; BYTE $0x37 // movzx r8d, byte [r15 + rsi] + WORD $0x894d; BYTE $0xe9 // mov r9, r13 + LONG $0x44b60f45; WORD $0x0035 // movzx r8d, byte [r13 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -28002,22 +29198,22 @@ LBB5_85: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b 
- LONG $0x373c8841 // mov byte [r15 + rsi], dil + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_85 - LONG $0x01c78349 // add r15, 1 + JNE LBB5_88 + LONG $0x01c58349 // add r13, 1 -LBB5_87: +LBB5_90: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB5_91 + JL LBB5_94 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 -LBB5_89: - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 +LBB5_92: + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LONG $0xf8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 248] LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0xf0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 240] @@ -28027,31 +29223,31 @@ LBB5_89: LONG $0xe0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 224] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0xd8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 216] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0xd0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 208] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0xc8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 200] + LONG $0xd0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 208] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0xc8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 200] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0xb8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 184] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0xb0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 176] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0xa8b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 168] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0xa0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 160] LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x98b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 152] + LONG $0xa0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 160] LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x90b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 144] + LONG $0x98b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 152] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x88b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 136] + LONG $0x90b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 144] QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x88b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 136] + QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x78723b4c // cmp r14, qword [rdx + 120] LONG $0xd0950f41 // setne r8b LONG $0x70723b4c // cmp r14, qword [rdx + 112] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x68723b4c // cmp r14, qword [rdx + 104] LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x68723b4c // cmp r14, qword [rdx + 104] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x60723b4c // cmp r14, qword [rdx + 96] LONG $0xd3950f41 // setne r11b LONG $0x58723b4c // cmp r14, qword [rdx + 88] @@ -28069,28 +29265,28 @@ LBB5_89: LONG $0x20723b4c // cmp r14, qword [rdx + 32] WORD $0x950f; BYTE $0xd1 // 
setne cl LONG $0x18723b4c // cmp r14, qword [rdx + 24] - LONG $0xd5950f41 // setne r13b - LONG $0x10723b4c // cmp r14, qword [rdx + 16] LONG $0xd4950f41 // setne r12b + LONG $0x10723b4c // cmp r14, qword [rdx + 16] + LONG $0xd5950f41 // setne r13b WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] - QUAD $0x000000a82494950f // setne byte [rsp + 168] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x08723b4c // cmp r14, qword [rdx + 8] LONG $0xd7950f41 // setne r15b LONG $0x40723b4c // cmp r14, qword [rdx + 64] QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x80b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 128] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0xc0b23b4c; WORD $0x0000; BYTE $0x00 // cmp r14, qword [rdx + 192] - QUAD $0x000000882494950f // setne byte [rsp + 136] + QUAD $0x000000802494950f // setne byte [rsp + 128] WORD $0x0045; BYTE $0xff // add r15b, r15b - QUAD $0x000000a824bc0244 // add r15b, byte [rsp + 168] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xfc // or r12b, r15b + QUAD $0x000000b024bc0244 // add r15b, byte [rsp + 176] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0845; BYTE $0xfd // or r13b, r15b QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0844; BYTE $0xe1 // or cl, r12b LONG $0x05e6c040 // shl sil, 5 WORD $0x0840; BYTE $0xce // or sil, cl WORD $0xe3c0; BYTE $0x06 // shl bl, 6 @@ -28106,32 +29302,32 @@ LBB5_89: WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e3c041 // shl r11b, 4 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xd8 // or al, r11b - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x01478845 // mov byte [r15 + 1], r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xc000 // add al, al - LONG $0x70244402 // add al, byte [rsp + 112] + LONG $0x68244402 // add al, byte [rsp + 104] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -28142,15 +29338,15 @@ LBB5_89: WORD $0xd808 // or al, bl WORD 
$0xc808 // or al, cl LONG $0x02478841 // mov byte [r15 + 2], al - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x88248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 136] + LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -28171,26 +29367,27 @@ LBB5_89: LONG $0x03478841 // mov byte [r15 + 3], al LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 LONG $0x04c78349 // add r15, 4 - QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 - JNE LBB5_89 + WORD $0x894d; BYTE $0xfd // mov r13, r15 + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + JNE LBB5_92 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] -LBB5_91: +LBB5_94: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB5_157 + JGE LBB5_164 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 JNE LBB5_144 -LBB5_93: +LBB5_96: WORD $0xff31 // xor edi, edi JMP LBB5_146 -LBB5_94: +LBB5_97: LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xda490f4d // cmovns r11, r10 @@ -28200,203 +29397,292 @@ LBB5_94: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x0610fac5 // vmovss xmm0, dword [rsi] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_98 + JE LBB5_101 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB5_96: - LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] - LONG $0x04528d48 // lea rdx, [rdx + 4] - WORD $0x950f; BYTE $0xd3 // setne bl - WORD $0xdbf6 // neg bl - LONG $0x07708d48 // lea rsi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf0490f48 // cmovns rsi, rax - LONG $0x03fec148 // sar rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b - QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] - WORD $0xc189 // mov ecx, eax - WORD $0x2944; BYTE $0xc1 // sub ecx, r8d - LONG $0x000001bf; BYTE $0x00 // mov edi, 1 - WORD $0xe7d3 // shl edi, cl - WORD $0x2040; BYTE $0xdf // and dil, bl - WORD $0x3044; BYTE $0xcf // xor dil, r9b - LONG $0x373c8841 // mov byte [r15 + rsi], dil - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_96 - LONG $0x01c78349 // add r15, 1 +LBB5_99: + LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] + LONG $0x04528d48 // lea rdx, [rdx + 4] + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xcb08 // or bl, cl + WORD $0xdbf6 // neg bl + LONG $0x07708d48 // lea rsi, [rax + 7] + WORD $0x8548; BYTE $0xc0 // test rax, rax + LONG $0xf0490f48 // cmovns rsi, rax + LONG $0x03fec148 // sar rsi, 3 + WORD $0x894d; BYTE $0xee // mov r14, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0x3044; BYTE $0xcb // xor bl, r9b + QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] + WORD $0xc189 // 
mov ecx, eax + WORD $0x2944; BYTE $0xc1 // sub ecx, r8d + LONG $0x000001bf; BYTE $0x00 // mov edi, 1 + WORD $0xe7d3 // shl edi, cl + WORD $0x2040; BYTE $0xdf // and dil, bl + WORD $0x3044; BYTE $0xcf // xor dil, r9b + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil + LONG $0x01c08348 // add rax, 1 + LONG $0x08f88348 // cmp rax, 8 + JNE LBB5_99 + LONG $0x01c58349 // add r13, 1 -LBB5_98: +LBB5_101: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB5_102 + JL LBB5_105 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 + QUAD $0x000000b8249c894c // mov qword [rsp + 184], r11 QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 - QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 -LBB5_100: - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 +LBB5_103: + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] - QUAD $0x000000982494950f // setne byte [rsp + 152] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x422ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rdx + 4] - LONG $0xd1950f41 // setne r9b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al LONG $0x422ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rdx + 8] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x422ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rdx + 12] - LONG $0xd5950f41 // setne r13b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 320], cl LONG $0x422ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rdx + 16] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x422ef8c5; BYTE $0x14 // vucomiss xmm0, dword [rdx + 20] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd4950f41 // setne r12b + WORD $0x0841; BYTE $0xc4 // or r12b, al LONG $0x422ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rdx + 24] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x422ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rdx + 28] - LONG $0xd4950f41 // setne r12b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x1c244c88 // mov byte [rsp + 28], cl LONG $0x422ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rdx + 32] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x422ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rdx + 36] - LONG $0xd6950f40 // setne sil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x422ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rdx + 40] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD 
$0xc108 // or cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x422ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rdx + 44] - LONG $0xd0950f41 // setne r8b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al LONG $0x422ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rdx + 48] - LONG $0xd2950f41 // setne r10b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20248c88; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], cl LONG $0x422ef8c5; BYTE $0x34 // vucomiss xmm0, dword [rdx + 52] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x422ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rdx + 56] - QUAD $0x000000802494950f // setne byte [rsp + 128] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x90248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], cl LONG $0x422ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rdx + 60] + WORD $0x9a0f; BYTE $0xd0 // setp al WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl LONG $0x422ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rdx + 64] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x422ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rdx + 68] - QUAD $0x000000882494950f // setne byte [rsp + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl LONG $0x422ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rdx + 72] - QUAD $0x000000902494950f // setne byte [rsp + 144] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al LONG $0x422ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rdx + 76] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl LONG $0x422ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rdx + 80] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl LONG $0x422ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rdx + 84] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x98248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], cl LONG $0x422ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rdx + 88] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] - LONG $0x422ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rdx + 92] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al + LONG $0x422ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rdx + 92] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x422ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rdx + 96] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 
32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x422ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rdx + 100] - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al LONG $0x422ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rdx + 104] - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al LONG $0x422ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rdx + 108] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x422ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rdx + 112] - QUAD $0x000001402494950f // setne byte [rsp + 320] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl LONG $0x422ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rdx + 116] - QUAD $0x000001202494950f // setne byte [rsp + 288] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xa8248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 168], cl LONG $0x422ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rdx + 120] - LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al LONG $0x422ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rdx + 124] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x00000098248c0244 // add r9b, byte [rsp + 152] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e4c041 // shl r12b, 7 - WORD $0x0841; BYTE $0xdc // or r12b, bl - LONG $0x02e3c041 // shl r11b, 2 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - WORD $0x0040; BYTE $0xf6 // add sil, sil - LONG $0x24740240; BYTE $0x78 // add sil, byte [rsp + 120] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xdd // or r13b, r11b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] - WORD $0xe3c0; BYTE $0x04 // shl bl, 4 - WORD $0x0844; BYTE $0xeb // or bl, r13b - WORD $0xde89 // mov esi, ebx - LONG $0x03e0c041 // shl r8b, 3 - WORD $0x0841; BYTE $0xf8 // or r8b, dil - LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] - WORD $0xe3c0; BYTE $0x05 // shl bl, 5 - WORD $0x0840; BYTE $0xf3 // or bl, sil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xc2 // or r10b, r8b - LONG $0x05e7c041 // shl r15b, 5 - WORD $0x0845; BYTE $0xd7 // or r15b, r10b - QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xdc // or r12b, bl - WORD $0x0844; BYTE $0xf9 // or cl, r15b - QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - QUAD $0x00000088249cb60f // movzx ebx, byte [rsp + 136] - WORD $0xdb00 // add bl, bl - LONG $0x58245c02 // add bl, byte [rsp + 88] - WORD $0xde89 // mov esi, ebx - QUAD $0x00000090249cb60f // movzx ebx, byte [rsp + 144] - WORD $0xe3c0; BYTE $0x02 // shl bl, 2 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE 
$0x50 // movzx ebx, byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f40 // setne sil + WORD $0x0840; BYTE $0xc6 // or sil, al + WORD $0x0045; BYTE $0xed // add r13b, r13b + LONG $0x246c0244; BYTE $0x30 // add r13b, byte [rsp + 48] + WORD $0x8944; BYTE $0xe8 // mov eax, r13d + LONG $0x05e4c041 // shl r12b, 5 + LONG $0x6cb60f44; WORD $0x4824 // movzx r13d, byte [rsp + 72] + LONG $0x06e5c041 // shl r13b, 6 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x64b60f44; WORD $0x3824 // movzx r12d, byte [rsp + 56] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + WORD $0xc900 // add cl, cl + LONG $0x28244c02 // add cl, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x1c // movzx eax, byte [rsp + 28] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x1c244488 // mov byte [rsp + 28], al + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe0 // or al, r12b WORD $0xe3c0; BYTE $0x03 // shl bl, 3 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] - WORD $0xe3c0; BYTE $0x04 // shl bl, 4 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x05 // shl bl, 5 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0x8845; BYTE $0x27 // mov byte [r15], r12b - LONG $0x2474b60f; BYTE $0x48 // movzx esi, byte [rsp + 72] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e6c041 // shl r14b, 7 - WORD $0x0841; BYTE $0xf6 // or r14b, sil - LONG $0x014f8841 // mov byte [r15 + 1], cl - WORD $0x0841; BYTE $0xde // or r14b, bl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + WORD $0xcb08 // or bl, cl + LONG $0x6cb60f44; WORD $0x2024 // movzx r13d, byte [rsp + 32] + LONG $0x04e5c041 // shl r13b, 4 + WORD $0x0841; BYTE $0xc5 // or r13b, al + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + LONG $0x20248488; WORD $0x0001; BYTE $0x00 // mov byte [rsp + 288], al + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xc808 // or al, cl + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + LONG $0x58244c02 // add cl, byte [rsp + 88] + LONG $0x02e7c041 // shl r15b, 2 + WORD $0x0841; BYTE $0xcf // or r15b, cl + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl + WORD $0x0844; BYTE $0xf9 // or cl, r15b WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] + QUAD 
$0x0000011024bc8b4c // mov r15, qword [rsp + 272] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - WORD $0xe0c0; BYTE $0x07 // shl al, 7 - WORD $0xd808 // or al, bl - WORD $0xc808 // or al, cl - LONG $0x02778845 // mov byte [r15 + 2], r14b - LONG $0x03478841 // mov byte [r15 + 3], al + LONG $0x64b60f44; WORD $0x1c24 // movzx r12d, byte [rsp + 28] + WORD $0x0845; BYTE $0xec // or r12b, r13b + QUAD $0x00000098249cb60f // movzx ebx, byte [rsp + 152] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e6c041 // shl r14b, 6 + WORD $0x0841; BYTE $0xde // or r14b, bl + LONG $0x2024840a; WORD $0x0001; BYTE $0x00 // or al, byte [rsp + 288] + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xf0 // or r8b, r14b + WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0045; BYTE $0xdb // add r11b, r11b + LONG $0x245c0244; BYTE $0x68 // add r11b, byte [rsp + 104] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + QUAD $0x000000b0248cb60f // movzx ecx, byte [rsp + 176] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xc9 // or cl, r9b + WORD $0x8845; BYTE $0x27 // mov byte [r15], r12b + QUAD $0x000000a8249cb60f // movzx ebx, byte [rsp + 168] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x01478841 // mov byte [r15 + 1], al + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0840; BYTE $0xce // or sil, cl + LONG $0x02478845 // mov byte [r15 + 2], r8b + LONG $0x03778841 // mov byte [r15 + 3], sil LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 LONG $0x04c78349 // add r15, 4 - QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB5_100 + WORD $0x894d; BYTE $0xfd // mov r13, r15 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 + JNE LBB5_103 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] -LBB5_102: +LBB5_105: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB5_157 + JGE LBB5_164 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 @@ -28405,7 +29691,7 @@ LBB5_102: WORD $0xff31 // xor edi, edi JMP LBB5_150 -LBB5_105: +LBB5_108: WORD $0x8a44; BYTE $0x1e // mov r11b, byte [rsi] LONG $0x1f728d4d // lea r14, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -28415,61 +29701,62 @@ LBB5_105: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_109 + JE LBB5_112 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB5_107: - WORD $0x3a44; BYTE $0x1a // cmp r11b, byte [rdx] - LONG $0x01528d48 // lea rdx, [rdx + 1] - WORD $0x950f; BYTE $0xd3 // setne bl - WORD $0xdbf6 // neg bl - LONG $0x07708d48 // lea rsi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf0490f48 // cmovns rsi, rax - LONG $0x03fec148 // sar rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0x3044; BYTE $0xcb // xor bl, 
r9b - QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] - WORD $0xc189 // mov ecx, eax - WORD $0x2944; BYTE $0xc1 // sub ecx, r8d - LONG $0x000001bf; BYTE $0x00 // mov edi, 1 - WORD $0xe7d3 // shl edi, cl - WORD $0x2040; BYTE $0xdf // and dil, bl - WORD $0x3044; BYTE $0xcf // xor dil, r9b - LONG $0x373c8841 // mov byte [r15 + rsi], dil - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_107 - LONG $0x01c78349 // add r15, 1 +LBB5_110: + WORD $0x3a44; BYTE $0x1a // cmp r11b, byte [rdx] + LONG $0x01528d48 // lea rdx, [rdx + 1] + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xdbf6 // neg bl + LONG $0x07708d48 // lea rsi, [rax + 7] + WORD $0x8548; BYTE $0xc0 // test rax, rax + LONG $0xf0490f48 // cmovns rsi, rax + LONG $0x03fec148 // sar rsi, 3 + WORD $0x894d; BYTE $0xef // mov r15, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0x3044; BYTE $0xcb // xor bl, r9b + QUAD $0x00000000f5048d44 // lea r8d, [8*rsi] + WORD $0xc189 // mov ecx, eax + WORD $0x2944; BYTE $0xc1 // sub ecx, r8d + LONG $0x000001bf; BYTE $0x00 // mov edi, 1 + WORD $0xe7d3 // shl edi, cl + WORD $0x2040; BYTE $0xdf // and dil, bl + WORD $0x3044; BYTE $0xcf // xor dil, r9b + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil + LONG $0x01c08348 // add rax, 1 + LONG $0x08f88348 // cmp rax, 8 + JNE LBB5_110 + LONG $0x01c58349 // add r13, 1 -LBB5_109: +LBB5_112: LONG $0x05fec149 // sar r14, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB5_132 + JL LBB5_120 LONG $0x20fe8349 // cmp r14, 32 LONG $0x245c8944; BYTE $0x1c // mov dword [rsp + 28], r11d QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x0000018024b4894c // mov qword [rsp + 384], r14 - JB LBB5_113 + JB LBB5_116 WORD $0x894c; BYTE $0xf0 // mov rax, r14 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx - WORD $0x3949; BYTE $0xc7 // cmp r15, rax + WORD $0x3949; BYTE $0xc5 // cmp r13, rax JAE LBB5_168 - LONG $0xb7048d4b // lea rax, [r15 + 4*r14] + QUAD $0x00000000b5048d4a // lea rax, [4*r14] + WORD $0x014c; BYTE $0xe8 // add rax, r13 WORD $0x3948; BYTE $0xc2 // cmp rdx, rax JAE LBB5_168 -LBB5_113: +LBB5_116: WORD $0xc031 // xor eax, eax QUAD $0x0000017824848948 // mov qword [rsp + 376], rax - WORD $0x894d; BYTE $0xfd // mov r13, r15 -LBB5_114: +LBB5_117: QUAD $0x0000017824b42b4c // sub r14, qword [rsp + 376] - QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 + QUAD $0x000000a024b4894c // mov qword [rsp + 160], r14 -LBB5_115: +LBB5_118: LONG $0x1f5a3a44 // cmp r11b, byte [rdx + 31] QUAD $0x000001102494950f // setne byte [rsp + 272] LONG $0x1e5a3a44 // cmp r11b, byte [rdx + 30] @@ -28479,31 +29766,31 @@ LBB5_115: LONG $0x1c5a3a44 // cmp r11b, byte [rdx + 28] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x1b5a3a44 // cmp r11b, byte [rdx + 27] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x1a5a3a44 // cmp r11b, byte [rdx + 26] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0x195a3a44 // cmp r11b, byte [rdx + 25] + LONG $0x1a5a3a44 // cmp r11b, byte [rdx + 26] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0x195a3a44 // cmp r11b, byte [rdx + 25] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x175a3a44 // cmp r11b, byte [rdx + 23] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x165a3a44 // cmp r11b, byte [rdx + 22] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x155a3a44 // cmp r11b, byte [rdx + 21] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x60 // 
setne byte [rsp + 96] LONG $0x145a3a44 // cmp r11b, byte [rdx + 20] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] LONG $0x135a3a44 // cmp r11b, byte [rdx + 19] - QUAD $0x000000902494950f // setne byte [rsp + 144] - LONG $0x125a3a44 // cmp r11b, byte [rdx + 18] QUAD $0x000000882494950f // setne byte [rsp + 136] - LONG $0x115a3a44 // cmp r11b, byte [rdx + 17] + LONG $0x125a3a44 // cmp r11b, byte [rdx + 18] QUAD $0x000000802494950f // setne byte [rsp + 128] + LONG $0x115a3a44 // cmp r11b, byte [rdx + 17] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x0f5a3a44 // cmp r11b, byte [rdx + 15] LONG $0xd6950f41 // setne r14b LONG $0x0e5a3a44 // cmp r11b, byte [rdx + 14] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] - LONG $0x0d5a3a44 // cmp r11b, byte [rdx + 13] LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x0d5a3a44 // cmp r11b, byte [rdx + 13] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x0c5a3a44 // cmp r11b, byte [rdx + 12] LONG $0xd4950f41 // setne r12b LONG $0x0b5a3a44 // cmp r11b, byte [rdx + 11] @@ -28518,7 +29805,7 @@ LBB5_115: LONG $0xd7950f40 // setne dil LONG $0x1c24448b // mov eax, dword [rsp + 28] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - QUAD $0x000000a82494950f // setne byte [rsp + 168] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x1c24448b // mov eax, dword [rsp + 28] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] LONG $0xd1950f41 // setne r9b @@ -28533,7 +29820,7 @@ LBB5_115: WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x1c24448b // mov eax, dword [rsp + 28] WORD $0x023a // cmp al, byte [rdx] - QUAD $0x000000a02494950f // setne byte [rsp + 160] + QUAD $0x000000a82494950f // setne byte [rsp + 168] LONG $0x1c24448b // mov eax, dword [rsp + 28] WORD $0x423a; BYTE $0x01 // cmp al, byte [rdx + 1] WORD $0x950f; BYTE $0xd0 // setne al @@ -28544,12 +29831,12 @@ LBB5_115: QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x1c245c8b // mov ebx, dword [rsp + 28] WORD $0x5a3a; BYTE $0x10 // cmp bl, byte [rdx + 16] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x1c245c8b // mov ebx, dword [rsp + 28] WORD $0x5a3a; BYTE $0x18 // cmp bl, byte [rdx + 24] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] WORD $0xc000 // add al, al - LONG $0xa0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 160] + LONG $0xa8248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 168] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xc108 // or cl, al LONG $0x03e6c040 // shl sil, 3 @@ -28558,7 +29845,7 @@ LBB5_115: WORD $0x0841; BYTE $0xf0 // or r8b, sil LONG $0x05e1c041 // shl r9b, 5 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000a82484b60f // movzx eax, byte [rsp + 168] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e7c040 // shl dil, 7 WORD $0x0840; BYTE $0xc7 // or dil, al @@ -28573,32 +29860,32 @@ LBB5_115: LONG $0x245c8b44; BYTE $0x1c // mov r11d, dword [rsp + 28] LONG $0x04e4c041 // shl r12b, 4 WORD $0x0845; BYTE $0xfc // or r12b, r15b - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xe0 // or al, r12b - LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte 
[rsp + 112] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e6c041 // shl r14b, 7 WORD $0x0841; BYTE $0xce // or r14b, cl WORD $0x0841; BYTE $0xc6 // or r14b, al LONG $0x01758845 // mov byte [r13 + 1], r14b - QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al - LONG $0x50244402 // add al, byte [rsp + 80] + LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] WORD $0xc189 // mov ecx, eax - QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -28609,15 +29896,15 @@ LBB5_115: WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl LONG $0x02458841 // mov byte [r13 + 2], al - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x60244402 // add al, byte [rsp + 96] + LONG $0x58244402 // add al, byte [rsp + 88] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -28638,13 +29925,26 @@ LBB5_115: LONG $0x03458841 // mov byte [r13 + 3], al LONG $0x20c28348 // add rdx, 32 LONG $0x04c58349 // add r13, 4 - QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 - JNE LBB5_115 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 + JNE LBB5_118 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x0000018024b48b4c // mov r14, qword [rsp + 384] - JMP LBB5_133 -LBB5_117: +LBB5_120: + LONG $0x05e6c149 // shl r14, 5 + WORD $0x394d; BYTE $0xd6 // cmp r14, r10 + JGE LBB5_164 + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0x294d; BYTE $0xf0 // sub r8, r14 + WORD $0xf749; BYTE $0xd6 // not r14 + WORD $0x014d; BYTE $0xd6 // add r14, r10 + JNE LBB5_153 + +LBB5_122: + WORD $0xf631 // xor esi, esi + JMP LBB5_156 + +LBB5_123: WORD $0x8b44; BYTE $0x36 // mov r14d, dword [rsi] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -28654,10 +29954,10 @@ LBB5_117: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_121 + JE LBB5_127 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB5_119: +LBB5_125: WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] LONG $0x04528d48 // lea rdx, [rdx + 4] WORD $0x950f; BYTE $0xd3 // setne bl @@ -28666,7 +29966,8 @@ LBB5_119: WORD $0x8548; BYTE $0xc0 // test 
rax, rax LONG $0xf0490f48 // cmovns rsi, rax LONG $0x03fec148 // sar rsi, 3 - LONG $0x04b60f45; BYTE $0x37 // movzx r8d, byte [r15 + rsi] + WORD $0x894d; BYTE $0xe9 // mov r9, r13 + LONG $0x44b60f45; WORD $0x0035 // movzx r8d, byte [r13 + rsi] WORD $0x3044; BYTE $0xc3 // xor bl, r8b LONG $0x00f53c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rsi] WORD $0xc189 // mov ecx, eax @@ -28675,22 +29976,22 @@ LBB5_119: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xdf // and dil, bl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x373c8841 // mov byte [r15 + rsi], dil + LONG $0x357c8841; BYTE $0x00 // mov byte [r13 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_119 - LONG $0x01c78349 // add r15, 1 + JNE LBB5_125 + LONG $0x01c58349 // add r13, 1 -LBB5_121: +LBB5_127: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB5_125 + JL LBB5_131 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 - QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 + QUAD $0x000000a8249c894c // mov qword [rsp + 168], r11 -LBB5_123: - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 +LBB5_129: + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LONG $0x7c723b44 // cmp r14d, dword [rdx + 124] LONG $0x2454950f; BYTE $0x1c // setne byte [rsp + 28] LONG $0x78723b44 // cmp r14d, dword [rdx + 120] @@ -28700,31 +30001,31 @@ LBB5_123: LONG $0x70723b44 // cmp r14d, dword [rdx + 112] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x6c723b44 // cmp r14d, dword [rdx + 108] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x68723b44 // cmp r14d, dword [rdx + 104] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - LONG $0x64723b44 // cmp r14d, dword [rdx + 100] + LONG $0x68723b44 // cmp r14d, dword [rdx + 104] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + LONG $0x64723b44 // cmp r14d, dword [rdx + 100] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x5c723b44 // cmp r14d, dword [rdx + 92] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x58723b44 // cmp r14d, dword [rdx + 88] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x54723b44 // cmp r14d, dword [rdx + 84] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] - LONG $0x50723b44 // cmp r14d, dword [rdx + 80] LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - LONG $0x4c723b44 // cmp r14d, dword [rdx + 76] + LONG $0x50723b44 // cmp r14d, dword [rdx + 80] LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x48723b44 // cmp r14d, dword [rdx + 72] + LONG $0x4c723b44 // cmp r14d, dword [rdx + 76] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x44723b44 // cmp r14d, dword [rdx + 68] + LONG $0x48723b44 // cmp r14d, dword [rdx + 72] QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x44723b44 // cmp r14d, dword [rdx + 68] + QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x3c723b44 // cmp r14d, dword [rdx + 60] LONG $0xd0950f41 // setne r8b LONG $0x38723b44 // cmp r14d, dword [rdx + 56] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x34723b44 // cmp r14d, dword [rdx + 52] LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x34723b44 // cmp r14d, dword [rdx + 52] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x30723b44 // cmp r14d, dword [rdx + 48] LONG $0xd3950f41 // setne r11b LONG $0x2c723b44 // cmp r14d, dword [rdx + 44] @@ -28742,28 +30043,28 @@ LBB5_123: LONG $0x10723b44 // cmp 
r14d, dword [rdx + 16] WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x0c723b44 // cmp r14d, dword [rdx + 12] - LONG $0xd5950f41 // setne r13b - LONG $0x08723b44 // cmp r14d, dword [rdx + 8] LONG $0xd4950f41 // setne r12b + LONG $0x08723b44 // cmp r14d, dword [rdx + 8] + LONG $0xd5950f41 // setne r13b WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] - QUAD $0x000000a82494950f // setne byte [rsp + 168] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x04723b44 // cmp r14d, dword [rdx + 4] LONG $0xd7950f41 // setne r15b LONG $0x20723b44 // cmp r14d, dword [rdx + 32] QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x40723b44 // cmp r14d, dword [rdx + 64] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x60723b44 // cmp r14d, dword [rdx + 96] - QUAD $0x000000882494950f // setne byte [rsp + 136] + QUAD $0x000000802494950f // setne byte [rsp + 128] WORD $0x0045; BYTE $0xff // add r15b, r15b - QUAD $0x000000a824bc0244 // add r15b, byte [rsp + 168] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xfc // or r12b, r15b + QUAD $0x000000b024bc0244 // add r15b, byte [rsp + 176] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0845; BYTE $0xfd // or r13b, r15b QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0844; BYTE $0xe1 // or cl, r12b LONG $0x05e6c040 // shl sil, 5 WORD $0x0840; BYTE $0xce // or sil, cl WORD $0xe3c0; BYTE $0x06 // shl bl, 6 @@ -28779,32 +30080,32 @@ LBB5_123: WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e3c041 // shl r11b, 4 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xd8 // or al, r11b - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x01478845 // mov byte [r15 + 1], r8b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xc000 // add al, al - LONG $0x70244402 // add al, byte [rsp + 112] + LONG $0x68244402 // add al, byte [rsp + 104] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -28815,15 +30116,15 @@ LBB5_123: WORD $0xd808 // or al, bl WORD 
$0xc808 // or al, cl LONG $0x02478841 // mov byte [r15 + 2], al - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x88248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 136] + LONG $0x80248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 128] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -28844,58 +30145,96 @@ LBB5_123: LONG $0x03478841 // mov byte [r15 + 3], al LONG $0x80ea8348 // sub rdx, -128 LONG $0x04c78349 // add r15, 4 - QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 - JNE LBB5_123 + WORD $0x894d; BYTE $0xfd // mov r13, r15 + QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 + JNE LBB5_129 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] -LBB5_125: +LBB5_131: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB5_157 + JGE LBB5_164 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB5_152 + JNE LBB5_158 -LBB5_127: +LBB5_133: WORD $0xff31 // xor edi, edi - JMP LBB5_154 + JMP LBB5_160 -LBB5_128: - WORD $0x894d; BYTE $0xfd // mov r13, r15 +LBB5_134: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0xff31 // xor edi, edi -LBB5_129: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB5_157 - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 - JE LBB5_135 +LBB5_135: + LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xc808 // or al, cl + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + LONG $0x03eec148 // shr rsi, 3 + WORD $0x894d; BYTE $0xee // mov r14, r13 + LONG $0x54b60f45; WORD $0x0035 // movzx r10d, byte [r13 + rsi] + WORD $0xf989 // mov ecx, edi + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b + WORD $0x2041; BYTE $0xc3 // and r11b, al + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + LONG $0x355c8845; BYTE $0x00 // mov byte [r13 + rsi], r11b + LONG $0x02c78348 // add rdi, 2 + LONG $0x422ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rdx + 8] + LONG $0x10528d48 // lea rdx, [rdx + 16] + LONG $0xd29a0f41 // setp r10b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xd8f6 // neg al + WORD $0x3044; BYTE $0xd8 // xor al, r11b + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xdb // xor bl, r11b + LONG $0x355c8841; BYTE $0x00 // mov byte [r13 + rsi], bl + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + JNE LBB5_135 + +LBB5_136: + LONG $0x01c0f641 // test r8b, 
1 + JE LBB5_164 + LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] + JMP LBB5_152 + +LBB5_140: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 - WORD $0xf631 // xor esi, esi + WORD $0xff31 // xor edi, edi -LBB5_159: - LONG $0x321c3a44 // cmp r11b, byte [rdx + rsi] +LBB5_141: + LONG $0x323b4466 // cmp r14w, word [rdx] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xf7 // mov rdi, rsi - LONG $0x03efc148 // shr rdi, 3 - WORD $0xf189 // mov ecx, esi + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + LONG $0x03eec148 // shr rsi, 3 + WORD $0x894d; BYTE $0xeb // mov r11, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl - LONG $0x4cb60f45; WORD $0x003d // movzx r9d, byte [r13 + rdi] WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3d5c8841; BYTE $0x00 // mov byte [r13 + rdi], bl - LONG $0x325c3a44; BYTE $0x01 // cmp r11b, byte [rdx + rsi + 1] - LONG $0x02768d48 // lea rsi, [rsi + 2] + LONG $0x355c8841; BYTE $0x00 // mov byte [r13 + rsi], bl + LONG $0x02c78348 // add rdi, 2 + LONG $0x723b4466; BYTE $0x02 // cmp r14w, word [rdx + 2] + LONG $0x04528d48 // lea rdx, [rdx + 4] LONG $0xd1950f41 // setne r9b WORD $0xf641; BYTE $0xd9 // neg r9b WORD $0x3041; BYTE $0xd9 // xor r9b, bl @@ -28904,109 +30243,15 @@ LBB5_159: WORD $0xe0d2 // shl al, cl WORD $0x2044; BYTE $0xc8 // and al, r9b WORD $0xd830 // xor al, bl - LONG $0x3d448841; BYTE $0x00 // mov byte [r13 + rdi], al - WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB5_159 - JMP LBB5_162 - -LBB5_132: - WORD $0x894d; BYTE $0xfd // mov r13, r15 - -LBB5_133: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB5_157 - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 - JNE LBB5_160 - -LBB5_135: - WORD $0xf631 // xor esi, esi - JMP LBB5_163 - -LBB5_136: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0xff31 // xor edi, edi - -LBB5_137: - LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xf989 // mov ecx, edi - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x371c8841 // mov byte [r15 + rsi], bl - LONG $0x02c78348 // add rdi, 2 - LONG $0x422ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rdx + 8] - LONG $0x10528d48 // lea rdx, [rdx + 16] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x37048841 // mov byte [r15 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi - JNE LBB5_137 - -LBB5_138: - LONG $0x01c0f641 // test r8b, 1 - JE LBB5_157 - LONG $0x022ef9c5 // vucomisd xmm0, qword [rdx] - JMP LBB5_156 - -LBB5_140: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0xff31 // xor edi, edi - 
-LBB5_141: - LONG $0x323b4466 // cmp r14w, word [rdx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0xf989 // mov ecx, edi - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x371c8841 // mov byte [r15 + rsi], bl - LONG $0x02c78348 // add rdi, 2 - LONG $0x723b4466; BYTE $0x02 // cmp r14w, word [rdx + 2] - LONG $0x04528d48 // lea rdx, [rdx + 4] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x37048841 // mov byte [r15 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi + LONG $0x35448841; BYTE $0x00 // mov byte [r13 + rsi], al + WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB5_141 LBB5_142: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_157 + JE LBB5_164 LONG $0x323b4466 // cmp r14w, word [rdx] - JMP LBB5_156 + JMP LBB5_162 LBB5_144: WORD $0x894d; BYTE $0xc2 // mov r10, r8 @@ -29014,148 +30259,111 @@ LBB5_144: WORD $0xff31 // xor edi, edi LBB5_145: - WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0xf989 // mov ecx, edi - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x371c8841 // mov byte [r15 + rsi], bl - LONG $0x02c78348 // add rdi, 2 - LONG $0x08723b4c // cmp r14, qword [rdx + 8] - LONG $0x10528d48 // lea rdx, [rdx + 16] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x37048841 // mov byte [r15 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi + WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + LONG $0x03eec148 // shr rsi, 3 + WORD $0x894d; BYTE $0xeb // mov r11, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0xf989 // mov ecx, edi + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x355c8841; BYTE $0x00 // mov byte [r13 + rsi], bl + LONG $0x02c78348 // add rdi, 2 + LONG $0x08723b4c // cmp r14, qword [rdx + 8] + LONG $0x10528d48 // lea rdx, [rdx + 16] + LONG $0xd1950f41 // setne r9b + WORD $0xf641; BYTE $0xd9 // neg r9b + WORD $0x3041; BYTE $0xd9 // xor r9b, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0x2044; BYTE $0xc8 // and al, r9b + WORD $0xd830 // xor al, bl + LONG $0x35448841; BYTE $0x00 // mov byte [r13 + rsi], al + WORD $0x3949; BYTE $0xfa // cmp 
r10, rdi JNE LBB5_145 LBB5_146: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_157 + JE LBB5_164 WORD $0x3b4c; BYTE $0x32 // cmp r14, qword [rdx] - JMP LBB5_156 + JMP LBB5_162 LBB5_148: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0xff31 // xor edi, edi LBB5_149: - LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xf989 // mov ecx, edi - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x371c8841 // mov byte [r15 + rsi], bl - LONG $0x02c78348 // add rdi, 2 - LONG $0x422ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rdx + 4] - LONG $0x08528d48 // lea rdx, [rdx + 8] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x37048841 // mov byte [r15 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi + LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xc808 // or al, cl + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + LONG $0x03eec148 // shr rsi, 3 + WORD $0x894d; BYTE $0xee // mov r14, r13 + LONG $0x54b60f45; WORD $0x0035 // movzx r10d, byte [r13 + rsi] + WORD $0xf989 // mov ecx, edi + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b + WORD $0x2041; BYTE $0xc3 // and r11b, al + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + LONG $0x355c8845; BYTE $0x00 // mov byte [r13 + rsi], r11b + LONG $0x02c78348 // add rdi, 2 + LONG $0x422ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rdx + 4] + LONG $0x08528d48 // lea rdx, [rdx + 8] + LONG $0xd29a0f41 // setp r10b + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xd8f6 // neg al + WORD $0x3044; BYTE $0xd8 // xor al, r11b + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xdb // xor bl, r11b + LONG $0x355c8841; BYTE $0x00 // mov byte [r13 + rsi], bl + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi JNE LBB5_149 LBB5_150: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_157 + JE LBB5_164 LONG $0x022ef8c5 // vucomiss xmm0, dword [rdx] - JMP LBB5_156 LBB5_152: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0xff31 // xor edi, edi - -LBB5_153: - WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x37 // movzx r9d, byte [r15 + rsi] - WORD $0xf989 // mov ecx, edi - WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al + WORD $0xdaf6 // neg dl + WORD $0x8948; BYTE $0xf8 // mov rax, rdi + LONG $0x03e8c148 // shr rax, 3 + LONG $0x05748a41; BYTE $0x00 
// mov sil, byte [r13 + rax] + LONG $0x07e78040 // and dil, 7 WORD $0x01b3 // mov bl, 1 + WORD $0xf989 // mov ecx, edi WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x371c8841 // mov byte [r15 + rsi], bl - LONG $0x02c78348 // add rdi, 2 - LONG $0x04723b44 // cmp r14d, dword [rdx + 4] - LONG $0x08528d48 // lea rdx, [rdx + 8] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x37048841 // mov byte [r15 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp r10, rdi - JNE LBB5_153 - -LBB5_154: - LONG $0x01c0f641 // test r8b, 1 - JE LBB5_157 - WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] - -LBB5_156: - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xfa // mov rdx, rdi - LONG $0x03eac148 // shr rdx, 3 - LONG $0x17348a41 // mov sil, byte [r15 + rdx] - LONG $0x07e78040 // and dil, 7 - WORD $0x01b3 // mov bl, 1 - WORD $0xf989 // mov ecx, edi - WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf0 // xor al, sil - WORD $0xc320 // and bl, al - WORD $0x3040; BYTE $0xf3 // xor bl, sil - LONG $0x171c8841 // mov byte [r15 + rdx], bl - -LBB5_157: - MOVQ 1280(SP), SP - VZEROUPPER - RET + WORD $0x3040; BYTE $0xf2 // xor dl, sil + WORD $0xd320 // and bl, dl + WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x055c8841; BYTE $0x00 // mov byte [r13 + rax], bl + JMP LBB5_164 -LBB5_160: +LBB5_153: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi -LBB5_161: +LBB5_154: LONG $0x321c3a44 // cmp r11b, byte [rdx + rsi] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -29182,14 +30390,14 @@ LBB5_161: WORD $0xd830 // xor al, bl LONG $0x3d448841; BYTE $0x00 // mov byte [r13 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB5_161 + JNE LBB5_154 -LBB5_162: +LBB5_155: WORD $0x0148; BYTE $0xf2 // add rdx, rsi -LBB5_163: +LBB5_156: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_157 + JE LBB5_164 WORD $0x3a44; BYTE $0x1a // cmp r11b, byte [rdx] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -29203,8 +30411,70 @@ LBB5_163: WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil + JMP LBB5_163 + +LBB5_158: + WORD $0x894d; BYTE $0xc2 // mov r10, r8 + LONG $0xfee28349 // and r10, -2 + WORD $0xff31 // xor edi, edi + +LBB5_159: + WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + LONG $0x03eec148 // shr rsi, 3 + WORD $0x894d; BYTE $0xeb // mov r11, r13 + LONG $0x4cb60f45; WORD $0x0035 // movzx r9d, byte [r13 + rsi] + WORD $0xf989 // mov ecx, edi + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x355c8841; BYTE $0x00 // mov byte [r13 + rsi], bl + LONG $0x02c78348 // add rdi, 2 + LONG $0x04723b44 // cmp r14d, dword [rdx + 4] + LONG $0x08528d48 // lea rdx, [rdx + 8] + LONG $0xd1950f41 // setne r9b + WORD $0xf641; BYTE $0xd9 // neg r9b + WORD $0x3041; BYTE $0xd9 // xor r9b, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl 
al, cl + WORD $0x2044; BYTE $0xc8 // and al, r9b + WORD $0xd830 // xor al, bl + LONG $0x35448841; BYTE $0x00 // mov byte [r13 + rsi], al + WORD $0x3949; BYTE $0xfa // cmp r10, rdi + JNE LBB5_159 + +LBB5_160: + LONG $0x01c0f641 // test r8b, 1 + JE LBB5_164 + WORD $0x3b44; BYTE $0x32 // cmp r14d, dword [rdx] + +LBB5_162: + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xfa // mov rdx, rdi + LONG $0x03eac148 // shr rdx, 3 + LONG $0x15748a41; BYTE $0x00 // mov sil, byte [r13 + rdx] + LONG $0x07e78040 // and dil, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0xf989 // mov ecx, edi + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf0 // xor al, sil + WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xf3 // xor bl, sil + +LBB5_163: LONG $0x155c8841; BYTE $0x00 // mov byte [r13 + rdx], bl - JMP LBB5_157 + +LBB5_164: + MOVQ 1280(SP), SP + VZEROUPPER + RET LBB5_165: LONG $0xe0e68349 // and r14, -32 @@ -29213,13 +30483,14 @@ LBB5_165: WORD $0x0148; BYTE $0xd0 // add rax, rdx QUAD $0x0000018824848948 // mov qword [rsp + 392], rax QUAD $0x0000017824b4894c // mov qword [rsp + 376], r14 - LONG $0xb7048d4b // lea rax, [r15 + 4*r14] + QUAD $0x00000000b5048d4a // lea rax, [4*r14] + WORD $0x014c; BYTE $0xe8 // add rax, r13 QUAD $0x0000019024848948 // mov qword [rsp + 400], rax LONG $0x6e79c1c4; BYTE $0xc3 // vmovd xmm0, r11d LONG $0x787de2c4; BYTE $0xc0 // vpbroadcastb ymm0, xmm0 QUAD $0x00020024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 512], ymm0 WORD $0xf631 // xor esi, esi - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LBB5_166: QUAD $0x0000019824b48948 // mov qword [rsp + 408], rsi @@ -29229,32 +30500,32 @@ LBB5_166: QUAD $0x000000e824848948 // mov qword [rsp + 232], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x40c88348 // or rax, 64 - QUAD $0x000000e024848948 // mov qword [rsp + 224], rax + QUAD $0x000000d824848948 // mov qword [rsp + 216], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x60c88348 // or rax, 96 - QUAD $0x000000d824848948 // mov qword [rsp + 216], rax + QUAD $0x000000a024848948 // mov qword [rsp + 160], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x00800d48; WORD $0x0000 // or rax, 128 - QUAD $0x0000014024848948 // mov qword [rsp + 320], rax + LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x00a00d48; WORD $0x0000 // or rax, 160 - LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x00c00d48; WORD $0x0000 // or rax, 192 - QUAD $0x000000a824848948 // mov qword [rsp + 168], rax + QUAD $0x000000b024848948 // mov qword [rsp + 176], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x00e00d48; WORD $0x0000 // or rax, 224 - QUAD $0x000000a024848948 // mov qword [rsp + 160], rax + QUAD $0x000000a824848948 // mov qword [rsp + 168], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x01000d48; WORD $0x0000 // or rax, 256 QUAD $0x0000012024848948 // mov qword [rsp + 288], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x01200d48; WORD $0x0000 // or rax, 288 - LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax + LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi QUAD $0x0000010824b48948 // mov qword [rsp + 264], rsi LONG $0x01400d48; WORD $0x0000 // or rax, 320 - LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax + LONG $0x24448948; BYTE 
$0x38 // mov qword [rsp + 56], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x02000d48; WORD $0x0000 // or rax, 512 WORD $0x8948; BYTE $0xc1 // mov rcx, rax @@ -29296,10 +30567,10 @@ LBB5_166: LONG $0xc86ef9c5 // vmovd xmm1, eax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x01600d48; WORD $0x0000 // or rax, 352 - LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax + LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x01800d48; WORD $0x0000 // or rax, 384 - LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax + QUAD $0x0000014024848948 // mov qword [rsp + 320], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x01a00d48; WORD $0x0000 // or rax, 416 LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax @@ -29312,10 +30583,10 @@ LBB5_166: WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x02200d48; WORD $0x0000 // or rax, 544 WORD $0x8949; BYTE $0xc5 // mov r13, rax - QUAD $0x000000d024848948 // mov qword [rsp + 208], rax + QUAD $0x000000b824848948 // mov qword [rsp + 184], rax WORD $0x8949; BYTE $0xf4 // mov r12, rsi LONG $0x40cc8149; WORD $0x0002; BYTE $0x00 // or r12, 576 - QUAD $0x000000c824a4894c // mov qword [rsp + 200], r12 + QUAD $0x000000e024a4894c // mov qword [rsp + 224], r12 WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x02600d48; WORD $0x0000 // or rax, 608 WORD $0x8949; BYTE $0xc6 // mov r14, rax @@ -29325,22 +30596,22 @@ LBB5_166: QUAD $0x0000010024bc894c // mov qword [rsp + 256], r15 WORD $0x8949; BYTE $0xf2 // mov r10, rsi LONG $0xa0ca8149; WORD $0x0002; BYTE $0x00 // or r10, 672 - LONG $0x2454894c; BYTE $0x70 // mov qword [rsp + 112], r10 + LONG $0x2454894c; BYTE $0x68 // mov qword [rsp + 104], r10 WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x02c00d48; WORD $0x0000 // or rax, 704 QUAD $0x0000008024848948 // mov qword [rsp + 128], rax WORD $0x8949; BYTE $0xf0 // mov r8, rsi LONG $0xe0c88149; WORD $0x0002; BYTE $0x00 // or r8, 736 - LONG $0x2444894c; BYTE $0x40 // mov qword [rsp + 64], r8 + LONG $0x2444894c; BYTE $0x70 // mov qword [rsp + 112], r8 WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x03000d48; WORD $0x0000 // or rax, 768 - QUAD $0x000000b824848948 // mov qword [rsp + 184], rax + QUAD $0x000000c824848948 // mov qword [rsp + 200], rax WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x03200d48; WORD $0x0000 // or rax, 800 QUAD $0x0000009824848948 // mov qword [rsp + 152], rax WORD $0x8949; BYTE $0xf1 // mov r9, rsi LONG $0x40c98149; WORD $0x0003; BYTE $0x00 // or r9, 832 - QUAD $0x000000b0248c894c // mov qword [rsp + 176], r9 + QUAD $0x000000d0248c894c // mov qword [rsp + 208], r9 WORD $0x8948; BYTE $0xf7 // mov rdi, rsi LONG $0x60cf8148; WORD $0x0003; BYTE $0x00 // or rdi, 864 LONG $0x247c8948; BYTE $0x60 // mov qword [rsp + 96], rdi @@ -29363,7 +30634,7 @@ LBB5_166: QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x06 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 6 LONG $0x2079a3c4; WORD $0x0204; BYTE $0x07 // vpinsrb xmm0, xmm0, byte [rdx + r8], 7 - QUAD $0x000000b8249c8b48 // mov rbx, qword [rsp + 184] + QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x08 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 8 QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x09 // vpinsrb xmm0, xmm0, byte [rdx + rbx], 9 @@ -29375,27 +30646,27 @@ LBB5_166: LONG $0x2079e3c4; WORD $0x3204; BYTE $0x0f // vpinsrb xmm0, xmm0, byte [rdx + rsi], 15 QUAD $0x000000e824a48b4c // mov r12, 
qword [rsp + 232] LONG $0x2061a3c4; WORD $0x221c; BYTE $0x01 // vpinsrb xmm3, xmm3, byte [rdx + r12], 1 - QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] + QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] LONG $0x2061a3c4; WORD $0x321c; BYTE $0x02 // vpinsrb xmm3, xmm3, byte [rdx + r14], 2 - QUAD $0x000000d8249c8b4c // mov r11, qword [rsp + 216] + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] LONG $0x2061a3c4; WORD $0x1a1c; BYTE $0x03 // vpinsrb xmm3, xmm3, byte [rdx + r11], 3 - QUAD $0x0000014024848b4c // mov r8, qword [rsp + 320] + LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] LONG $0x2061a3c4; WORD $0x021c; BYTE $0x04 // vpinsrb xmm3, xmm3, byte [rdx + r8], 4 - LONG $0x244c8b4c; BYTE $0x38 // mov r9, qword [rsp + 56] + LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] LONG $0x2061a3c4; WORD $0x0a1c; BYTE $0x05 // vpinsrb xmm3, xmm3, byte [rdx + r9], 5 - QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] LONG $0x2061e3c4; WORD $0x1a1c; BYTE $0x06 // vpinsrb xmm3, xmm3, byte [rdx + rbx], 6 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] LONG $0x2061e3c4; WORD $0x321c; BYTE $0x07 // vpinsrb xmm3, xmm3, byte [rdx + rsi], 7 QUAD $0x0000012024bc8b4c // mov r15, qword [rsp + 288] LONG $0x2061a3c4; WORD $0x3a1c; BYTE $0x08 // vpinsrb xmm3, xmm3, byte [rdx + r15], 8 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x09 // vpinsrb xmm3, xmm3, byte [rdx + rdi], 9 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] LONG $0x2061e3c4; WORD $0x021c; BYTE $0x0a // vpinsrb xmm3, xmm3, byte [rdx + rax], 10 - LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] + LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] LONG $0x2061a3c4; WORD $0x121c; BYTE $0x0b // vpinsrb xmm3, xmm3, byte [rdx + r10], 11 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] LONG $0x2061e3c4; WORD $0x0a1c; BYTE $0x0c // vpinsrb xmm3, xmm3, byte [rdx + rcx], 12 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] LONG $0x2061e3c4; WORD $0x0a1c; BYTE $0x0d // vpinsrb xmm3, xmm3, byte [rdx + rcx], 13 @@ -29403,25 +30674,25 @@ LBB5_166: LONG $0x2061e3c4; WORD $0x0a1c; BYTE $0x0e // vpinsrb xmm3, xmm3, byte [rdx + rcx], 14 QUAD $0x0000009024ac8b4c // mov r13, qword [rsp + 144] LONG $0x2061a3c4; WORD $0x2a1c; BYTE $0x0f // vpinsrb xmm3, xmm3, byte [rdx + r13], 15 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] QUAD $0x01010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 1 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] QUAD $0x02010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 2 QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] QUAD $0x03010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 3 QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] QUAD $0x04010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 4 - LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x05010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 5 QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 
128] QUAD $0x06010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 6 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] QUAD $0x07010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 7 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x08010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 8 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] QUAD $0x09010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 9 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] QUAD $0x0a010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 10 LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] QUAD $0x0b010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 11 @@ -29446,7 +30717,7 @@ LBB5_166: QUAD $0x0a01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 10 WORD $0x8949; BYTE $0xc3 // mov r11, rax QUAD $0x0b01126c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r10 + 1], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0c01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 12 LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0d01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 13 @@ -29463,26 +30734,26 @@ LBB5_166: QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] LONG $0x027cb60f; BYTE $0x08 // movzx edi, byte [rdx + rax + 8] LONG $0xd76e79c5 // vmovd xmm10, edi - QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] QUAD $0x0001e024846ff9c5; BYTE $0x00 // vmovdqa xmm0, oword [rsp + 480] QUAD $0x010232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 2], 1 - QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] + QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] QUAD $0x02021a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 2], 2 QUAD $0x000000f824848b4c // mov r8, qword [rsp + 248] QUAD $0x030202442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 2], 3 QUAD $0x00000100248c8b4c // mov r9, qword [rsp + 256] QUAD $0x04020a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 2], 4 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] + LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] QUAD $0x05023a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 2], 5 QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x060202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 6 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x070202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 7 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x080202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 8 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x090202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 9 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] + QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] QUAD $0x0a0222442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 2], 10 LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] QUAD $0x0b022a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 2], 11 @@ -29499,24 +30770,24 @@ LBB5_166: QUAD $0x0102025c2061e3c4 // vpinsrb xmm3, 
xmm3, byte [rdx + rax + 2], 1 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x02020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 2 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] QUAD $0x03020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 3 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x04020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 4 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x05020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 5 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] QUAD $0x06020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 6 - QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x07023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x08023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 8 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] QUAD $0x09020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 9 QUAD $0x0a021a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 2], 10 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x0b020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 11 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] QUAD $0x0c020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 12 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0d020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 13 @@ -29531,9 +30802,9 @@ LBB5_166: QUAD $0x05033a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 3], 5 QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] QUAD $0x06030a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 3], 6 - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] QUAD $0x07033a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 3], 7 - QUAD $0x000000b8248c8b4c // mov r9, qword [rsp + 184] + QUAD $0x000000c8248c8b4c // mov r9, qword [rsp + 200] QUAD $0x08030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 8 QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] QUAD $0x09031a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 3], 9 @@ -29548,24 +30819,24 @@ LBB5_166: QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] QUAD $0x01030a6c2039e3c4 // vpinsrb xmm5, xmm8, byte [rdx + rcx + 3], 1 QUAD $0x0203026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 2 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x0303026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 3 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x04030a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 3], 4 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x05030a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 3], 5 - QUAD $0x000000a824b48b48 
// mov rsi, qword [rsp + 168] + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] QUAD $0x0603326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 3], 6 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] QUAD $0x0703326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 3], 7 QUAD $0x08033a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 3], 8 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] QUAD $0x0903326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 3], 9 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0a033a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 3], 10 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x0b033a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 3], 11 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] QUAD $0x0c033a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 3], 12 LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0d033a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 3], 13 @@ -29584,22 +30855,22 @@ LBB5_166: LONG $0x3a7cb60f; BYTE $0x09 // movzx edi, byte [rdx + rdi + 9] LONG $0xdf6e79c5 // vmovd xmm11, edi QUAD $0x0001a024846ff9c5; BYTE $0x00 // vmovdqa xmm0, oword [rsp + 416] - QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] + QUAD $0x000000b824a48b4c // mov r12, qword [rsp + 184] QUAD $0x010422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 4], 1 - QUAD $0x000000c824848b4c // mov r8, qword [rsp + 200] + QUAD $0x000000e024848b4c // mov r8, qword [rsp + 224] QUAD $0x020402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 4], 2 QUAD $0x000000f8249c8b48 // mov rbx, qword [rsp + 248] QUAD $0x03041a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 4], 3 QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] QUAD $0x04043a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 4], 4 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x05043a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 4], 5 QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x06043a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 4], 6 QUAD $0x07043a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 4], 7 QUAD $0x08040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 8 QUAD $0x09041a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 4], 9 - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] QUAD $0x0a043a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 4], 10 LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] QUAD $0x0b041a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 4], 11 @@ -29611,24 +30882,24 @@ LBB5_166: QUAD $0x0f0412442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 4], 15 QUAD $0x000000e824948b4c // mov r10, qword [rsp + 232] QUAD $0x0104125c2001a3c4 // vpinsrb xmm3, xmm15, byte [rdx + r10 + 4], 1 - QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] QUAD $0x02043a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 4], 2 QUAD $0x0304025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 3 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] 
+ LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0404025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 4 QUAD $0x05040a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 4], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0604025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 6 - QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x07043a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 4], 7 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0804025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 8 QUAD $0x0904325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 9 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0a04325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 4], 10 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0b04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 11 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] QUAD $0x0c040a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 4], 12 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0d040a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 4], 13 @@ -29640,13 +30911,13 @@ LBB5_166: QUAD $0x03051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 3 QUAD $0x0000010024a48b4c // mov r12, qword [rsp + 256] QUAD $0x040522642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 5], 4 - LONG $0x24448b4c; BYTE $0x70 // mov r8, qword [rsp + 112] + LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] QUAD $0x050502642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 5], 5 QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] QUAD $0x06051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 6 - LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] + LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] QUAD $0x07051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 7 - QUAD $0x000000b8249c8b48 // mov rbx, qword [rsp + 184] + QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] QUAD $0x08051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 8 QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] QUAD $0x09051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 9 @@ -29660,24 +30931,24 @@ LBB5_166: LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] QUAD $0x0f051a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 5], 15 QUAD $0x0105126c2049a3c4 // vpinsrb xmm5, xmm6, byte [rdx + r10 + 5], 1 - QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] - QUAD $0x02051a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 5], 2 QUAD $0x000000d8249c8b48 // mov rbx, qword [rsp + 216] + QUAD $0x02051a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 5], 2 + QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] QUAD $0x03051a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 5], 3 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] + LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] QUAD $0x04051a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 5], 4 - LONG $0x244c8b4c; BYTE $0x38 // mov r9, qword [rsp + 56] + LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] QUAD $0x05050a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 5], 5 - QUAD 
$0x000000a8249c8b48 // mov rbx, qword [rsp + 168] + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] QUAD $0x06051a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 5], 6 QUAD $0x07053a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 5], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x08053a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 5], 8 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x09053a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 5], 9 QUAD $0x0a05326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 5], 10 QUAD $0x0b05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0c05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 12 LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0d05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 13 @@ -29691,9 +30962,9 @@ LBB5_166: QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] LONG $0x027cb60f; BYTE $0x0a // movzx edi, byte [rdx + rax + 10] LONG $0xe76ef9c5 // vmovd xmm4, edi - QUAD $0x000000d0249c8b4c // mov r11, qword [rsp + 208] + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] QUAD $0x01061a442019a3c4 // vpinsrb xmm0, xmm12, byte [rdx + r11 + 6], 1 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] QUAD $0x020602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 2 QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] QUAD $0x030602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 3 @@ -29701,13 +30972,13 @@ LBB5_166: QUAD $0x050602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 6], 5 QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x060602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 6 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] QUAD $0x07060a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 6], 7 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x080602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 8 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x090602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 9 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] QUAD $0x0a0602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 10 QUAD $0x0b063a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 6], 11 QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] @@ -29719,26 +30990,26 @@ LBB5_166: LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0f0602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 15 QUAD $0x0106126c2041a3c4 // vpinsrb xmm5, xmm7, byte [rdx + r10 + 6], 1 - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] QUAD $0x0206026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 6], 2 - QUAD $0x000000d824848b4c // mov r8, qword [rsp + 216] + QUAD $0x000000a024848b4c // mov r8, qword [rsp + 160] QUAD $0x0306026c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r8 + 6], 3 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0406026c2051e3c4 // vpinsrb xmm5, xmm5, 
byte [rdx + rax + 6], 4 QUAD $0x05060a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 6], 5 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x06063a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 6], 6 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0706026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 6], 7 QUAD $0x0000012024ac8b4c // mov r13, qword [rsp + 288] QUAD $0x08062a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 6], 8 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0906026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 6], 9 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0a06326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 10 - LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] + LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] QUAD $0x0b060a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 6], 11 - LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] + QUAD $0x0000014024a48b4c // mov r12, qword [rsp + 320] QUAD $0x0c06226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 6], 12 LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0d06326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 6], 13 @@ -29747,22 +31018,22 @@ LBB5_166: QUAD $0x0000009024a48b4c // mov r12, qword [rsp + 144] QUAD $0x0f06226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 6], 15 QUAD $0x01071a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 7], 1 - QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] QUAD $0x020732542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 7], 2 QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] QUAD $0x030732542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 7], 3 QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] QUAD $0x040732542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 7], 4 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x050732542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 7], 5 QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] QUAD $0x060732542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 7], 6 QUAD $0x07070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 7 - QUAD $0x000000b824a48b4c // mov r12, qword [rsp + 184] + QUAD $0x000000c824a48b4c // mov r12, qword [rsp + 200] QUAD $0x080722542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 7], 8 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] QUAD $0x09070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 9 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] QUAD $0x0a070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 10 LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] QUAD $0x0b070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 11 @@ -29772,22 +31043,22 @@ LBB5_166: LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] QUAD $0x0f073a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 7], 15 QUAD $0x0107124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 7], 1 - QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] + QUAD $0x000000d8249c8b48 // mov rbx, qword [rsp + 216] QUAD 
$0x02071a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 7], 2 QUAD $0x0307024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 7], 3 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x04070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 4 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x05070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 5 QUAD $0x06073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 6 - QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] QUAD $0x07070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 7 QUAD $0x08072a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 7], 8 QUAD $0x0907024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 7], 9 - LONG $0x24748b4c; BYTE $0x68 // mov r14, qword [rsp + 104] + LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] QUAD $0x0a07324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 7], 10 QUAD $0x0b070a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 7], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0c07024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 7], 12 LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0d07024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 7], 13 @@ -29805,24 +31076,24 @@ LBB5_166: QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] LONG $0x027cb60f; BYTE $0x0b // movzx edi, byte [rdx + rax + 11] LONG $0xd76ef9c5 // vmovd xmm2, edi - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x010802442031e3c4 // vpinsrb xmm0, xmm9, byte [rdx + rax + 8], 1 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] QUAD $0x020802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 2 QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] QUAD $0x03080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 3 QUAD $0x0000010024948b4c // mov r10, qword [rsp + 256] QUAD $0x040812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 8], 4 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x050802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 5 QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] QUAD $0x060832442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 8], 6 - LONG $0x24448b4c; BYTE $0x40 // mov r8, qword [rsp + 64] + LONG $0x24448b4c; BYTE $0x70 // mov r8, qword [rsp + 112] QUAD $0x070802442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 8], 7 QUAD $0x080822442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 8], 8 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x090802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 8], 9 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] + QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] QUAD $0x0a0822442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 8], 10 LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0b083a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 8], 11 @@ -29836,42 +31107,42 @@ LBB5_166: QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] QUAD $0x01083a6c2029a3c4 // vpinsrb xmm5, xmm10, byte [rdx + r15 + 8], 1 QUAD $0x02081a6c2051e3c4 // vpinsrb 
xmm5, xmm5, byte [rdx + rbx + 8], 2 - QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] QUAD $0x03083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 3 - QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x04083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 4 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x05083a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 8], 5 - QUAD $0x000000a8248c8b4c // mov r9, qword [rsp + 168] + QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] QUAD $0x06080a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 8], 6 - QUAD $0x000000a024ac8b4c // mov r13, qword [rsp + 160] + QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] QUAD $0x07082a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 8], 7 QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] QUAD $0x08081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 8 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] QUAD $0x09081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 9 QUAD $0x0a08326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 8], 10 - LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] + LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] QUAD $0x0b08326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 8], 11 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] + QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] QUAD $0x0c081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 12 LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] QUAD $0x0d081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 13 LONG $0x24748b4c; BYTE $0x58 // mov r14, qword [rsp + 88] QUAD $0x0e08326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 8], 14 QUAD $0x0f081a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 8], 15 - QUAD $0x000000d0249c8b4c // mov r11, qword [rsp + 208] + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] QUAD $0x01091a742039a3c4 // vpinsrb xmm6, xmm8, byte [rdx + r11 + 9], 1 - QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] + QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] QUAD $0x02091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 2 QUAD $0x03090a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rcx + 9], 3 WORD $0x8948; BYTE $0xcb // mov rbx, rcx QUAD $0x040912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 9], 4 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] + LONG $0x24548b4c; BYTE $0x68 // mov r10, qword [rsp + 104] QUAD $0x050912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 9], 5 QUAD $0x060932742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rsi + 9], 6 QUAD $0x070902742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r8 + 9], 7 - QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] + QUAD $0x000000c824848b4c // mov r8, qword [rsp + 200] QUAD $0x080902742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r8 + 9], 8 QUAD $0x090902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 9 QUAD $0x0a0922742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r12 + 9], 10 @@ -29887,24 +31158,24 @@ LBB5_166: QUAD $0x0f0902742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rax + 9], 15 QUAD $0x01093a7c2021a3c4 // vpinsrb xmm7, xmm11, byte [rdx + r15 + 9], 1 WORD $0x894d; BYTE $0xfc // mov r12, r15 - QUAD $0x000000e024848b48 
// mov rax, qword [rsp + 224] - QUAD $0x0209027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 2 QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x0209027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 2 + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x0309027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 3 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0409027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 4 QUAD $0x05093a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rdi + 9], 5 QUAD $0x06090a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r9 + 9], 6 QUAD $0x07092a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r13 + 9], 7 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0809027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 8 - LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] QUAD $0x09093a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r15 + 9], 9 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0a09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 10 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0b09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0c09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 12 LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0d09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 13 @@ -29921,21 +31192,21 @@ LBB5_166: QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] LONG $0x027cb60f; BYTE $0x0c // movzx edi, byte [rdx + rax + 12] LONG $0xef6ef9c5 // vmovd xmm5, edi - QUAD $0x000000d0249c8b4c // mov r11, qword [rsp + 208] + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] QUAD $0x010a1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 10], 1 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] QUAD $0x020a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 2 QUAD $0x030a1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 10], 3 QUAD $0x0000010024ac8b4c // mov r13, qword [rsp + 256] QUAD $0x040a2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 10], 4 QUAD $0x050a125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 10], 5 QUAD $0x060a325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 10], 6 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x070a3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 10], 7 QUAD $0x080a025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 10], 8 QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] QUAD $0x090a125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 10], 9 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x0a0a3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 10], 10 LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] QUAD $0x0b0a025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 10], 11 @@ -29947,26 +31218,26 @@ LBB5_166: LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] QUAD $0x0f0a3a5c2061e3c4 // vpinsrb xmm3, 
xmm3, byte [rdx + rdi + 10], 15 QUAD $0x010a22642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 10], 1 - QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] + QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] QUAD $0x020a32642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r14 + 10], 2 - QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] QUAD $0x030a3a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 10], 3 - QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x040a3a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rdi + 10], 4 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x050a0a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 10], 5 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] QUAD $0x060a0a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 10], 6 - QUAD $0x000000a024a48b4c // mov r12, qword [rsp + 160] + QUAD $0x000000a824a48b4c // mov r12, qword [rsp + 168] QUAD $0x070a22642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 10], 7 QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] QUAD $0x080a0a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 10], 8 QUAD $0x090a3a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 10], 9 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x0a0a0a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 10], 10 - LONG $0x247c8b4c; BYTE $0x48 // mov r15, qword [rsp + 72] + LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] QUAD $0x0b0a3a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 10], 11 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] QUAD $0x0c0a0a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 10], 12 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0d0a0a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 10], 13 @@ -29980,15 +31251,15 @@ LBB5_166: WORD $0x8948; BYTE $0xd8 // mov rax, rbx QUAD $0x040b2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 11], 4 WORD $0x894d; BYTE $0xeb // mov r11, r13 - LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x050b0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 11], 5 QUAD $0x060b324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 11], 6 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] QUAD $0x070b0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 11], 7 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x080b0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 11], 8 QUAD $0x090b124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 11], 9 - QUAD $0x000000b024948b4c // mov r10, qword [rsp + 176] + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] QUAD $0x0a0b124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 11], 10 QUAD $0x0b0b024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 11], 11 QUAD $0x0c0b0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 11], 12 @@ -30001,22 +31272,22 @@ LBB5_166: QUAD $0x000000e8249c8b48 // mov rbx, qword [rsp + 232] QUAD $0x010b1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 
11], 1 QUAD $0x020b32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 11], 2 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] QUAD $0x030b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 11], 3 QUAD $0x040b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 11], 4 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x050b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 11], 5 - QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] + QUAD $0x000000b024ac8b4c // mov r13, qword [rsp + 176] QUAD $0x060b2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 11], 6 QUAD $0x070b22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 11], 7 QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] QUAD $0x080b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 11], 8 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] QUAD $0x090b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 11], 9 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x0a0b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 11], 10 QUAD $0x0b0b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 11], 11 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] QUAD $0x0c0b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 11], 12 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0d0b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 11], 13 @@ -30034,19 +31305,19 @@ LBB5_166: QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] LONG $0x0a7cb60f; BYTE $0x0d // movzx edi, byte [rdx + rcx + 13] LONG $0xcf6ef9c5 // vmovd xmm1, edi - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] QUAD $0x010c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 12], 1 - QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] QUAD $0x020c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 2 QUAD $0x030c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 3 QUAD $0x040c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 12], 4 - LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] + LONG $0x244c8b4c; BYTE $0x68 // mov r9, qword [rsp + 104] QUAD $0x050c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 12], 5 QUAD $0x0000008024848b4c // mov r8, qword [rsp + 128] QUAD $0x060c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 12], 6 - LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] + LONG $0x245c8b4c; BYTE $0x70 // mov r11, qword [rsp + 112] QUAD $0x070c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 12], 7 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x080c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 8 QUAD $0x0000009824b48b4c // mov r14, qword [rsp + 152] QUAD $0x090c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 12], 9 @@ -30062,26 +31333,26 @@ LBB5_166: LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] QUAD $0x0f0c22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 12], 15 QUAD $0x010c1a542051e3c4 // vpinsrb xmm2, xmm5, byte [rdx + rbx + 12], 1 - QUAD $0x000000e024bc8b4c // mov r15, qword [rsp + 224] + QUAD 
$0x000000d824bc8b4c // mov r15, qword [rsp + 216] QUAD $0x020c3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 12], 2 - QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] QUAD $0x030c3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 12], 3 - QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] + LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] QUAD $0x040c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 4 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] QUAD $0x050c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 5 QUAD $0x060c2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 12], 6 - QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] + QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] QUAD $0x070c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 7 QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] QUAD $0x080c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 8 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] QUAD $0x090c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 9 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] QUAD $0x0a0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 10 - LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] QUAD $0x0b0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 11 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] + QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] QUAD $0x0c0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 12 LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] QUAD $0x0d0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 13 @@ -30101,7 +31372,7 @@ LBB5_166: QUAD $0x070d1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 13], 7 QUAD $0x080d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 13], 8 QUAD $0x090d325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 13], 9 - QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] + QUAD $0x000000d024848b4c // mov r8, qword [rsp + 208] QUAD $0x0a0d025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 13], 10 LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0b0d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 13], 11 @@ -30116,23 +31387,23 @@ LBB5_166: QUAD $0x010d0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 13], 1 QUAD $0x020d3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 13], 2 QUAD $0x030d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 3 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x040d0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 13], 4 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x050d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 5 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x060d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 6 - QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x070d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + 
rdi + 13], 7 QUAD $0x0000012024948b4c // mov r10, qword [rsp + 288] QUAD $0x080d124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 13], 8 - LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x24648b4c; BYTE $0x40 // mov r12, qword [rsp + 64] QUAD $0x090d224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 13], 9 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0a0d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 10 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x0b0d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 11 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] QUAD $0x0c0d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 12 LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0d0d3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 13], 13 @@ -30150,18 +31421,18 @@ LBB5_166: QUAD $0x0000010824bc8b48 // mov rdi, qword [rsp + 264] LONG $0x3a7cb60f; BYTE $0x0e // movzx edi, byte [rdx + rdi + 14] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x000000d0248c8b4c // mov r9, qword [rsp + 208] + QUAD $0x000000b8248c8b4c // mov r9, qword [rsp + 184] QUAD $0x010e0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 14], 1 QUAD $0x020e2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 14], 2 QUAD $0x030e324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 14], 3 QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] QUAD $0x040e324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 14], 4 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x050e3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 14], 5 QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x060e3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 14], 6 QUAD $0x070e1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 14], 7 - QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] + QUAD $0x000000c8249c8b4c // mov r11, qword [rsp + 200] QUAD $0x080e1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 14], 8 QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x090e3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 14], 9 @@ -30176,24 +31447,24 @@ LBB5_166: QUAD $0x0f0e324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 14], 15 QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] QUAD $0x010e02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 14], 1 - QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] QUAD $0x020e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 2 - QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] + QUAD $0x000000a024b48b4c // mov r14, qword [rsp + 160] QUAD $0x030e32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 14], 3 QUAD $0x040e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 4 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x050e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 5 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] QUAD $0x060e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 6 - QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] + QUAD $0x000000a8248c8b48 
// mov rcx, qword [rsp + 168] QUAD $0x070e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 7 QUAD $0x080e12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 14], 8 QUAD $0x090e22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 14], 9 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x0a0e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 10 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x0b0e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 11 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] QUAD $0x0c0e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 12 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0d0e0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 14], 13 @@ -30204,21 +31475,21 @@ LBB5_166: LONG $0x3a7cb60f; BYTE $0x0f // movzx edi, byte [rdx + rdi + 15] LONG $0xd76ef9c5 // vmovd xmm2, edi QUAD $0x010f0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 15], 1 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] QUAD $0x020f3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 15], 2 QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] QUAD $0x030f3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 15], 3 QUAD $0x040f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 4 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x050f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 5 QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] QUAD $0x060f22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 15], 6 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] QUAD $0x070f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 7 QUAD $0x080f1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 15], 8 QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] QUAD $0x090f1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 15], 9 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] QUAD $0x0a0f32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 15], 10 QUAD $0x0b0f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 11 QUAD $0x0c0f1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 15], 12 @@ -30231,26 +31502,26 @@ LBB5_166: LONG $0x7cb60f42; WORD $0x0f12 // movzx edi, byte [rdx + r10 + 15] LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x010f025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 15], 1 - QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] + QUAD $0x000000d824ac8b4c // mov r13, qword [rsp + 216] QUAD $0x020f2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 15], 2 QUAD $0x030f325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 15], 3 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x040f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 4 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x050f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // 
mov rax, qword [rsp + 176] QUAD $0x060f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 6 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x070f025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 15], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x080f3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 15], 8 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x090f3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 15], 9 - LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] QUAD $0x0a0f3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 15], 10 - LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] QUAD $0x0b0f1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 15], 11 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] QUAD $0x0c0f3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 15], 12 QUAD $0x0d0f0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 15], 13 LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] @@ -30264,23 +31535,23 @@ LBB5_166: QUAD $0x000000f0248c8b4c // mov r9, qword [rsp + 240] LONG $0x7cb60f42; WORD $0x100a // movzx edi, byte [rdx + r9 + 16] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] QUAD $0x01100a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 16], 1 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] QUAD $0x02100a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 16], 2 QUAD $0x000000f824848b4c // mov r8, qword [rsp + 248] QUAD $0x031002442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 16], 3 QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] QUAD $0x04103a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 16], 4 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x05103a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 16], 5 QUAD $0x061022442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 16], 6 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x07103a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 16], 7 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] QUAD $0x08103a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 16], 8 QUAD $0x09101a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 16], 9 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] + QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] QUAD $0x0a1022442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 16], 10 LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0b103a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 16], 11 @@ -30296,22 +31567,22 @@ LBB5_166: QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] QUAD $0x0110324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 1 QUAD $0x02102a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 16], 2 - QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD 
$0x0310324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 3 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0410324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 4 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] QUAD $0x0510324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 5 - QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] + QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] QUAD $0x06101a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 16], 6 QUAD $0x0710024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 16], 7 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0810024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 16], 8 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0910024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 16], 9 QUAD $0x0a103a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 16], 10 QUAD $0x0b101a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 16], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0c10024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 16], 12 LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] QUAD $0x0d101a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 16], 13 @@ -30320,19 +31591,19 @@ LBB5_166: QUAD $0x0f10324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 16], 15 LONG $0x7cb60f42; WORD $0x110a // movzx edi, byte [rdx + r9 + 17] LONG $0xd76ef9c5 // vmovd xmm2, edi - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x011102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 1 QUAD $0x02110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 2 QUAD $0x031102542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 17], 3 QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] QUAD $0x041102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 4 - LONG $0x24748b4c; BYTE $0x70 // mov r14, qword [rsp + 112] + LONG $0x24748b4c; BYTE $0x68 // mov r14, qword [rsp + 104] QUAD $0x051132542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 17], 5 QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] QUAD $0x061132542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 17], 6 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x071102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 7 - QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] + QUAD $0x000000c824bc8b4c // mov r15, qword [rsp + 200] QUAD $0x08113a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 17], 8 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x091102542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 17], 9 @@ -30351,26 +31622,26 @@ LBB5_166: LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x000000e824a48b4c // mov r12, qword [rsp + 232] QUAD $0x0111225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 17], 1 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] QUAD $0x02110a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 17], 2 - QUAD $0x000000d824848b4c // mov r8, qword [rsp + 216] + QUAD $0x000000a024848b4c // mov r8, qword [rsp + 160] QUAD $0x0311025c2061a3c4 // vpinsrb xmm3, xmm3, 
byte [rdx + r8 + 17], 3 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x04110a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 17], 4 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x05110a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 17], 5 QUAD $0x06111a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 17], 6 - QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] QUAD $0x07110a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 17], 7 QUAD $0x00000120248c8b4c // mov r9, qword [rsp + 288] QUAD $0x08110a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 17], 8 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] QUAD $0x09110a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 17], 9 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x0a110a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 17], 10 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x0b113a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 17], 11 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] QUAD $0x0c113a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 17], 12 WORD $0x8949; BYTE $0xdb // mov r11, rbx QUAD $0x0d111a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 17], 13 @@ -30385,9 +31656,9 @@ LBB5_166: QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] LONG $0x3a7cb60f; BYTE $0x12 // movzx edi, byte [rdx + rdi + 18] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] QUAD $0x01123a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 18], 1 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] QUAD $0x02123a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 18], 2 QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] QUAD $0x03123a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 18], 3 @@ -30395,12 +31666,12 @@ LBB5_166: QUAD $0x04123a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 18], 4 QUAD $0x051232442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 18], 5 QUAD $0x061232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 6 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] QUAD $0x071232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 7 QUAD $0x08123a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 18], 8 QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x091232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 9 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] QUAD $0x0a1232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 10 LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] QUAD $0x0b1232442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 18], 11 @@ -30413,25 +31684,25 @@ LBB5_166: LONG $0x7cb60f42; WORD $0x1212 // movzx edi, byte [rdx + r10 + 18] LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x0112224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 18], 1 
- QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] + QUAD $0x000000d824948b4c // mov r10, qword [rsp + 216] QUAD $0x0212124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 18], 2 QUAD $0x0312024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 18], 3 WORD $0x894d; BYTE $0xc4 // mov r12, r8 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0412324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 4 - LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] QUAD $0x0512324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 18], 5 - QUAD $0x000000a824848b4c // mov r8, qword [rsp + 168] + QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] QUAD $0x0612024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 18], 6 - QUAD $0x000000a024ac8b4c // mov r13, qword [rsp + 160] + QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] QUAD $0x07122a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 18], 7 QUAD $0x08120a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 18], 8 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0912024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 18], 9 QUAD $0x0a120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 10 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x0b120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 11 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] QUAD $0x0c120a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 18], 12 QUAD $0x0d121a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 18], 13 LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] @@ -30440,25 +31711,25 @@ LBB5_166: QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] LONG $0x7cb60f42; WORD $0x133a // movzx edi, byte [rdx + r15 + 19] LONG $0xd76ef9c5 // vmovd xmm2, edi - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] QUAD $0x01130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 1 - QUAD $0x000000c8248c8b4c // mov r9, qword [rsp + 200] + QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] QUAD $0x02130a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 19], 2 QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] QUAD $0x03130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 3 QUAD $0x00000100249c8b48 // mov rbx, qword [rsp + 256] QUAD $0x04131a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 19], 4 - LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x05130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 5 QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] QUAD $0x06130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 6 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] QUAD $0x07130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 7 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x08130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 8 QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] QUAD $0x09131a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 
19], 9 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] QUAD $0x0a130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 10 LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] QUAD $0x0b130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 11 @@ -30485,11 +31756,11 @@ LBB5_166: QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] QUAD $0x0813325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 19], 8 QUAD $0x0913025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 9 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0a13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 10 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0b13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 11 - LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + QUAD $0x0000014024948b4c // mov r10, qword [rsp + 320] QUAD $0x0c13125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 19], 12 LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0d13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 13 @@ -30503,22 +31774,22 @@ LBB5_166: QUAD $0x00034024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 832], ymm0 LONG $0x7cb60f42; WORD $0x143a // movzx edi, byte [rdx + r15 + 20] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x000000d024848b4c // mov r8, qword [rsp + 208] + QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] QUAD $0x011402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 20], 1 QUAD $0x02140a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 20], 2 QUAD $0x000000f824bc8b4c // mov r15, qword [rsp + 248] QUAD $0x03143a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 20], 3 QUAD $0x04141a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 20], 4 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x051432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 5 QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x061402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 6 - LONG $0x244c8b4c; BYTE $0x40 // mov r9, qword [rsp + 64] + LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] QUAD $0x07140a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 20], 7 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x081402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 8 QUAD $0x09141a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 20], 9 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] QUAD $0x0a1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 10 LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0b1402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 20], 11 @@ -30534,23 +31805,23 @@ LBB5_166: LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] QUAD $0x01140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 1 - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] QUAD $0x02140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 2 QUAD $0x0314224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 20], 3 - QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] 
+ LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x04143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 4 - LONG $0x245c8b4c; BYTE $0x38 // mov r11, qword [rsp + 56] + LONG $0x245c8b4c; BYTE $0x48 // mov r11, qword [rsp + 72] QUAD $0x05141a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 20], 5 QUAD $0x0614324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 20], 6 - QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x07143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x08143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 8 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] QUAD $0x09141a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 20], 9 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0a143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 10 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x0b143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 11 QUAD $0x0c14124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 20], 12 LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] @@ -30562,7 +31833,7 @@ LBB5_166: LONG $0x3a7cb60f; BYTE $0x15 // movzx edi, byte [rdx + rdi + 21] LONG $0xd76ef9c5 // vmovd xmm2, edi QUAD $0x011502542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 21], 1 - QUAD $0x000000c824ac8b4c // mov r13, qword [rsp + 200] + QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] QUAD $0x02152a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 21], 2 QUAD $0x03153a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 21], 3 QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] @@ -30571,11 +31842,11 @@ LBB5_166: QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] QUAD $0x061532542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 21], 6 QUAD $0x07150a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 21], 7 - QUAD $0x000000b8248c8b4c // mov r9, qword [rsp + 184] + QUAD $0x000000c8248c8b4c // mov r9, qword [rsp + 200] QUAD $0x08150a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 21], 8 QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x09153a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 21], 9 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x0a153a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 21], 10 LONG $0x24548b4c; BYTE $0x60 // mov r10, qword [rsp + 96] QUAD $0x0b1512542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 21], 11 @@ -30592,22 +31863,22 @@ LBB5_166: QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x0115025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 1 QUAD $0x02150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 2 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x0315025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 3 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0415025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 4 QUAD $0x05151a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 21], 5 QUAD $0x0615325c2061a3c4 // vpinsrb xmm3, xmm3, 
byte [rdx + r14 + 21], 6 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0715025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 7 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0815025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 8 QUAD $0x09151a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 21], 9 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0a15025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 10 - LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] QUAD $0x0b151a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 21], 11 - LONG $0x24748b4c; BYTE $0x20 // mov r14, qword [rsp + 32] + QUAD $0x0000014024b48b4c // mov r14, qword [rsp + 320] QUAD $0x0c15325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 21], 12 LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0d15025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 21], 13 @@ -30621,21 +31892,21 @@ LBB5_166: QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x027cb60f; BYTE $0x16 // movzx edi, byte [rdx + rax + 22] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x011602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 22], 1 QUAD $0x02162a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 22], 2 QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] QUAD $0x031602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 22], 3 QUAD $0x04163a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 22], 4 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x05163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 5 QUAD $0x061632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 6 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] QUAD $0x071632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 7 QUAD $0x08160a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 22], 8 QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x091632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 9 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] QUAD $0x0a1632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 22], 10 QUAD $0x0b1612442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 22], 11 QUAD $0x0c1602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 22], 12 @@ -30650,23 +31921,23 @@ LBB5_166: LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] QUAD $0x01163a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 22], 1 - QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] - QUAD $0x02163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 2 QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x02163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 2 + QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] QUAD $0x03163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 3 - QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x04163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 
4 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x05163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 5 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x06163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 6 - QUAD $0x000000a024a48b4c // mov r12, qword [rsp + 160] + QUAD $0x000000a824a48b4c // mov r12, qword [rsp + 168] QUAD $0x0716224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 22], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x08163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 8 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x09163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 9 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0a163a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 22], 10 QUAD $0x0b161a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 22], 11 QUAD $0x0c16324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 22], 12 @@ -30678,18 +31949,18 @@ LBB5_166: QUAD $0x000000f0249c8b48 // mov rbx, qword [rsp + 240] LONG $0x1a7cb60f; BYTE $0x17 // movzx edi, byte [rdx + rbx + 23] LONG $0xd76ef9c5 // vmovd xmm2, edi - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] QUAD $0x01173a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 23], 1 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] QUAD $0x02173a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 23], 2 QUAD $0x031702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 3 QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] QUAD $0x041702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 4 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x05173a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 23], 5 QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] QUAD $0x061732542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 23], 6 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x07173a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 23], 7 QUAD $0x08170a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 23], 8 QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] @@ -30706,27 +31977,27 @@ LBB5_166: LONG $0x7cb60f42; WORD $0x171a // movzx edi, byte [rdx + r11 + 23] LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x01173a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 23], 1 - QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] + QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] QUAD $0x0217325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 23], 2 - QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] QUAD $0x03173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 3 - QUAD $0x0000014024bc8b4c // mov r15, qword [rsp + 320] + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] QUAD $0x04173a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 23], 4 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD 
$0x05173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 5 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x06173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 6 QUAD $0x0717225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 23], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x08173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 8 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x09173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 9 - LONG $0x24648b4c; BYTE $0x68 // mov r12, qword [rsp + 104] - QUAD $0x0a17225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 23], 10 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + QUAD $0x0a173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 10 + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x0b173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 11 - LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] - QUAD $0x0c17125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 23], 12 + QUAD $0x0000014024a48b4c // mov r12, qword [rsp + 320] + QUAD $0x0c17225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 23], 12 QUAD $0x0d17025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 23], 13 LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] QUAD $0x0e173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 14 @@ -30735,22 +32006,22 @@ LBB5_166: LONG $0x386563c4; WORD $0x01da // vinserti128 ymm11, ymm3, xmm2, 1 LONG $0x1a7cb60f; BYTE $0x18 // movzx edi, byte [rdx + rbx + 24] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] QUAD $0x01180a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 24], 1 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] QUAD $0x02180a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 24], 2 - QUAD $0x000000f824848b4c // mov r8, qword [rsp + 248] - QUAD $0x031802442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 24], 3 + QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] + QUAD $0x031812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 24], 3 QUAD $0x041802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 4 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x051802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 5 QUAD $0x061832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 24], 6 - LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] + LONG $0x245c8b4c; BYTE $0x70 // mov r11, qword [rsp + 112] QUAD $0x07181a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 24], 7 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] - QUAD $0x08180a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 24], 8 + QUAD $0x000000c824b48b4c // mov r14, qword [rsp + 200] + QUAD $0x081832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 24], 8 QUAD $0x09180a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 24], 9 - QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] + QUAD $0x000000d0248c8b4c // mov r9, qword [rsp + 208] QUAD $0x0a180a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 24], 10 LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] QUAD 
$0x0b180a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 24], 11 @@ -30764,50 +32035,50 @@ LBB5_166: QUAD $0x0000010824bc8b48 // mov rdi, qword [rsp + 264] LONG $0x3a7cb60f; BYTE $0x18 // movzx edi, byte [rdx + rdi + 24] LONG $0xcf6ef9c5 // vmovd xmm1, edi - QUAD $0x000000e824b48b4c // mov r14, qword [rsp + 232] - QUAD $0x0118324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 24], 1 + QUAD $0x000000e8249c8b48 // mov rbx, qword [rsp + 232] + QUAD $0x01181a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 24], 1 QUAD $0x0218324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 2 - QUAD $0x000000d824ac8b4c // mov r13, qword [rsp + 216] - QUAD $0x03182a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 24], 3 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x0318324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 3 QUAD $0x04183a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 24], 4 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] QUAD $0x0518324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 5 - QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] QUAD $0x0618324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 6 - QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] - QUAD $0x07181a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 24], 7 + QUAD $0x000000a824848b4c // mov r8, qword [rsp + 168] + QUAD $0x0718024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 24], 7 QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] QUAD $0x0818324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 8 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] QUAD $0x0918324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 9 - QUAD $0x0a18224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 24], 10 - LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x0a18324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 10 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0b18324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 11 - QUAD $0x0c18124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 24], 12 + QUAD $0x0c18224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 24], 12 LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0d18324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 13 - LONG $0x247c8b4c; BYTE $0x58 // mov r15, qword [rsp + 88] - QUAD $0x0e183a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 24], 14 + LONG $0x246c8b4c; BYTE $0x58 // mov r13, qword [rsp + 88] + QUAD $0x0e182a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 24], 14 QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] QUAD $0x0f18324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 15 QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] LONG $0x327cb60f; BYTE $0x19 // movzx edi, byte [rdx + rsi + 25] LONG $0xd76ef9c5 // vmovd xmm2, edi - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] - QUAD $0x011912542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 25], 1 - QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x000000b824a48b4c // mov r12, qword [rsp + 184] + QUAD $0x011922542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 25], 1 + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] QUAD $0x021932542069e3c4 // 
vpinsrb xmm2, xmm2, byte [rdx + rsi + 25], 2 - QUAD $0x031902542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 25], 3 + QUAD $0x031912542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 25], 3 QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] QUAD $0x04193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 4 QUAD $0x051902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 5 QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x061902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 6 QUAD $0x07191a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 25], 7 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] - QUAD $0x08193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 8 - QUAD $0x0000009824848b4c // mov r8, qword [rsp + 152] - QUAD $0x091902542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 25], 9 + QUAD $0x081932542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 25], 8 + QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] + QUAD $0x091912542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 25], 9 QUAD $0x0a190a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 25], 10 QUAD $0x0b190a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 25], 11 QUAD $0x00000088248c8b4c // mov r9, qword [rsp + 136] @@ -30821,29 +32092,30 @@ LBB5_166: QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] LONG $0x0a7cb60f; BYTE $0x19 // movzx edi, byte [rdx + rcx + 25] LONG $0xdf6ef9c5 // vmovd xmm3, edi - QUAD $0x0119325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 25], 1 - QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x01191a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 25], 1 + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] QUAD $0x02193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 2 - QUAD $0x03192a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 25], 3 - QUAD $0x0000014024b48b4c // mov r14, qword [rsp + 320] - QUAD $0x0419325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 25], 4 - LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] - QUAD $0x05192a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 25], 5 - QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] + QUAD $0x03193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 3 + QUAD $0x04193a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 25], 4 + LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + QUAD $0x05193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 5 + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x06193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 6 - QUAD $0x07191a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 25], 7 + QUAD $0x0719025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 25], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x08193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 8 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x09193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 9 - QUAD $0x0a19225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 25], 10 - LONG $0x24648b4c; BYTE $0x48 // mov r12, qword [rsp + 72] - QUAD $0x0b19225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 25], 11 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] + QUAD $0x0a19325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 25], 10 + LONG 
$0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + QUAD $0x0b193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 11 + QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] QUAD $0x0c191a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 25], 12 LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0d193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 13 - QUAD $0x0e193a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 25], 14 + QUAD $0x0e192a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 25], 14 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00022024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 544], ymm0 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] @@ -30853,21 +32125,21 @@ LBB5_166: QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] LONG $0x3a7cb60f; BYTE $0x1a // movzx edi, byte [rdx + rdi + 26] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x011a12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 26], 1 + QUAD $0x011a22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 26], 1 QUAD $0x021a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 2 QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] QUAD $0x031a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 3 - QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] - QUAD $0x041a3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 26], 4 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x0000010024a48b4c // mov r12, qword [rsp + 256] + QUAD $0x041a22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 26], 4 + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x051a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 5 QUAD $0x061a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 6 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x071a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 7 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x081a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 8 - QUAD $0x091a02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 26], 9 - QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] + QUAD $0x091a12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 26], 9 + QUAD $0x000000d024848b4c // mov r8, qword [rsp + 208] QUAD $0x0a1a02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 26], 10 LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] QUAD $0x0b1a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 11 @@ -30881,26 +32153,27 @@ LBB5_166: LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] QUAD $0x011a0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 26], 1 - QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] QUAD $0x021a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 2 - QUAD $0x000000d824948b4c // mov r10, qword [rsp + 216] + QUAD $0x000000a024948b4c // mov r10, qword [rsp + 160] QUAD $0x031a124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 26], 3 - QUAD $0x041a324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 26], 4 - QUAD $0x051a2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 26], 5 - QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] + QUAD $0x041a3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 26], 4 + LONG $0x247c8b4c; BYTE $0x48 // mov 
r15, qword [rsp + 72] + QUAD $0x051a3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 26], 5 + QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] QUAD $0x061a1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 26], 6 - QUAD $0x000000a024ac8b4c // mov r13, qword [rsp + 160] + QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] QUAD $0x071a2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 26], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x081a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 8 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x091a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 9 - LONG $0x24748b4c; BYTE $0x68 // mov r14, qword [rsp + 104] QUAD $0x0a1a324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 26], 10 - QUAD $0x0b1a224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 26], 11 + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + QUAD $0x0b1a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 11 QUAD $0x0c1a1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 26], 12 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] - QUAD $0x0d1a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 13 + LONG $0x24748b4c; BYTE $0x28 // mov r14, qword [rsp + 40] + QUAD $0x0d1a324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 26], 13 LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] QUAD $0x0e1a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 14 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] @@ -30908,18 +32181,18 @@ LBB5_166: QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] LONG $0x3a7cb60f; BYTE $0x1b // movzx edi, byte [rdx + rdi + 27] LONG $0xd76ef9c5 // vmovd xmm2, edi - QUAD $0x000000d0249c8b48 // mov rbx, qword [rsp + 208] + QUAD $0x000000b8249c8b48 // mov rbx, qword [rsp + 184] QUAD $0x011b1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 27], 1 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] QUAD $0x021b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 27], 2 QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] QUAD $0x031b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 27], 3 - QUAD $0x041b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 27], 4 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x041b22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 27], 4 + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x051b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 27], 5 QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x061b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 27], 6 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x071b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 27], 7 QUAD $0x081b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 27], 8 QUAD $0x0000009824a48b4c // mov r12, qword [rsp + 152] @@ -30937,26 +32210,25 @@ LBB5_166: LONG $0x327cb60f; BYTE $0x1b // movzx edi, byte [rdx + rsi + 27] LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x011b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 1 - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] QUAD $0x021b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 2 QUAD $0x031b125c2061a3c4 // vpinsrb xmm3, xmm3, byte 
[rdx + r10 + 27], 3 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x041b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 4 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] - QUAD $0x051b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 5 + QUAD $0x051b3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 27], 5 QUAD $0x061b1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 27], 6 QUAD $0x071b2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 27], 7 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x081b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 8 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x091b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 9 - QUAD $0x0a1b325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 27], 10 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + QUAD $0x0a1b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 10 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x0b1b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 11 - LONG $0x244c8b4c; BYTE $0x20 // mov r9, qword [rsp + 32] - QUAD $0x0c1b0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 27], 12 - LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] - QUAD $0x0d1b1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 27], 13 + QUAD $0x00000140249c8b4c // mov r11, qword [rsp + 320] + QUAD $0x0c1b1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 27], 12 + QUAD $0x0d1b325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 27], 13 LONG $0x247c8b4c; BYTE $0x58 // mov r15, qword [rsp + 88] QUAD $0x0e1b3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 27], 14 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] @@ -30969,23 +32241,23 @@ LBB5_166: LONG $0x3a7cb60f; BYTE $0x1c // movzx edi, byte [rdx + rdi + 28] LONG $0xc76ef9c5 // vmovd xmm0, edi QUAD $0x011c1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 28], 1 - QUAD $0x000000c824948b4c // mov r10, qword [rsp + 200] - QUAD $0x021c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 2 + QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] + QUAD $0x021c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 28], 2 QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] QUAD $0x031c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 28], 3 - QUAD $0x0000010024b48b4c // mov r14, qword [rsp + 256] - QUAD $0x041c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 28], 4 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x0000010024948b4c // mov r10, qword [rsp + 256] + QUAD $0x041c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 4 + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x051c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 28], 5 QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x061c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 28], 6 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x071c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 28], 7 - QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] + QUAD $0x000000c824848b4c // mov r8, qword [rsp + 200] QUAD $0x081c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 8 QUAD $0x091c22442079a3c4 // 
vpinsrb xmm0, xmm0, byte [rdx + r12 + 28], 9 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] - QUAD $0x0a1c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 28], 10 + QUAD $0x000000d024b48b4c // mov r14, qword [rsp + 208] + QUAD $0x0a1c32442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 28], 10 LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0b1c3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 28], 11 QUAD $0x0000008824a48b4c // mov r12, qword [rsp + 136] @@ -31000,50 +32272,49 @@ LBB5_166: LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] QUAD $0x011c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 1 - QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] - QUAD $0x021c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 2 QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] + QUAD $0x021c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 2 + QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x031c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 3 - QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x041c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 4 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x051c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 5 - QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] QUAD $0x061c1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 28], 6 - QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x071c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x081c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 8 QUAD $0x091c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 9 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0a1c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 10 QUAD $0x0b1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 11 - QUAD $0x0c1c0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 28], 12 - WORD $0x894c; BYTE $0xd9 // mov rcx, r11 - QUAD $0x0d1c1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 28], 13 + QUAD $0x0c1c1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 28], 12 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + QUAD $0x0d1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 13 QUAD $0x0e1c3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 28], 14 QUAD $0x00000090249c8b4c // mov r11, qword [rsp + 144] QUAD $0x0f1c1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 28], 15 QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] LONG $0x027cb60f; BYTE $0x1d // movzx edi, byte [rdx + rax + 29] LONG $0xd76ef9c5 // vmovd xmm2, edi - QUAD $0x000000d0248c8b4c // mov r9, qword [rsp + 208] - QUAD $0x011d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 1 - QUAD $0x021d12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 29], 2 + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x011d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 1 + QUAD $0x021d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 2 QUAD $0x000000f824bc8b48 // mov rdi, qword [rsp + 248] QUAD 
$0x031d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 3 - QUAD $0x041d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 4 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] + QUAD $0x041d12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 29], 4 + LONG $0x24548b4c; BYTE $0x68 // mov r10, qword [rsp + 104] QUAD $0x051d12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 29], 5 QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x061d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 6 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] - QUAD $0x071d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 7 + LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] + QUAD $0x071d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 7 QUAD $0x081d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 8 QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x091d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 9 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] - QUAD $0x0a1d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 10 + QUAD $0x0a1d32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 29], 10 LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0b1d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 11 QUAD $0x0c1d22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 29], 12 @@ -31057,25 +32328,25 @@ LBB5_166: LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] QUAD $0x011d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 1 - QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] + QUAD $0x000000d824ac8b4c // mov r13, qword [rsp + 216] QUAD $0x021d2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 29], 2 QUAD $0x031d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 3 - QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x041d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 4 - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] + LONG $0x24648b4c; BYTE $0x48 // mov r12, qword [rsp + 72] QUAD $0x051d225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 29], 5 QUAD $0x061d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 29], 6 - QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] + QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] QUAD $0x071d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 7 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x081d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 8 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x091d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 9 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0a1d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 10 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x0b1d3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 29], 11 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] + QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] QUAD $0x0c1d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 29], 12 QUAD $0x0d1d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 13 QUAD $0x0e1d3a642061a3c4 // vpinsrb xmm4, xmm3, 
byte [rdx + r15 + 29], 14 @@ -31086,11 +32357,12 @@ LBB5_166: QUAD $0x0002c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 704], ymm0 LONG $0x027cb60f; BYTE $0x1e // movzx edi, byte [rdx + rax + 30] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x011e0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 30], 1 + QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] + QUAD $0x011e3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 30], 1 LONG $0x027cb60f; BYTE $0x1f // movzx edi, byte [rdx + rax + 31] LONG $0xcf6ef9c5 // vmovd xmm1, edi - QUAD $0x011f0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 31], 1 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x011f3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 31], 1 + QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] QUAD $0x021e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 2 QUAD $0x021f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 2 QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] @@ -31105,17 +32377,16 @@ LBB5_166: QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x061e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 6 QUAD $0x061f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 6 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x071e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 7 - QUAD $0x071f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 7 - QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x071e0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 30], 7 + QUAD $0x071f0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 31], 7 + QUAD $0x00000110248c8b4c // mov r9, qword [rsp + 272] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x081e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 8 QUAD $0x081f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 8 QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x091e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 9 QUAD $0x091f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 9 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] QUAD $0x0a1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 10 QUAD $0x0a1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 10 LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] @@ -31141,15 +32412,15 @@ LBB5_166: QUAD $0x011f127c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r10 + 31], 1 QUAD $0x021e2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 30], 2 QUAD $0x021f2a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r13 + 31], 2 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x031e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 3 QUAD $0x031f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 3 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x041e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 4 QUAD $0x041f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 4 QUAD $0x051e224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 30], 5 QUAD $0x051f227c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r12 + 31], 5 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD 
$0x061e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 6 QUAD $0x061f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 6 QUAD $0x071e324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 30], 7 @@ -31157,13 +32428,13 @@ LBB5_166: QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x081e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 8 QUAD $0x081f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 8 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x091e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 9 QUAD $0x091f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 9 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0a1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 10 QUAD $0x0a1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 10 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0b1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 11 QUAD $0x0b1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 11 WORD $0x8948; BYTE $0xd8 // mov rax, rbx @@ -31297,10 +32568,10 @@ LBB5_166: LONG $0x3865e3c4; WORD $0x01e0 // vinserti128 ymm4, ymm3, xmm0, 1 LONG $0x4665e3c4; WORD $0x31c0 // vperm2i128 ymm0, ymm3, ymm0, 49 QUAD $0x00000198248c8b48 // mov rcx, qword [rsp + 408] - LONG $0x7f7ec1c4; WORD $0x8f44; BYTE $0x60 // vmovdqu yword [r15 + 4*rcx + 96], ymm0 - LONG $0x7f7ec1c4; WORD $0x8f54; BYTE $0x40 // vmovdqu yword [r15 + 4*rcx + 64], ymm2 - LONG $0x7f7ec1c4; WORD $0x8f64; BYTE $0x20 // vmovdqu yword [r15 + 4*rcx + 32], ymm4 - LONG $0x7f7ec1c4; WORD $0x8f0c // vmovdqu yword [r15 + 4*rcx], ymm1 + LONG $0x7f7ec1c4; WORD $0x8944; BYTE $0x60 // vmovdqu yword [r9 + 4*rcx + 96], ymm0 + LONG $0x7f7ec1c4; WORD $0x8954; BYTE $0x40 // vmovdqu yword [r9 + 4*rcx + 64], ymm2 + LONG $0x7f7ec1c4; WORD $0x8964; BYTE $0x20 // vmovdqu yword [r9 + 4*rcx + 32], ymm4 + LONG $0x7f7ec1c4; WORD $0x890c // vmovdqu yword [r9 + 4*rcx], ymm1 LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xce // mov rsi, rcx QUAD $0x00000178248c3b48 // cmp rcx, qword [rsp + 376] @@ -31312,7 +32583,7 @@ LBB5_166: QUAD $0x0000018824948b48 // mov rdx, qword [rsp + 392] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] JNE LBB5_43 - JMP LBB5_129 + JMP LBB5_46 LBB5_168: LONG $0xe0e68349 // and r14, -32 @@ -31321,13 +32592,14 @@ LBB5_168: WORD $0x0148; BYTE $0xd0 // add rax, rdx QUAD $0x0000018824848948 // mov qword [rsp + 392], rax QUAD $0x0000017824b4894c // mov qword [rsp + 376], r14 - LONG $0xb7048d4b // lea rax, [r15 + 4*r14] + QUAD $0x00000000b5048d4a // lea rax, [4*r14] + WORD $0x014c; BYTE $0xe8 // add rax, r13 QUAD $0x0000019024848948 // mov qword [rsp + 400], rax LONG $0x6e79c1c4; BYTE $0xc3 // vmovd xmm0, r11d LONG $0x787de2c4; BYTE $0xc0 // vpbroadcastb ymm0, xmm0 QUAD $0x00020024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 512], ymm0 WORD $0xdb31 // xor ebx, ebx - QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 + QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 LBB5_169: QUAD $0x00000198249c8948 // mov qword [rsp + 408], rbx @@ -31337,7 +32609,7 @@ LBB5_169: QUAD $0x000000e024848948 // mov qword [rsp + 224], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x40c88348 // or rax, 64 - QUAD $0x000000d824848948 // mov qword [rsp + 216], rax + QUAD $0x000000c824848948 // mov qword 
[rsp + 200], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x60c88348 // or rax, 96 QUAD $0x0000008824848948 // mov qword [rsp + 136], rax @@ -31349,13 +32621,13 @@ LBB5_169: LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00c00d48; WORD $0x0000 // or rax, 192 - QUAD $0x000000d024848948 // mov qword [rsp + 208], rax + QUAD $0x0000009824848948 // mov qword [rsp + 152], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x00e00d48; WORD $0x0000 // or rax, 224 QUAD $0x0000009024848948 // mov qword [rsp + 144], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01000d48; WORD $0x0000 // or rax, 256 - QUAD $0x000000b824848948 // mov qword [rsp + 184], rax + QUAD $0x000000d024848948 // mov qword [rsp + 208], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01200d48; WORD $0x0000 // or rax, 288 QUAD $0x0000010824848948 // mov qword [rsp + 264], rax @@ -31394,7 +32666,7 @@ LBB5_169: LONG $0x1a44b60f; BYTE $0x05 // movzx eax, byte [rdx + rbx + 5] LONG $0xf06ef9c5 // vmovd xmm6, eax LONG $0x0a44b60f; BYTE $0x06 // movzx eax, byte [rdx + rcx + 6] - QUAD $0x000000f0248c8948 // mov qword [rsp + 240], rcx + QUAD $0x000000d8248c8948 // mov qword [rsp + 216], rcx LONG $0xe06e79c5 // vmovd xmm12, eax LONG $0x1a44b60f; BYTE $0x06 // movzx eax, byte [rdx + rbx + 6] LONG $0xf86ef9c5 // vmovd xmm7, eax @@ -31404,7 +32676,7 @@ LBB5_169: LONG $0xc86ef9c5 // vmovd xmm1, eax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01600d48; WORD $0x0000 // or rax, 352 - LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01800d48; WORD $0x0000 // or rax, 384 LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax @@ -31413,7 +32685,7 @@ LBB5_169: QUAD $0x0000014024848948 // mov qword [rsp + 320], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax + LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x01e00d48; WORD $0x0000 // or rax, 480 QUAD $0x0000012024848948 // mov qword [rsp + 288], rax @@ -31422,11 +32694,11 @@ LBB5_169: QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 WORD $0x8948; BYTE $0xd9 // mov rcx, rbx LONG $0x40c98148; WORD $0x0002; BYTE $0x00 // or rcx, 576 - QUAD $0x000000b0248c8948 // mov qword [rsp + 176], rcx + QUAD $0x000000b8248c8948 // mov qword [rsp + 184], rcx WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x02600d48; WORD $0x0000 // or rax, 608 WORD $0x8949; BYTE $0xc5 // mov r13, rax - QUAD $0x000000c824848948 // mov qword [rsp + 200], rax + QUAD $0x000000f024848948 // mov qword [rsp + 240], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x02800d48; WORD $0x0000 // or rax, 640 QUAD $0x000000a024848948 // mov qword [rsp + 160], rax @@ -31438,13 +32710,13 @@ LBB5_169: QUAD $0x000000e8248c894c // mov qword [rsp + 232], r9 WORD $0x8949; BYTE $0xdb // mov r11, rbx LONG $0xe0cb8149; WORD $0x0002; BYTE $0x00 // or r11, 736 - LONG $0x245c894c; BYTE $0x60 // mov qword [rsp + 96], r11 + LONG $0x245c894c; BYTE $0x68 // mov qword [rsp + 104], r11 WORD $0x8949; BYTE $0xdc // mov r12, rbx LONG $0x00cc8149; WORD $0x0003; BYTE $0x00 // or r12, 768 LONG $0x2464894c; BYTE $0x70 // mov qword [rsp + 112], r12 WORD $0x8949; BYTE $0xd8 // mov r8, rbx LONG $0x20c88149; WORD $0x0003; BYTE $0x00 // or r8, 800 - LONG $0x2444894c; BYTE $0x38 // mov qword [rsp + 56], r8 + LONG 
$0x2444894c; BYTE $0x30 // mov qword [rsp + 48], r8 WORD $0x8949; BYTE $0xdf // mov r15, rbx LONG $0x40cf8149; WORD $0x0003; BYTE $0x00 // or r15, 832 LONG $0x247c894c; BYTE $0x78 // mov qword [rsp + 120], r15 @@ -31454,7 +32726,7 @@ LBB5_169: WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x03800d48; WORD $0x0000 // or rax, 896 WORD $0x8948; BYTE $0xc7 // mov rdi, rax - LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax + LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax WORD $0x8948; BYTE $0xd8 // mov rax, rbx LONG $0x03a00d48; WORD $0x0000 // or rax, 928 WORD $0x8948; BYTE $0xc6 // mov rsi, rax @@ -31481,10 +32753,10 @@ LBB5_169: LONG $0x2079e3c4; WORD $0x0204; BYTE $0x0e // vpinsrb xmm0, xmm0, byte [rdx + rax], 14 LONG $0x2079e3c4; WORD $0x1a04; BYTE $0x0f // vpinsrb xmm0, xmm0, byte [rdx + rbx], 15 WORD $0x8949; BYTE $0xdc // mov r12, rbx - QUAD $0x00000098249c8948 // mov qword [rsp + 152], rbx + QUAD $0x000000b0249c8948 // mov qword [rsp + 176], rbx QUAD $0x000000e0249c8b4c // mov r11, qword [rsp + 224] LONG $0x2061a3c4; WORD $0x1a1c; BYTE $0x01 // vpinsrb xmm3, xmm3, byte [rdx + r11], 1 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] LONG $0x2061e3c4; WORD $0x021c; BYTE $0x02 // vpinsrb xmm3, xmm3, byte [rdx + rax], 2 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] LONG $0x2061e3c4; WORD $0x021c; BYTE $0x03 // vpinsrb xmm3, xmm3, byte [rdx + rax], 3 @@ -31492,47 +32764,47 @@ LBB5_169: LONG $0x2061a3c4; WORD $0x021c; BYTE $0x04 // vpinsrb xmm3, xmm3, byte [rdx + r8], 4 LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] LONG $0x2061a3c4; WORD $0x0a1c; BYTE $0x05 // vpinsrb xmm3, xmm3, byte [rdx + r9], 5 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] LONG $0x2061e3c4; WORD $0x021c; BYTE $0x06 // vpinsrb xmm3, xmm3, byte [rdx + rax], 6 QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] LONG $0x2061e3c4; WORD $0x321c; BYTE $0x07 // vpinsrb xmm3, xmm3, byte [rdx + rsi], 7 - QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] + QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] LONG $0x2061a3c4; WORD $0x3a1c; BYTE $0x08 // vpinsrb xmm3, xmm3, byte [rdx + r15], 8 QUAD $0x0000010824bc8b48 // mov rdi, qword [rsp + 264] LONG $0x2061e3c4; WORD $0x3a1c; BYTE $0x09 // vpinsrb xmm3, xmm3, byte [rdx + rdi], 9 LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] LONG $0x2061e3c4; WORD $0x021c; BYTE $0x0a // vpinsrb xmm3, xmm3, byte [rdx + rax], 10 - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] LONG $0x2061e3c4; WORD $0x1a1c; BYTE $0x0b // vpinsrb xmm3, xmm3, byte [rdx + rbx], 11 LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] LONG $0x2061e3c4; WORD $0x1a1c; BYTE $0x0c // vpinsrb xmm3, xmm3, byte [rdx + rbx], 12 QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] LONG $0x2061e3c4; WORD $0x1a1c; BYTE $0x0d // vpinsrb xmm3, xmm3, byte [rdx + rbx], 13 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] LONG $0x2061e3c4; WORD $0x1a1c; BYTE $0x0e // vpinsrb xmm3, xmm3, byte [rdx + rbx], 14 QUAD $0x0000012024b48b4c // mov r14, qword [rsp + 288] LONG $0x2061a3c4; WORD $0x321c; BYTE $0x0f // vpinsrb xmm3, xmm3, byte [rdx + r14], 15 QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] QUAD $0x01011a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 1], 1 - QUAD $0x000000b0249c8b48 
// mov rbx, qword [rsp + 176] + QUAD $0x000000b8249c8b48 // mov rbx, qword [rsp + 184] QUAD $0x02011a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 1], 2 - QUAD $0x000000c824ac8b4c // mov r13, qword [rsp + 200] + QUAD $0x000000f024ac8b4c // mov r13, qword [rsp + 240] QUAD $0x03012a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 1], 3 QUAD $0x04010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 4 QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] QUAD $0x05010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 5 QUAD $0x000000e8248c8b48 // mov rcx, qword [rsp + 232] QUAD $0x06010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 6 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x07010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 7 LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] QUAD $0x08010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 8 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x09010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 9 LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] QUAD $0x0a010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 10 QUAD $0x0b0112642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r10 + 1], 11 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] QUAD $0x0c011a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 1], 12 QUAD $0x000000a824948b4c // mov r10, qword [rsp + 168] QUAD $0x0d0112642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r10 + 1], 13 @@ -31540,30 +32812,30 @@ LBB5_169: QUAD $0x0e010a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 1], 14 QUAD $0x0f0122642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r12 + 1], 15 QUAD $0x01011a6c2029a3c4 // vpinsrb xmm5, xmm10, byte [rdx + r11 + 1], 1 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x02010a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 1], 2 QUAD $0x0000008824a48b4c // mov r12, qword [rsp + 136] QUAD $0x0301226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 1], 3 QUAD $0x0401026c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r8 + 1], 4 QUAD $0x05010a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 1], 5 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] QUAD $0x06010a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 1], 6 QUAD $0x0701326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 1], 7 QUAD $0x08013a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r15 + 1], 8 QUAD $0x09013a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 1], 9 WORD $0x8949; BYTE $0xfd // mov r13, rdi QUAD $0x0a01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 10 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0b01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 12 QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0d01026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 1], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0e01026c2051e3c4 // vpinsrb xmm5, 
xmm5, byte [rdx + rax + 1], 14 LONG $0x386563c4; WORD $0x01e8 // vinserti128 ymm13, ymm3, xmm0, 1 QUAD $0x0f0132442051a3c4 // vpinsrb xmm0, xmm5, byte [rdx + r14 + 1], 15 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x08 // movzx edi, byte [rdx + rax + 8] LONG $0xcf6e79c5 // vmovd xmm9, edi LONG $0x387de3c4; WORD $0x01c4 // vinserti128 ymm0, ymm0, xmm4, 1 @@ -31574,9 +32846,9 @@ LBB5_169: QUAD $0x0001e024846ff9c5; BYTE $0x00 // vmovdqa xmm0, oword [rsp + 480] QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] QUAD $0x01023a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 2], 1 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x020202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 2 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] QUAD $0x030202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 3 QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x040202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 4 @@ -31584,11 +32856,11 @@ LBB5_169: QUAD $0x050202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 5 QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] QUAD $0x060202442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 2], 6 - LONG $0x244c8b4c; BYTE $0x60 // mov r9, qword [rsp + 96] + LONG $0x244c8b4c; BYTE $0x68 // mov r9, qword [rsp + 104] QUAD $0x07020a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 2], 7 LONG $0x245c8b4c; BYTE $0x70 // mov r11, qword [rsp + 112] QUAD $0x08021a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 2], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x090202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 9 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x0a0202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 10 @@ -31598,12 +32870,12 @@ LBB5_169: QUAD $0x0d0212442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 2], 13 LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0e0202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 14 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0f0202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 2], 15 QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] QUAD $0x0001c0249c6ff9c5; BYTE $0x00 // vmovdqa xmm3, oword [rsp + 448] QUAD $0x0102125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 2], 1 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x0202025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 2], 2 QUAD $0x0302225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 2], 3 QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] @@ -31612,26 +32884,26 @@ LBB5_169: QUAD $0x0502225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 2], 5 QUAD $0x06020a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 2], 6 QUAD $0x0702325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 7 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x08023a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 2], 8 WORD $0x894c; BYTE $0xe9 // mov rcx, r13 QUAD $0x09022a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 2], 9 
LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] QUAD $0x0a02325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 2], 10 - LONG $0x246c8b4c; BYTE $0x28 // mov r13, qword [rsp + 40] + LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] QUAD $0x0b022a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 2], 11 LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] QUAD $0x0c021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 12 QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] QUAD $0x0d021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 13 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] QUAD $0x0e021a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 2], 14 QUAD $0x0000012024ac8b4c // mov r13, qword [rsp + 288] QUAD $0x0f022a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 2], 15 QUAD $0x01033a642021a3c4 // vpinsrb xmm4, xmm11, byte [rdx + r15 + 3], 1 - QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] + QUAD $0x000000b8249c8b48 // mov rbx, qword [rsp + 184] QUAD $0x02031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 2 - QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] + QUAD $0x000000f0249c8b48 // mov rbx, qword [rsp + 240] QUAD $0x03031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 3 QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] QUAD $0x04031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 4 @@ -31640,19 +32912,19 @@ LBB5_169: QUAD $0x060302642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 3], 6 QUAD $0x07030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 7 QUAD $0x08031a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 3], 8 - LONG $0x244c8b4c; BYTE $0x38 // mov r9, qword [rsp + 56] + LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] QUAD $0x09030a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 3], 9 LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] QUAD $0x0a031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 10 LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] QUAD $0x0b031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 11 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] QUAD $0x0c031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 12 QUAD $0x000000a824ac8b4c // mov r13, qword [rsp + 168] QUAD $0x0d032a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r13 + 3], 13 LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] QUAD $0x0e031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 14 - QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] QUAD $0x0f031a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 3], 15 QUAD $0x0103126c2039a3c4 // vpinsrb xmm5, xmm8, byte [rdx + r10 + 3], 1 WORD $0x894c; BYTE $0xd3 // mov rbx, r10 @@ -31661,14 +32933,14 @@ LBB5_169: QUAD $0x0303026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 3 QUAD $0x0403326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 3], 4 QUAD $0x0503226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 3], 5 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x0603026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 6 QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] QUAD $0x0703026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 7 QUAD $0x08033a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx 
+ rdi + 3], 8 QUAD $0x09030a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 3], 9 QUAD $0x0a03326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 3], 10 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0b03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 12 @@ -31676,9 +32948,9 @@ LBB5_169: QUAD $0x0d03026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 3], 13 LONG $0x3865e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm3, xmm0, 1 QUAD $0x0001e024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 480], ymm0 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0e0302442051e3c4 // vpinsrb xmm0, xmm5, byte [rdx + rax + 3], 14 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x09 // movzx edi, byte [rdx + rax + 9] LONG $0xc76e79c5 // vmovd xmm8, edi QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] @@ -31690,9 +32962,9 @@ LBB5_169: LONG $0xdf6e79c5 // vmovd xmm11, edi QUAD $0x0001a024846ff9c5; BYTE $0x00 // vmovdqa xmm0, oword [rsp + 416] QUAD $0x01043a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 4], 1 - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] QUAD $0x02043a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 4], 2 - QUAD $0x000000c824a48b4c // mov r12, qword [rsp + 200] + QUAD $0x000000f024a48b4c // mov r12, qword [rsp + 240] QUAD $0x030422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 4], 3 QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x040432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 4], 4 @@ -31700,7 +32972,7 @@ LBB5_169: QUAD $0x050402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 4], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x060402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 6 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x070402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 7 QUAD $0x08041a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 4], 8 QUAD $0x09040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 9 @@ -31708,37 +32980,37 @@ LBB5_169: QUAD $0x0a0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 10 LONG $0x244c8b4c; BYTE $0x58 // mov r9, qword [rsp + 88] QUAD $0x0b040a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 4], 11 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0c0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 12 QUAD $0x0d042a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 4], 13 LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0e0402442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 4], 14 - QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] + QUAD $0x000000b024948b4c // mov r10, qword [rsp + 176] QUAD $0x0f0412442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 4], 15 QUAD $0x01041a5c2001e3c4 // vpinsrb xmm3, xmm15, byte [rdx + rbx + 4], 1 - QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] QUAD $0x02043a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 4], 2 
QUAD $0x0000008824ac8b4c // mov r13, qword [rsp + 136] QUAD $0x03042a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 4], 3 QUAD $0x0404325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 4], 4 LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0504025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 5 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x06043a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 4], 6 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] QUAD $0x07043a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 4], 7 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] QUAD $0x0804025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 8 QUAD $0x09040a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 4], 9 LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0a04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 10 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x0b040a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 4], 11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 12 QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0d04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0e04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 14 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0f04025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 4], 15 @@ -31750,15 +33022,15 @@ LBB5_169: QUAD $0x050502642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 5], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x060502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 6 - LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] + LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] QUAD $0x07053a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r15 + 5], 7 QUAD $0x08051a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 5], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x090502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 9 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x0a0502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 10 QUAD $0x0b050a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r9 + 5], 11 - LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] + LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] QUAD $0x0c051a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 5], 12 QUAD $0x000000a824b48b48 // mov rsi, qword [rsp + 168] QUAD $0x0d0532642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rsi + 5], 13 @@ -31766,17 +33038,17 @@ LBB5_169: QUAD $0x0e0502642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 5], 14 QUAD $0x0f0512642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r10 + 5], 15 QUAD $0x01051a6c2049e3c4 // vpinsrb xmm5, xmm6, byte [rdx + rbx + 5], 1 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x0205026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 2 QUAD $0x03052a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r13 + 5], 3 QUAD 
$0x0405326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 5], 4 WORD $0x894d; BYTE $0xf1 // mov r9, r14 LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0505026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 5 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x0605026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 6 QUAD $0x07053a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 5], 7 - QUAD $0x000000b824b48b4c // mov r14, qword [rsp + 184] + QUAD $0x000000d024b48b4c // mov r14, qword [rsp + 208] QUAD $0x0805326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 5], 8 QUAD $0x0000010824a48b4c // mov r12, qword [rsp + 264] QUAD $0x0905226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 5], 9 @@ -31787,12 +33059,12 @@ LBB5_169: QUAD $0x0c05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 12 QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0d05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0e05026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 5], 14 LONG $0x386563c4; WORD $0x01f0 // vinserti128 ymm14, ymm3, xmm0, 1 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0f0502442051e3c4 // vpinsrb xmm0, xmm5, byte [rdx + rax + 5], 15 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x0a // movzx edi, byte [rdx + rax + 10] LONG $0xdf6ef9c5 // vmovd xmm3, edi LONG $0x387d63c4; WORD $0x01fc // vinserti128 ymm15, ymm0, xmm4, 1 @@ -31801,9 +33073,9 @@ LBB5_169: LONG $0xe76ef9c5 // vmovd xmm4, edi QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x010602442019e3c4 // vpinsrb xmm0, xmm12, byte [rdx + rax + 6], 1 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x020602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 2 - QUAD $0x000000c8249c8b48 // mov rbx, qword [rsp + 200] + QUAD $0x000000f0249c8b48 // mov rbx, qword [rsp + 240] QUAD $0x03061a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 6], 3 QUAD $0x000000a024848b4c // mov r8, qword [rsp + 160] QUAD $0x040602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 6], 4 @@ -31814,7 +33086,7 @@ LBB5_169: QUAD $0x07063a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 6], 7 LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x080602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 8 - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] QUAD $0x09063a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 6], 9 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x0a0602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 6], 10 @@ -31824,18 +33096,18 @@ LBB5_169: QUAD $0x0d0632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 6], 13 LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] QUAD $0x0e0632442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 6], 14 - QUAD $0x0000009824ac8b4c // mov r13, qword [rsp + 152] + QUAD $0x000000b024ac8b4c // mov r13, qword [rsp + 176] QUAD $0x0f062a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 6], 15 QUAD $0x000000e0249c8b4c // mov r11, qword [rsp + 224] QUAD $0x01061a6c2041a3c4 // vpinsrb xmm5, xmm7, byte [rdx + r11 + 6], 1 - QUAD $0x000000d8248c8b48 // 
mov rcx, qword [rsp + 216] + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x02060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 2 QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] QUAD $0x03060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 3 QUAD $0x04060a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 6], 4 LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x05060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 5 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x06063a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rdi + 6], 6 QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] QUAD $0x07060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 7 @@ -31843,26 +33115,26 @@ LBB5_169: QUAD $0x0906226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 6], 9 LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] QUAD $0x0a060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 10 - LONG $0x24748b4c; BYTE $0x28 // mov r14, qword [rsp + 40] + LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] QUAD $0x0b06326c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r14 + 6], 11 LONG $0x244c8b4c; BYTE $0x20 // mov r9, qword [rsp + 32] QUAD $0x0c060a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r9 + 6], 12 QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] QUAD $0x0d060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0e060a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 6], 14 QUAD $0x0000012024a48b4c // mov r12, qword [rsp + 288] QUAD $0x0f06226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 6], 15 QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] QUAD $0x01070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 1 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] QUAD $0x02070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 2 QUAD $0x03071a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 7], 3 QUAD $0x040702542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 7], 4 QUAD $0x050712542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 7], 5 QUAD $0x000000e824848b4c // mov r8, qword [rsp + 232] QUAD $0x060702542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 7], 6 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x07070a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 7], 7 LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] QUAD $0x080712542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 7], 8 @@ -31870,7 +33142,7 @@ LBB5_169: QUAD $0x0a0702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 10 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] QUAD $0x0b0702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 11 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0c0702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 7], 12 QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] QUAD $0x0d071a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 7], 13 @@ -31878,7 +33150,7 @@ LBB5_169: QUAD $0x0f072a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 7], 15 QUAD $0x01071a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 7], 1 WORD $0x894d; BYTE $0xdd // mov 
r13, r11 - QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] QUAD $0x0207324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 7], 2 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x0307024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 7], 3 @@ -31889,7 +33161,7 @@ LBB5_169: QUAD $0x06073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 6 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] QUAD $0x07073a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 7], 7 - QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] + QUAD $0x000000d0249c8b4c // mov r11, qword [rsp + 208] QUAD $0x08071a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 7], 8 QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] QUAD $0x09070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 9 @@ -31901,9 +33173,9 @@ LBB5_169: QUAD $0x0d070a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 7], 13 LONG $0x3855e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm5, xmm0, 1 QUAD $0x0001a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 416], ymm0 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0e073a442071e3c4 // vpinsrb xmm0, xmm1, byte [rdx + rdi + 7], 14 - QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] LONG $0x0a7cb60f; BYTE $0x0b // movzx edi, byte [rdx + rcx + 11] LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] @@ -31915,30 +33187,30 @@ LBB5_169: LONG $0xd76ef9c5 // vmovd xmm2, edi QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x01083a442031e3c4 // vpinsrb xmm0, xmm9, byte [rdx + rdi + 8], 1 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] QUAD $0x02083a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 8], 2 - QUAD $0x000000c824bc8b4c // mov r15, qword [rsp + 200] + QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] QUAD $0x03083a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 8], 3 QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] QUAD $0x04080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 4 QUAD $0x000000f8248c8b4c // mov r9, qword [rsp + 248] QUAD $0x05080a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 8], 5 QUAD $0x060802442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 8], 6 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x07080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 7 QUAD $0x080812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 8], 8 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x09080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 9 LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] QUAD $0x0a080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 10 LONG $0x24548b4c; BYTE $0x58 // mov r10, qword [rsp + 88] QUAD $0x0b0812442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 8], 11 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] QUAD $0x0c080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 12 QUAD $0x0d081a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 8], 13 LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] QUAD $0x0e080a442079e3c4 // vpinsrb 
xmm0, xmm0, byte [rdx + rcx + 8], 14 - QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] QUAD $0x0f080a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 8], 15 QUAD $0x01082a6c2029a3c4 // vpinsrb xmm5, xmm10, byte [rdx + r13 + 8], 1 WORD $0x8949; BYTE $0xf6 // mov r14, rsi @@ -31948,7 +33220,7 @@ LBB5_169: QUAD $0x04080a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rcx + 8], 4 WORD $0x894c; BYTE $0xe7 // mov rdi, r12 QUAD $0x0508226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 8], 5 - QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x0608326c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rsi + 8], 6 QUAD $0x0000009024a48b4c // mov r12, qword [rsp + 144] QUAD $0x0708226c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r12 + 8], 7 @@ -31957,41 +33229,41 @@ LBB5_169: QUAD $0x0908026c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rax + 8], 9 LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] QUAD $0x0a081a6c2051a3c4 // vpinsrb xmm5, xmm5, byte [rdx + r11 + 8], 10 - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] QUAD $0x0b081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 11 LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] QUAD $0x0c081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 12 QUAD $0x00000140249c8b48 // mov rbx, qword [rsp + 320] QUAD $0x0d081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 13 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] QUAD $0x0e081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 14 QUAD $0x00000120249c8b48 // mov rbx, qword [rsp + 288] QUAD $0x0f081a6c2051e3c4 // vpinsrb xmm5, xmm5, byte [rdx + rbx + 8], 15 QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] QUAD $0x01091a742039e3c4 // vpinsrb xmm6, xmm8, byte [rdx + rbx + 9], 1 - QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] + QUAD $0x000000b8249c8b48 // mov rbx, qword [rsp + 184] QUAD $0x02091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 2 QUAD $0x03093a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r15 + 9], 3 QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] QUAD $0x04091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 4 QUAD $0x05090a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r9 + 9], 5 QUAD $0x060902742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r8 + 9], 6 - LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] + LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] QUAD $0x07093a742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r15 + 9], 7 LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] QUAD $0x08091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 8 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] QUAD $0x09091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 9 LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] QUAD $0x0a091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 10 QUAD $0x0b0912742049a3c4 // vpinsrb xmm6, xmm6, byte [rdx + r10 + 9], 11 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] QUAD $0x0c091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 12 QUAD $0x000000a824848b4c // mov r8, qword [rsp + 168] QUAD $0x0d0902742049a3c4 // vpinsrb xmm6, xmm6, 
byte [rdx + r8 + 9], 13 LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] QUAD $0x0e091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 14 - QUAD $0x00000098249c8b48 // mov rbx, qword [rsp + 152] + QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] QUAD $0x0f091a742049e3c4 // vpinsrb xmm6, xmm6, byte [rdx + rbx + 9], 15 QUAD $0x01092a7c2021a3c4 // vpinsrb xmm7, xmm11, byte [rdx + r13 + 9], 1 QUAD $0x0209327c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r14 + 9], 2 @@ -32002,23 +33274,23 @@ LBB5_169: QUAD $0x0609327c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rsi + 9], 6 WORD $0x8949; BYTE $0xf6 // mov r14, rsi QUAD $0x0709227c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r12 + 9], 7 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] QUAD $0x08090a7c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rcx + 9], 8 QUAD $0x0909027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 9 QUAD $0x0a091a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r11 + 9], 10 - LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] + LONG $0x245c8b4c; BYTE $0x38 // mov r11, qword [rsp + 56] QUAD $0x0b091a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r11 + 9], 11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 12 QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0d09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0e09027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 9], 14 LONG $0x3855e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm5, xmm0, 1 QUAD $0x00048024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1152], ymm0 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0f09026c2041e3c4 // vpinsrb xmm5, xmm7, byte [rdx + rax + 9], 15 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x0c // movzx edi, byte [rdx + rax + 12] LONG $0xc76ef9c5 // vmovd xmm0, edi LONG $0x3855e3c4; WORD $0x01ee // vinserti128 ymm5, ymm5, xmm6, 1 @@ -32028,9 +33300,9 @@ LBB5_169: LONG $0xef6ef9c5 // vmovd xmm5, edi QUAD $0x0000008024ac8b4c // mov r13, qword [rsp + 128] QUAD $0x010a2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 10], 1 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] + QUAD $0x000000b824a48b4c // mov r12, qword [rsp + 184] QUAD $0x020a225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 10], 2 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] QUAD $0x030a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 3 QUAD $0x000000a024948b4c // mov r10, qword [rsp + 160] QUAD $0x040a125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 10], 4 @@ -32040,22 +33312,22 @@ LBB5_169: QUAD $0x070a3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 10], 7 LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x080a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x090a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 9 LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] QUAD $0x0a0a325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 10], 10 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 
88] QUAD $0x0b0a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 11 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0c0a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 12 QUAD $0x0d0a025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 10], 13 LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0e0a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 14 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0f0a025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 10], 15 QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] QUAD $0x010a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 1 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x020a0a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rcx + 10], 2 QUAD $0x030a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 3 QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] @@ -32065,7 +33337,7 @@ LBB5_169: QUAD $0x060a32642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r14 + 10], 6 QUAD $0x00000090249c8b48 // mov rbx, qword [rsp + 144] QUAD $0x070a1a642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rbx + 10], 7 - QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] + QUAD $0x000000d024848b4c // mov r8, qword [rsp + 208] QUAD $0x080a02642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r8 + 10], 8 QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] QUAD $0x090a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 9 @@ -32076,34 +33348,34 @@ LBB5_169: QUAD $0x0c0a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 12 QUAD $0x00000140249c8b4c // mov r11, qword [rsp + 320] QUAD $0x0d0a1a642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r11 + 10], 13 - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] + LONG $0x24748b4c; BYTE $0x28 // mov r14, qword [rsp + 40] QUAD $0x0e0a32642059a3c4 // vpinsrb xmm4, xmm4, byte [rdx + r14 + 10], 14 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0f0a02642059e3c4 // vpinsrb xmm4, xmm4, byte [rdx + rax + 10], 15 QUAD $0x010b2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 11], 1 QUAD $0x020b224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 11], 2 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] QUAD $0x030b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 3 QUAD $0x040b124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 11], 4 QUAD $0x000000f824a48b4c // mov r12, qword [rsp + 248] QUAD $0x050b224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 11], 5 QUAD $0x060b0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 11], 6 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x070b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 7 LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x080b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x090b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 9 QUAD $0x0a0b324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 11], 10 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] QUAD $0x0b0b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 11 - 
LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0c0b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 12 QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0d0b024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 11], 13 LONG $0x246c8b4c; BYTE $0x50 // mov r13, qword [rsp + 80] QUAD $0x0e0b2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 11], 14 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] QUAD $0x0f0b324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 11], 15 QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] QUAD $0x010b0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 11], 1 @@ -32112,7 +33384,7 @@ LBB5_169: QUAD $0x030b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 3 QUAD $0x040b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 11], 4 QUAD $0x050b3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 11], 5 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x060b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 6 QUAD $0x070b1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 11], 7 QUAD $0x080b02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 11], 8 @@ -32120,7 +33392,7 @@ LBB5_169: QUAD $0x090b1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 11], 9 LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0a0b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 10 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0b0b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c0b02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 11], 12 @@ -32128,7 +33400,7 @@ LBB5_169: LONG $0x385de3c4; WORD $0x01db // vinserti128 ymm3, ymm4, xmm3, 1 QUAD $0x000440249c7ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 1088], ymm3 QUAD $0x0e0b32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 11], 14 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x0d // movzx edi, byte [rdx + rax + 13] LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] @@ -32140,32 +33412,32 @@ LBB5_169: LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] QUAD $0x010c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 12], 1 - QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] + QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] QUAD $0x020c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 12], 2 - QUAD $0x000000c824bc8b4c // mov r15, qword [rsp + 200] + QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] QUAD $0x030c3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 12], 3 QUAD $0x040c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 12], 4 QUAD $0x050c22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 12], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x060c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 6 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x070c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 7 LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD 
$0x080c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x090c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 9 LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x0a0c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 10 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] QUAD $0x0b0c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 11 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0c0c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 12 QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0d0c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 12], 13 QUAD $0x0e0c2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 12], 14 QUAD $0x0f0c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 12], 15 QUAD $0x010c0a542051a3c4 // vpinsrb xmm2, xmm5, byte [rdx + r9 + 12], 1 - QUAD $0x000000d824b48b48 // mov rsi, qword [rsp + 216] + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] QUAD $0x020c32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 12], 2 QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] QUAD $0x030c3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 12], 3 @@ -32173,16 +33445,16 @@ LBB5_169: QUAD $0x040c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 4 LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] QUAD $0x050c2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 12], 5 - QUAD $0x000000d0248c8b4c // mov r9, qword [rsp + 208] + QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] QUAD $0x060c0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 12], 6 QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] QUAD $0x070c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 7 - QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] + QUAD $0x000000d0249c8b4c // mov r11, qword [rsp + 208] QUAD $0x080c1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 12], 8 QUAD $0x090c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 9 LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0a0c02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 12], 10 - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] QUAD $0x0b0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 11 LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] QUAD $0x0c0c1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 12], 12 @@ -32199,23 +33471,23 @@ LBB5_169: WORD $0x894d; BYTE $0xe2 // mov r10, r12 QUAD $0x000000e8249c8b48 // mov rbx, qword [rsp + 232] QUAD $0x060d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 6 - LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] + LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] QUAD $0x070d025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 13], 7 LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] QUAD $0x080d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 8 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] QUAD $0x090d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 9 LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] QUAD $0x0a0d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 10 LONG $0x245c8b48; BYTE $0x58 // mov rbx, 
qword [rsp + 88] QUAD $0x0b0d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 11 - LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] + LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] QUAD $0x0c0d3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 13], 12 QUAD $0x000000a8249c8b48 // mov rbx, qword [rsp + 168] QUAD $0x0d0d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 13 LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] QUAD $0x0e0d1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 13], 14 - QUAD $0x0000009824a48b4c // mov r12, qword [rsp + 152] + QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] QUAD $0x0f0d225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 13], 15 QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] QUAD $0x010d1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 13], 1 @@ -32231,7 +33503,7 @@ LBB5_169: QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x090d324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 13], 9 QUAD $0x0a0d024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 13], 10 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0b0d024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 13], 11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c0d024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 13], 12 @@ -32242,7 +33514,7 @@ LBB5_169: QUAD $0x0003e024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 992], ymm0 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0f0d02442071e3c4 // vpinsrb xmm0, xmm1, byte [rdx + rax + 13], 15 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x0e // movzx edi, byte [rdx + rax + 14] LONG $0xcf6ef9c5 // vmovd xmm1, edi LONG $0x387de3c4; WORD $0x01c3 // vinserti128 ymm0, ymm0, xmm3, 1 @@ -32251,9 +33523,9 @@ LBB5_169: LONG $0x027cb60f; BYTE $0x0e // movzx edi, byte [rdx + rax + 14] LONG $0xc76ef9c5 // vmovd xmm0, edi QUAD $0x010e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 14], 1 - QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] + QUAD $0x000000b8248c8b4c // mov r9, qword [rsp + 184] QUAD $0x020e0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 14], 2 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] QUAD $0x030e0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 14], 3 QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x040e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 4 @@ -32263,7 +33535,7 @@ LBB5_169: QUAD $0x070e024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 14], 7 LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] QUAD $0x080e1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 14], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x090e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 14], 9 LONG $0x24548b4c; BYTE $0x78 // mov r10, qword [rsp + 120] QUAD $0x0a0e124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 14], 10 @@ -32277,7 +33549,7 @@ LBB5_169: QUAD $0x0f0e224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 14], 15 QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] QUAD $0x010e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 1 - QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp 
+ 200] QUAD $0x020e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 2 QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] QUAD $0x030e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 3 @@ -32285,26 +33557,26 @@ LBB5_169: QUAD $0x040e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 4 LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x050e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 5 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x060e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 6 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] QUAD $0x070e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 7 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x080e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 8 QUAD $0x090e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 9 LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] QUAD $0x0a0e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 10 - LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0b0e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 11 LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] QUAD $0x0c0e22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 14], 12 QUAD $0x0000014024b48b48 // mov rsi, qword [rsp + 320] QUAD $0x0d0e32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 14], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0e0e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 14 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x0f0e3a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 14], 15 - QUAD $0x000000f024848b4c // mov r8, qword [rsp + 240] + QUAD $0x000000d824848b4c // mov r8, qword [rsp + 216] LONG $0x7cb60f42; WORD $0x0f02 // movzx edi, byte [rdx + r8 + 15] LONG $0xd76ef9c5 // vmovd xmm2, edi QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] @@ -32316,24 +33588,24 @@ LBB5_169: QUAD $0x000000f8248c8b48 // mov rcx, qword [rsp + 248] QUAD $0x050f0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 15], 5 QUAD $0x060f2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 15], 6 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] QUAD $0x070f0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 15], 7 QUAD $0x080f1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 15], 8 QUAD $0x090f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 9 QUAD $0x0a0f12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 15], 10 QUAD $0x0b0f1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 15], 11 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0c0f02542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 15], 12 QUAD $0x0d0f3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 15], 13 QUAD $0x0e0f32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 15], 14 - QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] + QUAD $0x000000b0248c8b4c // mov r9, qword [rsp + 176] QUAD $0x0f0f0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 15], 15 QUAD $0x00000100249c8b48 // mov rbx, qword [rsp + 256] LONG $0x1a7cb60f; BYTE $0x0f // movzx edi, 
byte [rdx + rbx + 15] LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x000000e0249c8b4c // mov r11, qword [rsp + 224] QUAD $0x010f1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 15], 1 - QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] + QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] QUAD $0x020f0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 15], 2 QUAD $0x0000008824948b4c // mov r10, qword [rsp + 136] QUAD $0x030f125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 15], 3 @@ -32341,21 +33613,21 @@ LBB5_169: QUAD $0x040f0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 15], 4 LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x050f0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 15], 5 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] QUAD $0x060f0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 15], 6 QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] QUAD $0x070f325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 15], 7 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x080f3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 15], 8 QUAD $0x0000010824bc8b48 // mov rdi, qword [rsp + 264] QUAD $0x090f3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 15], 9 LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x0a0f3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 15], 10 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0b0f3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 15], 11 QUAD $0x0c0f225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 15], 12 QUAD $0x0d0f325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 15], 13 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0e0f325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 15], 14 QUAD $0x0000012024bc8b4c // mov r15, qword [rsp + 288] QUAD $0x0f0f3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 15], 15 @@ -32367,9 +33639,9 @@ LBB5_169: LONG $0xc76ef9c5 // vmovd xmm0, edi QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] QUAD $0x011032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 1 - QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] QUAD $0x021032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 2 - QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x000000f024b48b48 // mov rsi, qword [rsp + 240] QUAD $0x031032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 3 QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x041032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 4 @@ -32377,11 +33649,11 @@ LBB5_169: QUAD $0x051032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 5 QUAD $0x06102a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 16], 6 WORD $0x894d; BYTE $0xec // mov r12, r13 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x071032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 7 LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] QUAD $0x081032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 8 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD 
$0x091032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 9 LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] QUAD $0x0a1032442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 16], 10 @@ -32396,7 +33668,7 @@ LBB5_169: LONG $0x1a7cb60f; BYTE $0x10 // movzx edi, byte [rdx + rbx + 16] LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x01101a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 16], 1 - QUAD $0x000000d8248c8b4c // mov r9, qword [rsp + 216] + QUAD $0x000000c8248c8b4c // mov r9, qword [rsp + 200] QUAD $0x02100a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 16], 2 QUAD $0x0310124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 16], 3 QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] @@ -32405,51 +33677,51 @@ LBB5_169: QUAD $0x0510024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 16], 5 QUAD $0x06100a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 16], 6 QUAD $0x0710324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 16], 7 - QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] QUAD $0x0810324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 16], 8 QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] QUAD $0x0910024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 16], 9 LONG $0x246c8b4c; BYTE $0x40 // mov r13, qword [rsp + 64] QUAD $0x0a102a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 16], 10 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0b103a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 16], 11 LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x0c103a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 16], 12 QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] QUAD $0x0d103a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 16], 13 - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] + LONG $0x24748b4c; BYTE $0x28 // mov r14, qword [rsp + 40] QUAD $0x0e10324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 16], 14 QUAD $0x0f103a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 16], 15 - QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] LONG $0x3a7cb60f; BYTE $0x11 // movzx edi, byte [rdx + rdi + 17] LONG $0xd76ef9c5 // vmovd xmm2, edi QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x01113a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 17], 1 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] QUAD $0x02110a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 17], 2 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] QUAD $0x03113a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 17], 3 QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] QUAD $0x04113a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 17], 4 QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] QUAD $0x051112542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 17], 5 QUAD $0x061122542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 17], 6 - LONG $0x24648b4c; BYTE $0x60 // mov r12, qword [rsp + 96] + LONG $0x24648b4c; BYTE $0x68 // mov r12, qword [rsp + 104] QUAD $0x071122542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 17], 7 LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x08113a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 17], 8 - LONG $0x247c8b48; BYTE $0x38 // 
mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x09113a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 17], 9 LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] QUAD $0x0a111a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 17], 10 LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] QUAD $0x0b113a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 17], 11 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0c113a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 17], 12 QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] QUAD $0x0d111a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 17], 13 QUAD $0x0e1102542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 17], 14 - QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x0f113a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 17], 15 QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] LONG $0x3a7cb60f; BYTE $0x11 // movzx edi, byte [rdx + rdi + 17] @@ -32463,14 +33735,14 @@ LBB5_169: QUAD $0x04113a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 17], 4 LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x05113a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 17], 5 - QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] + QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] QUAD $0x06113a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 17], 6 QUAD $0x0000009024848b4c // mov r8, qword [rsp + 144] QUAD $0x0711025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 17], 7 QUAD $0x0811325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 8 QUAD $0x0911025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 17], 9 QUAD $0x0a112a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 17], 10 - LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] + LONG $0x244c8b4c; BYTE $0x38 // mov r9, qword [rsp + 56] QUAD $0x0b110a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 17], 11 LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0c11325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 17], 12 @@ -32483,13 +33755,13 @@ LBB5_169: QUAD $0x0f1102442061e3c4 // vpinsrb xmm0, xmm3, byte [rdx + rax + 17], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x00036024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 864], ymm0 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x12 // movzx edi, byte [rdx + rax + 18] LONG $0xc76ef9c5 // vmovd xmm0, edi QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x011202442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 18], 1 QUAD $0x02120a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 18], 2 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] QUAD $0x03120a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 18], 3 QUAD $0x000000a024ac8b4c // mov r13, qword [rsp + 160] QUAD $0x04122a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 18], 4 @@ -32499,24 +33771,24 @@ LBB5_169: QUAD $0x071222442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 18], 7 LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] QUAD $0x08120a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 18], 8 - LONG $0x24548b4c; BYTE $0x38 // mov r10, qword [rsp + 56] + LONG $0x24548b4c; BYTE $0x30 // mov r10, qword 
[rsp + 48] QUAD $0x091212442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 18], 9 QUAD $0x0a121a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 18], 10 LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] QUAD $0x0b123a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 18], 11 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0c123a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 18], 12 QUAD $0x0d121a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 18], 13 LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] QUAD $0x0e123a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 18], 14 - QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] QUAD $0x0f123a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 18], 15 QUAD $0x00000100249c8b4c // mov r11, qword [rsp + 256] LONG $0x7cb60f42; WORD $0x121a // movzx edi, byte [rdx + r11 + 18] LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] QUAD $0x01123a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 18], 1 - QUAD $0x000000d824b48b4c // mov r14, qword [rsp + 216] + QUAD $0x000000c824b48b4c // mov r14, qword [rsp + 200] QUAD $0x0212324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 18], 2 QUAD $0x0000008824a48b4c // mov r12, qword [rsp + 136] QUAD $0x0312224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 18], 3 @@ -32526,7 +33798,7 @@ LBB5_169: QUAD $0x05123a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 18], 5 QUAD $0x06123a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 18], 6 QUAD $0x0712024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 18], 7 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x08123a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 18], 8 QUAD $0x0000010824bc8b48 // mov rdi, qword [rsp + 264] QUAD $0x09123a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 18], 9 @@ -32536,37 +33808,37 @@ LBB5_169: QUAD $0x0c12324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 18], 12 QUAD $0x00000140248c8b4c // mov r9, qword [rsp + 320] QUAD $0x0d120a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 18], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0e123a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 18], 14 QUAD $0x0000012024848b4c // mov r8, qword [rsp + 288] QUAD $0x0f12024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 18], 15 - QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] LONG $0x3a7cb60f; BYTE $0x13 // movzx edi, byte [rdx + rdi + 19] LONG $0xd76ef9c5 // vmovd xmm2, edi QUAD $0x011302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 1 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x021302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 2 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] + QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] QUAD $0x03133a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 19], 3 QUAD $0x04132a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 19], 4 QUAD $0x000000f824848b48 // mov rax, qword [rsp + 248] QUAD $0x051302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x061302542069e3c4 // 
vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 6 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x071302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 7 QUAD $0x08130a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 19], 8 QUAD $0x091312542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 19], 9 QUAD $0x0a131a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 19], 10 LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] QUAD $0x0b131a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 19], 11 - LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] + LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] QUAD $0x0c132a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 19], 12 QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0d1302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 13 LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0e1302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 14 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0f1302542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 19], 15 LONG $0x7cb60f42; WORD $0x131a // movzx edi, byte [rdx + r11 + 19] LONG $0xdf6ef9c5 // vmovd xmm3, edi @@ -32581,31 +33853,31 @@ LBB5_169: QUAD $0x06133a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 19], 6 QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] QUAD $0x0713025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 7 - QUAD $0x000000b8248c8b48 // mov rcx, qword [rsp + 184] + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] QUAD $0x08130a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 19], 8 QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] QUAD $0x0913025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 9 LONG $0x24548b4c; BYTE $0x40 // mov r10, qword [rsp + 64] QUAD $0x0a13125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 19], 10 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0b13025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 19], 11 QUAD $0x0c13325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 19], 12 QUAD $0x0d130a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 19], 13 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] + LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] QUAD $0x0e130a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 19], 14 QUAD $0x0f13025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 19], 15 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00032024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 800], ymm0 LONG $0x3865e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm3, xmm2, 1 QUAD $0x00034024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 832], ymm0 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x14 // movzx edi, byte [rdx + rax + 20] LONG $0xc76ef9c5 // vmovd xmm0, edi QUAD $0x0000008024b48b48 // mov rsi, qword [rsp + 128] QUAD $0x011432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 1 - QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x000000b8249c8b4c // mov r11, qword [rsp + 184] QUAD $0x02141a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 20], 2 - QUAD $0x000000c824a48b4c // mov r12, qword [rsp + 200] + QUAD $0x000000f024a48b4c // mov r12, qword [rsp + 240] QUAD 
$0x031422442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 20], 3 QUAD $0x000000a024b48b48 // mov rsi, qword [rsp + 160] QUAD $0x041432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 4 @@ -32613,11 +33885,11 @@ LBB5_169: QUAD $0x051402442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 20], 5 QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] QUAD $0x06143a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 20], 6 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x071432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 7 LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] QUAD $0x081432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 8 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x091432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 9 LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] QUAD $0x0a1432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 10 @@ -32627,14 +33899,14 @@ LBB5_169: QUAD $0x0d1432442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 20], 13 LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] QUAD $0x0e1432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 14 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] QUAD $0x0f1432442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 20], 15 QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] LONG $0x3a7cb60f; BYTE $0x14 // movzx edi, byte [rdx + rdi + 20] LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] QUAD $0x01143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 1 - QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] QUAD $0x02143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 2 QUAD $0x00000088249c8b48 // mov rbx, qword [rsp + 136] QUAD $0x03141a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 20], 3 @@ -32642,7 +33914,7 @@ LBB5_169: QUAD $0x04143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 4 LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x05143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 5 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x06143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 6 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] QUAD $0x07143a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 20], 7 @@ -32650,7 +33922,7 @@ LBB5_169: QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] QUAD $0x09140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 9 QUAD $0x0a14124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 20], 10 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x0b140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 11 LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x0c140a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 20], 12 @@ -32670,17 +33942,17 @@ LBB5_169: QUAD $0x041502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 4 QUAD $0x051502542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 21], 5 QUAD $0x06153a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 21], 6 - LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] + LONG 
$0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] QUAD $0x071502542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 21], 7 LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] QUAD $0x08153a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 21], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x091502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 9 LONG $0x24648b4c; BYTE $0x78 // mov r12, qword [rsp + 120] QUAD $0x0a1522542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 21], 10 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] QUAD $0x0b1502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 11 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0c1502542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 21], 12 QUAD $0x0d1532542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 21], 13 LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] @@ -32691,42 +33963,42 @@ LBB5_169: LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] QUAD $0x0115325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 21], 1 - QUAD $0x000000d824948b4c // mov r10, qword [rsp + 216] + QUAD $0x000000c824948b4c // mov r10, qword [rsp + 200] QUAD $0x0215125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 21], 2 QUAD $0x03151a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 21], 3 QUAD $0x000000c0249c8b48 // mov rbx, qword [rsp + 192] QUAD $0x04151a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 21], 4 LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x05150a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 21], 5 - QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] + QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] QUAD $0x0615325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 21], 6 QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] QUAD $0x0715325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 21], 7 - QUAD $0x000000b824b48b48 // mov rsi, qword [rsp + 184] + QUAD $0x000000d024b48b48 // mov rsi, qword [rsp + 208] QUAD $0x0815325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 21], 8 QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x0915325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 21], 9 LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x0a153a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 21], 10 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0b153a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 21], 11 LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x0c153a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 21], 12 QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] QUAD $0x0d153a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 21], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0e153a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 21], 14 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x0002e024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 736], ymm0 QUAD $0x0f150a442061a3c4 // vpinsrb xmm0, xmm3, byte [rdx + r9 + 21], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x00030024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 768], ymm0 - QUAD $0x000000f024bc8b48 // mov 
rdi, qword [rsp + 240] + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] LONG $0x3a7cb60f; BYTE $0x16 // movzx edi, byte [rdx + rdi + 22] LONG $0xc76ef9c5 // vmovd xmm0, edi QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x01163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 1 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] QUAD $0x02163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 2 QUAD $0x03162a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 22], 3 QUAD $0x000000a0248c8b4c // mov r9, qword [rsp + 160] @@ -32737,17 +34009,17 @@ LBB5_169: QUAD $0x06163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 6 QUAD $0x071602442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 22], 7 QUAD $0x08163a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 22], 8 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x09163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 9 QUAD $0x0a1622442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 22], 10 LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] QUAD $0x0b163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 11 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0c163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 12 QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x0d163a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 22], 13 QUAD $0x0e1602442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 22], 14 - QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] QUAD $0x0f163a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 22], 15 LONG $0x7cb60f42; WORD $0x161a // movzx edi, byte [rdx + r11 + 22] LONG $0xcf6ef9c5 // vmovd xmm1, edi @@ -32757,31 +34029,31 @@ LBB5_169: QUAD $0x0316024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 3 QUAD $0x04161a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 22], 4 QUAD $0x05160a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 22], 5 - QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] + QUAD $0x0000009824a48b4c // mov r12, qword [rsp + 152] QUAD $0x0616224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 22], 6 QUAD $0x00000090249c8b48 // mov rbx, qword [rsp + 144] QUAD $0x07161a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 22], 7 - QUAD $0x000000b824948b4c // mov r10, qword [rsp + 184] + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] QUAD $0x0816124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 22], 8 QUAD $0x0916324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 22], 9 LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0a16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 10 - LONG $0x24448b4c; BYTE $0x28 // mov r8, qword [rsp + 40] + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] QUAD $0x0b16024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 22], 11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 12 QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0d16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0e16024c2071e3c4 // vpinsrb xmm1, xmm1, byte 
[rdx + rax + 22], 14 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0f16024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 22], 15 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x17 // movzx edi, byte [rdx + rax + 23] LONG $0xd76ef9c5 // vmovd xmm2, edi QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x011702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 1 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x021702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 2 WORD $0x894d; BYTE $0xee // mov r14, r13 QUAD $0x03172a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 23], 3 @@ -32790,17 +34062,17 @@ LBB5_169: QUAD $0x05172a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 23], 5 QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] QUAD $0x061732542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 23], 6 - LONG $0x244c8b4c; BYTE $0x60 // mov r9, qword [rsp + 96] + LONG $0x244c8b4c; BYTE $0x68 // mov r9, qword [rsp + 104] QUAD $0x07170a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 23], 7 LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x081702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x091702542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 23], 9 LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] QUAD $0x0a170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 10 LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] QUAD $0x0b170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 11 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] QUAD $0x0c170a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 23], 12 QUAD $0x000000a8249c8b4c // mov r11, qword [rsp + 168] QUAD $0x0d171a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 23], 13 @@ -32812,7 +34084,7 @@ LBB5_169: LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] QUAD $0x01173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 1 - QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] + QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] QUAD $0x02173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 2 QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] QUAD $0x03173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 3 @@ -32832,18 +34104,18 @@ LBB5_169: QUAD $0x0c173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 12 QUAD $0x0000014024bc8b48 // mov rdi, qword [rsp + 320] QUAD $0x0d173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0e173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 14 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x0f173a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 23], 15 LONG $0x387563c4; WORD $0x01d0 // vinserti128 ymm10, ymm1, xmm0, 1 LONG $0x386563c4; WORD $0x01da // vinserti128 ymm11, ymm3, xmm2, 1 - QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] + QUAD $0x000000d824bc8b48 // mov rdi, qword [rsp + 216] LONG $0x3a7cb60f; BYTE $0x18 // movzx edi, byte [rdx + rdi + 24] LONG $0xc76ef9c5 // vmovd 
xmm0, edi QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x01183a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rdi + 24], 1 - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] QUAD $0x02183a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 24], 2 QUAD $0x031832442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r14 + 24], 3 QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] @@ -32862,14 +34134,14 @@ LBB5_169: QUAD $0x0d181a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 24], 13 LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0e1802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 14 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0f1802442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 24], 15 QUAD $0x0000010024b48b4c // mov r14, qword [rsp + 256] LONG $0x7cb60f42; WORD $0x1832 // movzx edi, byte [rdx + r14 + 24] LONG $0xcf6ef9c5 // vmovd xmm1, edi QUAD $0x000000e0248c8b4c // mov r9, qword [rsp + 224] QUAD $0x01180a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 24], 1 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] + QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] QUAD $0x0218024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 2 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x0318024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 3 @@ -32877,7 +34149,7 @@ LBB5_169: QUAD $0x04181a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 24], 4 LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] QUAD $0x0518024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 24], 5 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x0618024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 6 QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] QUAD $0x0718024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 7 @@ -32886,23 +34158,23 @@ LBB5_169: QUAD $0x09181a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 24], 9 LONG $0x246c8b4c; BYTE $0x40 // mov r13, qword [rsp + 64] QUAD $0x0a182a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 24], 10 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x0b180a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 24], 11 LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0c18324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 24], 12 QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0d18024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 13 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] QUAD $0x0e181a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 24], 14 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0f18024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 24], 15 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] LONG $0x027cb60f; BYTE $0x19 // movzx edi, byte [rdx + rax + 25] LONG $0xd76ef9c5 // vmovd xmm2, edi QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x011902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 1 QUAD $0x02193a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 25], 2 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x000000f024848b48 // 
mov rax, qword [rsp + 240] QUAD $0x031902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 3 QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] QUAD $0x041902542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rax + 25], 4 @@ -32910,37 +34182,37 @@ LBB5_169: QUAD $0x05193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 5 QUAD $0x000000e824bc8b48 // mov rdi, qword [rsp + 232] QUAD $0x06193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 6 - LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x07193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 7 LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x08193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 8 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x09193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 9 LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] QUAD $0x0a193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 10 QUAD $0x0b1922542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 25], 11 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] QUAD $0x0c193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 12 QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x0d193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 13 LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] QUAD $0x0e193a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 25], 14 - QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] QUAD $0x0f193a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 25], 15 LONG $0x7cb60f42; WORD $0x1932 // movzx edi, byte [rdx + r14 + 25] LONG $0xdf6ef9c5 // vmovd xmm3, edi QUAD $0x01190a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 25], 1 - QUAD $0x000000d8248c8b4c // mov r9, qword [rsp + 216] + QUAD $0x000000c8248c8b4c // mov r9, qword [rsp + 200] QUAD $0x02190a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 25], 2 QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] QUAD $0x03193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 3 QUAD $0x04191a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 25], 4 QUAD $0x0519025c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r8 + 25], 5 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x06193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 6 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] QUAD $0x07193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 7 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x08193a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 25], 8 QUAD $0x0919125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 25], 9 QUAD $0x0a192a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 25], 10 @@ -32955,31 +34227,31 @@ LBB5_169: QUAD $0x0f190a442061e3c4 // vpinsrb xmm0, xmm3, byte [rdx + rcx + 25], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x00024024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 576], ymm0 - QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] + QUAD $0x000000d8249c8b4c // mov r11, qword [rsp + 216] LONG $0x7cb60f42; WORD $0x1a1a // movzx edi, byte [rdx + r11 + 26] LONG 
$0xc76ef9c5 // vmovd xmm0, edi QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] QUAD $0x011a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 1 - QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] + QUAD $0x000000b824848b4c // mov r8, qword [rsp + 184] QUAD $0x021a02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 26], 2 - QUAD $0x000000c8248c8b48 // mov rcx, qword [rsp + 200] + QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] QUAD $0x031a0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 26], 3 QUAD $0x041a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 4 QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] QUAD $0x051a32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 26], 5 QUAD $0x000000e824848b48 // mov rax, qword [rsp + 232] QUAD $0x061a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 6 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x071a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 7 LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x081a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x091a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 9 LONG $0x24648b4c; BYTE $0x78 // mov r12, qword [rsp + 120] QUAD $0x0a1a22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 26], 10 LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] QUAD $0x0b1a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 11 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0c1a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 12 QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] QUAD $0x0d1a02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 26], 13 @@ -32999,21 +34271,21 @@ LBB5_169: QUAD $0x041a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 4 LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x051a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 5 - QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] + QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] QUAD $0x061a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 6 QUAD $0x00000090248c8b4c // mov r9, qword [rsp + 144] QUAD $0x071a0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 26], 7 - QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] + QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] QUAD $0x081a3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 26], 8 QUAD $0x091a124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 26], 9 LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] QUAD $0x0a1a1a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rbx + 26], 10 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] QUAD $0x0b1a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 11 LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x0c1a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 12 QUAD $0x0d1a2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 26], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0e1a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 14 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp 
+ 288] QUAD $0x0f1a3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 26], 15 @@ -33023,32 +34295,32 @@ LBB5_169: QUAD $0x011b1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 27], 1 QUAD $0x021b02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 27], 2 QUAD $0x031b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 3 - QUAD $0x000000a024848b4c // mov r8, qword [rsp + 160] - QUAD $0x041b02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 27], 4 + QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] + QUAD $0x041b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 4 QUAD $0x051b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 27], 5 - QUAD $0x000000e824b48b48 // mov rsi, qword [rsp + 232] - QUAD $0x061b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 27], 6 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] - QUAD $0x071b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 7 - LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] - QUAD $0x081b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 8 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] - QUAD $0x091b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 9 + QUAD $0x000000e824ac8b4c // mov r13, qword [rsp + 232] + QUAD $0x061b2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 27], 6 + LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] + QUAD $0x071b02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 27], 7 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x081b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 27], 8 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x091b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 27], 9 QUAD $0x0a1b22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 27], 10 - LONG $0x246c8b4c; BYTE $0x58 // mov r13, qword [rsp + 88] - QUAD $0x0b1b2a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r13 + 27], 11 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] - QUAD $0x0c1b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 12 - QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] - QUAD $0x0d1b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 13 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x0b1b32542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rsi + 27], 11 + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + QUAD $0x0c1b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 27], 12 + QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] + QUAD $0x0d1b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 27], 13 QUAD $0x0e1b32542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r14 + 27], 14 - QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - QUAD $0x0f1b0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 27], 15 - QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] - LONG $0x0a7cb60f; BYTE $0x1b // movzx edi, byte [rdx + rcx + 27] + QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + QUAD $0x0f1b3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 27], 15 + QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] + LONG $0x3a7cb60f; BYTE $0x1b // movzx edi, byte [rdx + rdi + 27] LONG $0xdf6ef9c5 // vmovd xmm3, edi - QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] - QUAD $0x011b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 1 + QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x011b3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 27], 1 QUAD $0x021b025c2061e3c4 // vpinsrb 
xmm3, xmm3, byte [rdx + rax + 27], 2 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x031b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 3 @@ -33056,155 +34328,154 @@ LBB5_169: QUAD $0x041b325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 27], 4 LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x051b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 5 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x061b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 6 QUAD $0x071b0a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r9 + 27], 7 QUAD $0x081b3a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r15 + 27], 8 QUAD $0x091b125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 27], 9 QUAD $0x0a1b1a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rbx + 27], 10 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0b1b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 11 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c1b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 12 - QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] - QUAD $0x0d1b0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 27], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0e1b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 14 - QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] - QUAD $0x0f1b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 15 + QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] + QUAD $0x0d1b025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 27], 13 + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + QUAD $0x0e1b3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 27], 14 + QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] + QUAD $0x0f1b3a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rdi + 27], 15 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x00026024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 608], ymm0 LONG $0x3865e3c4; WORD $0x01c2 // vinserti128 ymm0, ymm3, xmm2, 1 QUAD $0x00028024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 640], ymm0 - QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] - LONG $0x7cb60f42; WORD $0x1c12 // movzx edi, byte [rdx + r10 + 28] + QUAD $0x000000d8248c8b4c // mov r9, qword [rsp + 216] + LONG $0x7cb60f42; WORD $0x1c0a // movzx edi, byte [rdx + r9 + 28] LONG $0xc76ef9c5 // vmovd xmm0, edi QUAD $0x011c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 28], 1 - QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x000000b824bc8b4c // mov r15, qword [rsp + 184] QUAD $0x021c3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 28], 2 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] - QUAD $0x031c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 28], 3 - QUAD $0x041c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 4 + QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] + QUAD $0x031c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 28], 3 + QUAD $0x041c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 28], 4 QUAD $0x000000f824a48b4c // mov r12, qword [rsp + 248] QUAD $0x051c22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 28], 5 - QUAD $0x061c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 6 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x071c02442079e3c4 // vpinsrb 
xmm0, xmm0, byte [rdx + rax + 28], 7 - LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] - QUAD $0x081c0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 28], 8 - LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] - QUAD $0x091c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 9 - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] - QUAD $0x0a1c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 28], 10 - QUAD $0x0b1c2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 28], 11 - LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] - QUAD $0x0c1c1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 28], 12 - QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] - QUAD $0x0d1c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 28], 13 - LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] - QUAD $0x0e1c1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 28], 14 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x0f1c02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 28], 15 - QUAD $0x0000010024848b48 // mov rax, qword [rsp + 256] - LONG $0x027cb60f; BYTE $0x1c // movzx edi, byte [rdx + rax + 28] + QUAD $0x061c2a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r13 + 28], 6 + QUAD $0x071c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 7 + LONG $0x24448b4c; BYTE $0x70 // mov r8, qword [rsp + 112] + QUAD $0x081c02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 28], 8 + LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] + QUAD $0x091c12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 28], 9 + LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] + QUAD $0x0a1c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 28], 10 + QUAD $0x0b1c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 11 + LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] + QUAD $0x0c1c1a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rbx + 28], 12 + QUAD $0x000000a8248c8b48 // mov rcx, qword [rsp + 168] + QUAD $0x0d1c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 28], 13 + LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] + QUAD $0x0e1c0a442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rcx + 28], 14 + QUAD $0x000000b024b48b48 // mov rsi, qword [rsp + 176] + QUAD $0x0f1c32442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rsi + 28], 15 + QUAD $0x0000010024b48b48 // mov rsi, qword [rsp + 256] + LONG $0x327cb60f; BYTE $0x1c // movzx edi, byte [rdx + rsi + 28] LONG $0xcf6ef9c5 // vmovd xmm1, edi - QUAD $0x000000e024848b48 // mov rax, qword [rsp + 224] - QUAD $0x011c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 1 - QUAD $0x000000d824848b48 // mov rax, qword [rsp + 216] - QUAD $0x021c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 2 + QUAD $0x000000e024b48b48 // mov rsi, qword [rsp + 224] + QUAD $0x011c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 1 + QUAD $0x000000c824b48b48 // mov rsi, qword [rsp + 200] + QUAD $0x021c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 2 QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] QUAD $0x031c324c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rsi + 28], 3 QUAD $0x041c324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 28], 4 LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] QUAD $0x051c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 5 - QUAD $0x000000d024b48b4c // mov r14, qword [rsp + 208] + QUAD $0x0000009824b48b4c // mov r14, qword [rsp + 152] QUAD $0x061c324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 28], 
6 QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] QUAD $0x071c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 7 - QUAD $0x000000b824bc8b48 // mov rdi, qword [rsp + 184] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x081c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 8 QUAD $0x0000010824bc8b48 // mov rdi, qword [rsp + 264] QUAD $0x091c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 9 LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] QUAD $0x0a1c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 10 - LONG $0x246c8b4c; BYTE $0x28 // mov r13, qword [rsp + 40] + LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] QUAD $0x0b1c2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 28], 11 LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x0c1c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 12 - QUAD $0x0d1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x0e1c0a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rcx + 28], 14 + QUAD $0x0d1c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 13 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0e1c024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 28], 14 QUAD $0x0000012024bc8b48 // mov rdi, qword [rsp + 288] QUAD $0x0f1c3a4c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rdi + 28], 15 - LONG $0x7cb60f42; WORD $0x1d12 // movzx edi, byte [rdx + r10 + 29] + LONG $0x7cb60f42; WORD $0x1d0a // movzx edi, byte [rdx + r9 + 29] LONG $0xd76ef9c5 // vmovd xmm2, edi - QUAD $0x0000008024948b4c // mov r10, qword [rsp + 128] - QUAD $0x011d12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 29], 1 + QUAD $0x00000080248c8b4c // mov r9, qword [rsp + 128] + QUAD $0x011d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 1 QUAD $0x021d3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 29], 2 - QUAD $0x000000c824bc8b48 // mov rdi, qword [rsp + 200] - QUAD $0x031d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 3 + QUAD $0x031d1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 29], 3 QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] QUAD $0x041d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 4 QUAD $0x051d22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 29], 5 QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] QUAD $0x061d3a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r15 + 29], 6 - LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x071d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 7 - QUAD $0x081d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 8 - QUAD $0x091d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 9 - LONG $0x24648b4c; BYTE $0x78 // mov r12, qword [rsp + 120] - QUAD $0x0a1d22542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r12 + 29], 10 - LONG $0x244c8b4c; BYTE $0x58 // mov r9, qword [rsp + 88] - QUAD $0x0b1d0a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r9 + 29], 11 - QUAD $0x0c1d1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 29], 12 + QUAD $0x081d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 8 + QUAD $0x091d12542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r10 + 29], 9 + LONG $0x245c8b4c; BYTE $0x78 // mov r11, qword [rsp + 120] + QUAD $0x0a1d1a542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r11 + 29], 10 + LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] + QUAD 
$0x0b1d02542069a3c4 // vpinsrb xmm2, xmm2, byte [rdx + r8 + 29], 11 + QUAD $0x0c1d1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 29], 12 QUAD $0x000000a824bc8b48 // mov rdi, qword [rsp + 168] QUAD $0x0d1d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 13 - QUAD $0x0e1d1a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rbx + 29], 14 - QUAD $0x0000009824bc8b48 // mov rdi, qword [rsp + 152] - QUAD $0x0f1d3a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rdi + 29], 15 - QUAD $0x0000010024848b4c // mov r8, qword [rsp + 256] - LONG $0x7cb60f42; WORD $0x1d02 // movzx edi, byte [rdx + r8 + 29] + QUAD $0x0e1d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 14 + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x0f1d0a542069e3c4 // vpinsrb xmm2, xmm2, byte [rdx + rcx + 29], 15 + QUAD $0x00000100249c8b48 // mov rbx, qword [rsp + 256] + LONG $0x1a7cb60f; BYTE $0x1d // movzx edi, byte [rdx + rbx + 29] LONG $0xdf6ef9c5 // vmovd xmm3, edi - QUAD $0x000000e0249c8b4c // mov r11, qword [rsp + 224] - QUAD $0x011d1a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r11 + 29], 1 - QUAD $0x021d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 2 + QUAD $0x000000e024a48b4c // mov r12, qword [rsp + 224] + QUAD $0x011d225c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r12 + 29], 1 + QUAD $0x000000c824948b4c // mov r10, qword [rsp + 200] + QUAD $0x021d125c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r10 + 29], 2 QUAD $0x031d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 3 QUAD $0x000000c024b48b48 // mov rsi, qword [rsp + 192] QUAD $0x041d325c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rsi + 29], 4 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] - QUAD $0x051d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 5 + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + QUAD $0x051d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 5 QUAD $0x061d325c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r14 + 29], 6 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] - QUAD $0x071d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 7 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] - QUAD $0x081d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 8 - QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] - QUAD $0x091d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 9 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x0a1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 10 + QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] + QUAD $0x071d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 7 + QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x081d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 8 + QUAD $0x00000108248c8b48 // mov rcx, qword [rsp + 264] + QUAD $0x091d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 9 + LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] + QUAD $0x0a1d0a5c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 10 QUAD $0x0b1d2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 29], 11 LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] QUAD $0x0c1d2a5c2061a3c4 // vpinsrb xmm3, xmm3, byte [rdx + r13 + 29], 12 - QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] - QUAD $0x0d1d025c2061e3c4 // vpinsrb xmm3, xmm3, byte [rdx + rax + 29], 13 - QUAD $0x0e1d0a642061e3c4 // vpinsrb xmm4, xmm3, byte [rdx + rcx + 29], 14 + QUAD $0x00000140248c8b48 // mov rcx, qword [rsp + 320] + QUAD $0x0d1d0a5c2061e3c4 
// vpinsrb xmm3, xmm3, byte [rdx + rcx + 29], 13 + QUAD $0x0e1d02642061e3c4 // vpinsrb xmm4, xmm3, byte [rdx + rax + 29], 14 LONG $0x3875e3c4; WORD $0x01c0 // vinserti128 ymm0, ymm1, xmm0, 1 QUAD $0x0002a024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 672], ymm0 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] QUAD $0x0f1d02442059e3c4 // vpinsrb xmm0, xmm4, byte [rdx + rax + 29], 15 LONG $0x387de3c4; WORD $0x01c2 // vinserti128 ymm0, ymm0, xmm2, 1 QUAD $0x0002c024847ffdc5; BYTE $0x00 // vmovdqa yword [rsp + 704], ymm0 - QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] + QUAD $0x000000d8248c8b48 // mov rcx, qword [rsp + 216] LONG $0x0a7cb60f; BYTE $0x1e // movzx edi, byte [rdx + rcx + 30] LONG $0xc76ef9c5 // vmovd xmm0, edi - QUAD $0x011e12442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r10 + 30], 1 + QUAD $0x011e0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 30], 1 LONG $0x0a7cb60f; BYTE $0x1f // movzx edi, byte [rdx + rcx + 31] LONG $0xcf6ef9c5 // vmovd xmm1, edi - QUAD $0x011f124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 31], 1 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x011f0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 31], 1 + QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] QUAD $0x021e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 2 QUAD $0x021f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 2 - QUAD $0x000000c824848b48 // mov rax, qword [rsp + 200] + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] QUAD $0x031e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 3 QUAD $0x031f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 3 QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] @@ -33215,21 +34486,21 @@ LBB5_169: QUAD $0x051f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 5 QUAD $0x061e3a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r15 + 30], 6 QUAD $0x061f3a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r15 + 31], 6 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x071e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 7 QUAD $0x071f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 7 - QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] + QUAD $0x0000011024bc8b48 // mov rdi, qword [rsp + 272] LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x081e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 8 QUAD $0x081f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 8 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x091e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 9 QUAD $0x091f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 9 - QUAD $0x0a1e22442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r12 + 30], 10 - QUAD $0x0a1f224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 31], 10 - QUAD $0x0b1e0a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r9 + 30], 11 - QUAD $0x0b1f0a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r9 + 31], 11 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x0a1e1a442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r11 + 30], 10 + QUAD $0x0a1f1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 31], 10 + QUAD $0x0b1e02442079a3c4 // vpinsrb xmm0, xmm0, byte [rdx + r8 + 30], 11 + QUAD $0x0b1f024c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r8 + 31], 11 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] 
QUAD $0x0c1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 12 QUAD $0x0c1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 12 QUAD $0x000000a824848b48 // mov rax, qword [rsp + 168] @@ -33238,17 +34509,15 @@ LBB5_169: LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0e1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 14 QUAD $0x0e1f024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 31], 14 - QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] QUAD $0x0f1e02442079e3c4 // vpinsrb xmm0, xmm0, byte [rdx + rax + 30], 15 QUAD $0x0f1f02542071e3c4 // vpinsrb xmm2, xmm1, byte [rdx + rax + 31], 15 - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - LONG $0x44b60f42; WORD $0x1e02 // movzx eax, byte [rdx + r8 + 30] + LONG $0x1a44b60f; BYTE $0x1e // movzx eax, byte [rdx + rbx + 30] LONG $0xc86ef9c5 // vmovd xmm1, eax - QUAD $0x011e1a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r11 + 30], 1 - LONG $0x44b60f42; WORD $0x1f02 // movzx eax, byte [rdx + r8 + 31] + QUAD $0x011e224c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r12 + 30], 1 + LONG $0x1a44b60f; BYTE $0x1f // movzx eax, byte [rdx + rbx + 31] LONG $0xf86ef9c5 // vmovd xmm7, eax - QUAD $0x011f1a7c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r11 + 31], 1 - QUAD $0x000000d824948b4c // mov r10, qword [rsp + 216] + QUAD $0x011f227c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r12 + 31], 1 QUAD $0x021e124c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r10 + 30], 2 QUAD $0x021f127c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r10 + 31], 2 QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] @@ -33259,12 +34528,13 @@ LBB5_169: LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x051e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 5 QUAD $0x051f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 5 - QUAD $0x061e324c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r14 + 30], 6 - QUAD $0x061f327c2041a3c4 // vpinsrb xmm7, xmm7, byte [rdx + r14 + 31], 6 + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] + QUAD $0x061e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 6 + QUAD $0x061f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 6 QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] QUAD $0x071e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 7 QUAD $0x071f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 7 - QUAD $0x000000b824848b48 // mov rax, qword [rsp + 184] + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] QUAD $0x081e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 8 QUAD $0x081f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 8 QUAD $0x0000010824848b48 // mov rax, qword [rsp + 264] @@ -33273,7 +34543,7 @@ LBB5_169: LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0a1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 10 QUAD $0x0a1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 10 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0b1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 11 QUAD $0x0b1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 11 QUAD $0x0c1e2a4c2071a3c4 // vpinsrb xmm1, xmm1, byte [rdx + r13 + 30], 12 @@ -33281,7 +34551,7 @@ LBB5_169: QUAD $0x0000014024848b48 // mov rax, qword [rsp + 320] QUAD $0x0d1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 13 QUAD $0x0d1f027c2041e3c4 // vpinsrb xmm7, xmm7, 
byte [rdx + rax + 31], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0e1e024c2071e3c4 // vpinsrb xmm1, xmm1, byte [rdx + rax + 30], 14 QUAD $0x0e1f027c2041e3c4 // vpinsrb xmm7, xmm7, byte [rdx + rax + 31], 14 QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] @@ -33407,10 +34677,10 @@ LBB5_169: LONG $0x3865e3c4; WORD $0x01e0 // vinserti128 ymm4, ymm3, xmm0, 1 LONG $0x4665e3c4; WORD $0x31c0 // vperm2i128 ymm0, ymm3, ymm0, 49 QUAD $0x00000198248c8b48 // mov rcx, qword [rsp + 408] - LONG $0x7f7ec1c4; WORD $0x8f44; BYTE $0x60 // vmovdqu yword [r15 + 4*rcx + 96], ymm0 - LONG $0x7f7ec1c4; WORD $0x8f54; BYTE $0x40 // vmovdqu yword [r15 + 4*rcx + 64], ymm2 - LONG $0x7f7ec1c4; WORD $0x8f64; BYTE $0x20 // vmovdqu yword [r15 + 4*rcx + 32], ymm4 - LONG $0x7f7ec1c4; WORD $0x8f0c // vmovdqu yword [r15 + 4*rcx], ymm1 + LONG $0x447ffec5; WORD $0x608f // vmovdqu yword [rdi + 4*rcx + 96], ymm0 + LONG $0x547ffec5; WORD $0x408f // vmovdqu yword [rdi + 4*rcx + 64], ymm2 + LONG $0x647ffec5; WORD $0x208f // vmovdqu yword [rdi + 4*rcx + 32], ymm4 + LONG $0x0c7ffec5; BYTE $0x8f // vmovdqu yword [rdi + 4*rcx], ymm1 LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xcb // mov rbx, rcx QUAD $0x00000178248c3b48 // cmp rcx, qword [rsp + 376] @@ -33421,8 +34691,8 @@ LBB5_169: QUAD $0x0000019024ac8b4c // mov r13, qword [rsp + 400] QUAD $0x0000018824948b48 // mov rdx, qword [rsp + 392] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] - JNE LBB5_114 - JMP LBB5_133 + JNE LBB5_117 + JMP LBB5_120 TEXT ·_comparison_greater_arr_arr_avx2(SB), $80-48 @@ -36157,11 +37427,11 @@ TEXT ·_comparison_greater_arr_scalar_avx2(SB), $1384-48 WORD $0xff83; BYTE $0x03 // cmp edi, 3 JLE LBB7_2 WORD $0xff83; BYTE $0x04 // cmp edi, 4 - JE LBB7_79 + JE LBB7_84 WORD $0xff83; BYTE $0x05 // cmp edi, 5 - JE LBB7_95 + JE LBB7_102 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB7_192 + JNE LBB7_191 WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -36206,7 +37476,7 @@ LBB7_17: QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 -LBB7_113: +LBB7_120: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] LONG $0x046e3944 // cmp dword [rsi + 4], r13d @@ -36358,240 +37628,278 @@ LBB7_113: WORD $0x0841; BYTE $0xc8 // or r8b, cl LONG $0x027a8844 // mov byte [rdx + 2], r15b LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x000000f024948948 // mov qword [rsp + 240], rdx - QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 - JNE LBB7_113 - QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB7_116 - JMP LBB7_192 - -LBB7_19: - WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JLE LBB7_20 - WORD $0xff83; BYTE $0x09 // cmp edi, 9 - JE LBB7_148 - WORD $0xff83; BYTE $0x0b // cmp edi, 11 - JE LBB7_164 - WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB7_192 - LONG $0x1f7a8d4d // lea r15, [r10 + 31] - WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xfa490f4d // cmovns r15, r10 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // 
cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - LONG $0x0210fbc5 // vmovsd xmm0, qword [rdx] - WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_35 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d - -LBB7_33: - LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - LONG $0x08768d48 // lea rsi, [rsi + 8] - WORD $0xd219 // sbb edx, edx - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax - LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3b // movzx r9d, byte [r11 + rdi] - WORD $0x3044; BYTE $0xca // xor dl, r9b - QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] - WORD $0xc189 // mov ecx, eax - WORD $0x2944; BYTE $0xc1 // sub ecx, r8d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0xd320 // and bl, dl - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3b1c8841 // mov byte [r11 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_33 - LONG $0x01c38349 // add r11, 1 - -LBB7_35: - LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fa8349 // cmp r10, 32 - JL LBB7_36 - QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 - QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 - LONG $0x247c894c; BYTE $0x78 // mov qword [rsp + 120], r15 - QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 - -LBB7_181: - LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - QUAD $0x000000802494920f // setb byte [rsp + 128] - LONG $0x462ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rsi + 8] - LONG $0xd1920f41 // setb r9b - LONG $0x462ef9c5; BYTE $0x10 // vucomisd xmm0, qword [rsi + 16] - LONG $0xd6920f41 // setb r14b - LONG $0x462ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rsi + 24] - LONG $0xd5920f41 // setb r13b - LONG $0x462ef9c5; BYTE $0x20 // vucomisd xmm0, qword [rsi + 32] - LONG $0x2454920f; BYTE $0x58 // setb byte [rsp + 88] - LONG $0x462ef9c5; BYTE $0x28 // vucomisd xmm0, qword [rsi + 40] - LONG $0x2454920f; BYTE $0x30 // setb byte [rsp + 48] - LONG $0x462ef9c5; BYTE $0x30 // vucomisd xmm0, qword [rsi + 48] - WORD $0x920f; BYTE $0xd0 // setb al - LONG $0x462ef9c5; BYTE $0x38 // vucomisd xmm0, qword [rsi + 56] - WORD $0x920f; BYTE $0xd3 // setb bl - LONG $0x462ef9c5; BYTE $0x40 // vucomisd xmm0, qword [rsi + 64] - LONG $0x2454920f; BYTE $0x70 // setb byte [rsp + 112] - LONG $0x462ef9c5; BYTE $0x48 // vucomisd xmm0, qword [rsi + 72] - WORD $0x920f; BYTE $0xd2 // setb dl - LONG $0x462ef9c5; BYTE $0x50 // vucomisd xmm0, qword [rsi + 80] - LONG $0xd7920f40 // setb dil - LONG $0x462ef9c5; BYTE $0x58 // vucomisd xmm0, qword [rsi + 88] - LONG $0xd2920f41 // setb r10b - LONG $0x462ef9c5; BYTE $0x60 // vucomisd xmm0, qword [rsi + 96] - LONG $0xd3920f41 // setb r11b - LONG $0x462ef9c5; BYTE $0x68 // vucomisd xmm0, qword [rsi + 104] - LONG $0xd4920f41 // setb r12b - LONG $0x462ef9c5; BYTE $0x70 // vucomisd xmm0, qword [rsi + 112] - LONG $0x2454920f; BYTE $0x48 // setb byte [rsp + 72] - LONG $0x462ef9c5; BYTE $0x78 // vucomisd xmm0, qword [rsi + 120] - WORD $0x920f; BYTE $0xd1 // setb cl - QUAD $0x00000080862ef9c5 // vucomisd xmm0, qword [rsi + 128] - LONG $0x2454920f; BYTE $0x40 // setb byte [rsp + 64] - QUAD $0x00000088862ef9c5 // vucomisd xmm0, qword [rsi + 136] - LONG $0x2454920f; BYTE $0x68 // setb byte [rsp + 104] - QUAD $0x00000090862ef9c5 // vucomisd xmm0, qword [rsi + 144] - LONG $0x2454920f; BYTE $0x50 // setb byte [rsp + 80] - QUAD $0x00000098862ef9c5 // vucomisd xmm0, qword [rsi + 152] - LONG $0x2454920f; BYTE $0x60 // setb byte [rsp + 96] - QUAD $0x000000a0862ef9c5 // 
vucomisd xmm0, qword [rsi + 160] - LONG $0x2454920f; BYTE $0x28 // setb byte [rsp + 40] - QUAD $0x000000a8862ef9c5 // vucomisd xmm0, qword [rsi + 168] - LONG $0x2454920f; BYTE $0x38 // setb byte [rsp + 56] - QUAD $0x000000b0862ef9c5 // vucomisd xmm0, qword [rsi + 176] - LONG $0x2454920f; BYTE $0x18 // setb byte [rsp + 24] - QUAD $0x000000b8862ef9c5 // vucomisd xmm0, qword [rsi + 184] - LONG $0xd7920f41 // setb r15b - QUAD $0x000000c0862ef9c5 // vucomisd xmm0, qword [rsi + 192] - QUAD $0x000001402494920f // setb byte [rsp + 320] - QUAD $0x000000c8862ef9c5 // vucomisd xmm0, qword [rsi + 200] - LONG $0x2454920f; BYTE $0x20 // setb byte [rsp + 32] - QUAD $0x000000d0862ef9c5 // vucomisd xmm0, qword [rsi + 208] - LONG $0x2454920f; BYTE $0x10 // setb byte [rsp + 16] - QUAD $0x000000d8862ef9c5 // vucomisd xmm0, qword [rsi + 216] - LONG $0x2454920f; BYTE $0x08 // setb byte [rsp + 8] - QUAD $0x000000e0862ef9c5 // vucomisd xmm0, qword [rsi + 224] - QUAD $0x000001202494920f // setb byte [rsp + 288] - QUAD $0x000000e8862ef9c5 // vucomisd xmm0, qword [rsi + 232] - QUAD $0x000001002494920f // setb byte [rsp + 256] - QUAD $0x000000f0862ef9c5 // vucomisd xmm0, qword [rsi + 240] - LONG $0x2454920f; BYTE $0x04 // setb byte [rsp + 4] - QUAD $0x000000f8862ef9c5 // vucomisd xmm0, qword [rsi + 248] - LONG $0xd0920f41 // setb r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x00000080248c0244 // add r9b, byte [rsp + 128] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x30 // movzx edx, byte [rsp + 48] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x48 // movzx edi, byte [rsp + 72] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xc000 // add al, al - LONG $0x40244402 // add al, byte [rsp + 64] - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x28 // movzx edx, byte [rsp + 40] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x38 // movzx edx, byte [rsp + 56] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000f024948b48 // mov rdx, qword [rsp + 240] - WORD $0x1a88 // mov byte [rdx], 
bl - LONG $0x245cb60f; BYTE $0x18 // movzx ebx, byte [rsp + 24] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xc900 // add cl, cl - LONG $0x40248c02; WORD $0x0001; BYTE $0x00 // add cl, byte [rsp + 320] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000100248cb60f // movzx ecx, byte [rsp + 256] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c28348 // add rdx, 4 QUAD $0x000000f024948948 // mov qword [rsp + 240], rdx - LONG $0x24448348; WORD $0xff78 // add qword [rsp + 120], -1 - JNE LBB7_181 + QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 + JNE LBB7_120 QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] + QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB7_184 - JMP LBB7_192 + JL LBB7_123 + JMP LBB7_191 + +LBB7_19: + WORD $0xff83; BYTE $0x08 // cmp edi, 8 + JLE LBB7_20 + WORD $0xff83; BYTE $0x09 // cmp edi, 9 + JE LBB7_155 + WORD $0xff83; BYTE $0x0b // cmp edi, 11 + JE LBB7_171 + WORD $0xff83; BYTE $0x0c // cmp edi, 12 + JNE LBB7_191 + LONG $0x1f728d4d // lea r14, [r10 + 31] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xf2490f4d // cmovns r14, r10 + LONG $0x07418d41 // lea eax, [r9 + 7] + WORD $0x8545; BYTE $0xc9 // test r9d, r9d + LONG $0xc1490f41 // cmovns eax, r9d + WORD $0xe083; BYTE $0xf8 // and eax, -8 + LONG $0x0210fbc5 // vmovsd xmm0, qword [rdx] + WORD $0x2941; BYTE $0xc1 // sub r9d, eax + JE LBB7_35 + WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + +LBB7_33: + LONG $0x0e10fbc5 // vmovsd xmm1, qword [rsi] + LONG $0x08c68348 // add rsi, 8 + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd2 // seta dl + WORD $0xdaf6 // neg dl + LONG $0x07788d48 // lea rdi, [rax + 7] + WORD $0x8548; BYTE $0xc0 // test rax, rax + LONG $0xf8490f48 // cmovns rdi, rax + LONG $0x03ffc148 // sar rdi, 3 + LONG $0x0cb60f45; BYTE $0x3b // movzx r9d, byte [r11 + rdi] + WORD $0x3044; BYTE $0xca // xor dl, r9b + QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] + WORD $0xc189 // mov ecx, eax + WORD $0x2944; BYTE $0xc1 // sub ecx, r8d + LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 + WORD $0xe3d3 // shl ebx, cl + WORD $0xd320 // and bl, dl + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x3b1c8841 // mov byte [r11 + rdi], bl + LONG 
$0x01c08348 // add rax, 1 + LONG $0x08f88348 // cmp rax, 8 + JNE LBB7_33 + LONG $0x01c38349 // add r11, 1 + +LBB7_35: + LONG $0x05fec149 // sar r14, 5 + LONG $0x20fa8349 // cmp r10, 32 + JL LBB7_39 + QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 + QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 + LONG $0x2474894c; BYTE $0x78 // mov qword [rsp + 120], r14 + QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 + +LBB7_37: + LONG $0x0e10fbc5 // vmovsd xmm1, qword [rsi] + LONG $0x5610fbc5; BYTE $0x08 // vmovsd xmm2, qword [rsi + 8] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000001202494970f // seta byte [rsp + 288] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + WORD $0x970f; BYTE $0xd3 // seta bl + LONG $0x4e10fbc5; BYTE $0x10 // vmovsd xmm1, qword [rsi + 16] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x4e10fbc5; BYTE $0x18 // vmovsd xmm1, qword [rsi + 24] + LONG $0xd5970f41 // seta r13b + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0xd4970f41 // seta r12b + LONG $0x4e10fbc5; BYTE $0x20 // vmovsd xmm1, qword [rsi + 32] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x4e10fbc5; BYTE $0x28 // vmovsd xmm1, qword [rsi + 40] + LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] + LONG $0x4e10fbc5; BYTE $0x30 // vmovsd xmm1, qword [rsi + 48] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x4e10fbc5; BYTE $0x38 // vmovsd xmm1, qword [rsi + 56] + LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] + LONG $0x4e10fbc5; BYTE $0x40 // vmovsd xmm1, qword [rsi + 64] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x4e10fbc5; BYTE $0x48 // vmovsd xmm1, qword [rsi + 72] + QUAD $0x000001002494970f // seta byte [rsp + 256] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + LONG $0x4e10fbc5; BYTE $0x50 // vmovsd xmm1, qword [rsi + 80] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x4e10fbc5; BYTE $0x58 // vmovsd xmm1, qword [rsi + 88] + LONG $0x2454970f; BYTE $0x10 // seta byte [rsp + 16] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000001402494970f // seta byte [rsp + 320] + LONG $0x4e10fbc5; BYTE $0x60 // vmovsd xmm1, qword [rsi + 96] + LONG $0x5e10fbc5; BYTE $0x68 // vmovsd xmm3, qword [rsi + 104] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x6e10fbc5; BYTE $0x70 // vmovsd xmm5, qword [rsi + 112] + LONG $0x7610fbc5; BYTE $0x78 // vmovsd xmm6, qword [rsi + 120] + LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] + QUAD $0x0000008086107bc5 // vmovsd xmm8, qword [rsi + 128] + QUAD $0x000000888e107bc5 // vmovsd xmm9, qword [rsi + 136] + LONG $0xd82ef9c5 // vucomisd xmm3, xmm0 + QUAD $0x0000009096107bc5 // vmovsd xmm10, qword [rsi + 144] + QUAD $0x000000989e107bc5 // vmovsd xmm11, qword [rsi + 152] + LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] + QUAD $0x000000a0a6107bc5 // vmovsd xmm12, qword [rsi + 160] + QUAD $0x000000a8ae107bc5 // vmovsd xmm13, qword [rsi + 168] + LONG $0xe82ef9c5 // vucomisd xmm5, xmm0 + QUAD $0x000000b0b6107bc5 // vmovsd xmm14, qword [rsi + 176] + QUAD $0x000000b89610fbc5 // vmovsd xmm2, qword [rsi + 184] + LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] + QUAD $0x000000c09e10fbc5 // vmovsd xmm3, qword [rsi + 192] + QUAD $0x000000c8a610fbc5 // vmovsd xmm4, qword [rsi + 200] + LONG $0xf02ef9c5 // vucomisd xmm6, xmm0 + QUAD $0x000000d0b610fbc5 // vmovsd xmm6, qword [rsi + 208] + QUAD 
$0x000000d8be10fbc5 // vmovsd xmm7, qword [rsi + 216] + LONG $0xd3970f41 // seta r11b + QUAD $0x000000e08e10fbc5 // vmovsd xmm1, qword [rsi + 224] + QUAD $0x000000e8ae10fbc5 // vmovsd xmm5, qword [rsi + 232] + LONG $0xc02e79c5 // vucomisd xmm8, xmm0 + QUAD $0x000000802494970f // seta byte [rsp + 128] + LONG $0xc82e79c5 // vucomisd xmm9, xmm0 + WORD $0x970f; BYTE $0xd1 // seta cl + LONG $0xd02e79c5 // vucomisd xmm10, xmm0 + LONG $0xd7970f40 // seta dil + LONG $0xd82e79c5 // vucomisd xmm11, xmm0 + LONG $0xd0970f41 // seta r8b + LONG $0xe02e79c5 // vucomisd xmm12, xmm0 + LONG $0xd2970f41 // seta r10b + LONG $0xe82e79c5 // vucomisd xmm13, xmm0 + LONG $0xd6970f41 // seta r14b + LONG $0xf02e79c5 // vucomisd xmm14, xmm0 + LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + LONG $0xd1970f41 // seta r9b + LONG $0xd82ef9c5 // vucomisd xmm3, xmm0 + LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] + LONG $0xe02ef9c5 // vucomisd xmm4, xmm0 + LONG $0xd7970f41 // seta r15b + LONG $0xf02ef9c5 // vucomisd xmm6, xmm0 + LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] + LONG $0xf82ef9c5 // vucomisd xmm7, xmm0 + LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] + LONG $0xe82ef9c5 // vucomisd xmm5, xmm0 + QUAD $0x000000f08e10fbc5 // vmovsd xmm1, qword [rsi + 240] + LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x04 // seta byte [rsp + 4] + QUAD $0x000000f88e10fbc5 // vmovsd xmm1, qword [rsi + 248] + LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd2 // seta dl + WORD $0xdb00 // add bl, bl + LONG $0x20249c02; WORD $0x0001; BYTE $0x00 // add bl, byte [rsp + 288] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + WORD $0x8941; BYTE $0xdc // mov r12d, ebx + LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + LONG $0x64b60f44; WORD $0x3024 // movzx r12d, byte [rsp + 48] + LONG $0x06e4c041 // shl r12b, 6 + LONG $0x6cb60f44; WORD $0x1824 // movzx r13d, byte [rsp + 24] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + WORD $0xc000 // add al, al + LONG $0x00248402; WORD $0x0001; BYTE $0x00 // add al, byte [rsp + 256] + LONG $0x64b60f44; WORD $0x1024 // movzx r12d, byte [rsp + 16] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + WORD $0x8944; BYTE $0xe0 // mov eax, r12d + QUAD $0x00014024a4b60f44; BYTE $0x00 // movzx r12d, byte [rsp + 320] + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0xc308 // or bl, al + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e3c041 // shl r11b, 7 + WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0xc900 // add cl, cl + 
LONG $0x80248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 128] + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x04e2c041 // shl r10b, 4 + WORD $0x0845; BYTE $0xc2 // or r10b, r8b + LONG $0x05e6c041 // shl r14b, 5 + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0841; BYTE $0xdb // or r11b, bl + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e1c041 // shl r9b, 7 + WORD $0x0841; BYTE $0xc1 // or r9b, al + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + WORD $0x8844; BYTE $0x28 // mov byte [rax], r13b + WORD $0x0845; BYTE $0xf1 // or r9b, r14b + WORD $0x0045; BYTE $0xff // add r15b, r15b + LONG $0x247c0244; BYTE $0x58 // add r15b, byte [rsp + 88] + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0x0844; BYTE $0xf9 // or cl, r15b + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xd908 // or cl, bl + LONG $0x01588844 // mov byte [rax + 1], r11b + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + LONG $0x02488844 // mov byte [rax + 2], r9b + WORD $0xca08 // or dl, cl + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl + LONG $0x04c08348 // add rax, 4 + QUAD $0x000000f024848948 // mov qword [rsp + 240], rax + LONG $0x24448348; WORD $0xff78 // add qword [rsp + 120], -1 + JNE LBB7_37 + QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] + QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] + QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] + +LBB7_39: + LONG $0x05e6c149 // shl r14, 5 + WORD $0x394d; BYTE $0xd6 // cmp r14, r10 + JGE LBB7_191 + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0x294d; BYTE $0xf0 // sub r8, r14 + WORD $0xf749; BYTE $0xd6 // not r14 + WORD $0x014d; BYTE $0xd6 // add r14, r10 + JNE LBB7_186 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB7_188 LBB7_2: WORD $0xff83; BYTE $0x02 // cmp edi, 2 - JE LBB7_37 + JE LBB7_42 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB7_192 + JNE LBB7_191 WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] LONG $0x1f6a8d4d // lea r13, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -36637,27 +37945,27 @@ LBB7_8: LONG $0x24748944; BYTE $0x04 // mov dword [rsp + 4], r14d QUAD $0x000000f824bc894c // mov qword [rsp + 248], r15 QUAD $0x0000016824ac894c // mov qword [rsp + 360], r13 - JB LBB7_62 + JB LBB7_67 WORD $0x894c; BYTE $0xe8 // mov rax, r13 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc3 // cmp r11, rax - JAE LBB7_65 + JAE LBB7_70 LONG $0xab048d4b // lea rax, [r11 + 4*r13] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB7_65 + JAE LBB7_70 -LBB7_62: +LBB7_67: WORD $0xc031 // xor eax, eax QUAD $0x0000018024848948 // mov qword [rsp + 384], rax WORD $0x8949; BYTE $0xf4 // mov r12, rsi QUAD $0x00000160249c894c // mov qword [rsp + 352], r11 -LBB7_68: +LBB7_73: QUAD $0x0000018024ac2b4c // sub r13, qword 
[rsp + 384] LONG $0x246c894c; BYTE $0x78 // mov qword [rsp + 120], r13 -LBB7_69: +LBB7_74: WORD $0x894c; BYTE $0xe1 // mov rcx, r12 LONG $0x24343845 // cmp byte [r12], r14b QUAD $0x0000014024949f0f // setg byte [rsp + 320] @@ -36820,16 +38128,16 @@ LBB7_69: LONG $0x04c68348 // add rsi, 4 QUAD $0x0000016024b48948 // mov qword [rsp + 352], rsi LONG $0x24448348; WORD $0xff78 // add qword [rsp + 120], -1 - JNE LBB7_69 + JNE LBB7_74 QUAD $0x000000f824bc8b4c // mov r15, qword [rsp + 248] QUAD $0x0000016824ac8b4c // mov r13, qword [rsp + 360] - JMP LBB7_71 + JMP LBB7_76 LBB7_20: WORD $0xff83; BYTE $0x07 // cmp edi, 7 - JE LBB7_122 + JE LBB7_129 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB7_192 + JNE LBB7_191 WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -36873,7 +38181,7 @@ LBB7_26: QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 -LBB7_139: +LBB7_146: QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] @@ -37028,16 +38336,16 @@ LBB7_139: LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c38349 // add r11, 4 QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 - JNE LBB7_139 + JNE LBB7_146 WORD $0x894d; BYTE $0xde // mov r14, r11 QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB7_142 - JMP LBB7_192 + JL LBB7_149 + JMP LBB7_191 -LBB7_79: +LBB7_84: LONG $0x2ab70f44 // movzx r13d, word [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -37047,10 +38355,10 @@ LBB7_79: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_83 + JE LBB7_88 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB7_81: +LBB7_86: LONG $0x2e3b4466 // cmp r13w, word [rsi] LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0xd219 // sbb edx, edx @@ -37070,19 +38378,19 @@ LBB7_81: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_81 + JNE LBB7_86 LONG $0x01c38349 // add r11, 1 -LBB7_83: +LBB7_88: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB7_84 + JL LBB7_89 QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 -LBB7_86: +LBB7_91: LONG $0x2e394466 // cmp word [rsi], r13w WORD $0x970f; BYTE $0xd0 // seta al LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w @@ -37239,16 +38547,16 @@ LBB7_86: LONG $0x04c28348 // add rdx, 4 QUAD $0x000000f024948948 // mov qword [rsp + 240], rdx QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 - JNE LBB7_86 + JNE LBB7_91 QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB7_89 - JMP LBB7_192 + JL LBB7_94 + JMP LBB7_191 -LBB7_95: +LBB7_102: LONG $0x2ab70f44 // movzx r13d, word [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -37258,10 +38566,10 @@ LBB7_95: LONG 
$0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_99 + JE LBB7_106 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB7_97: +LBB7_104: LONG $0x2e394466 // cmp word [rsi], r13w LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x9f0f; BYTE $0xd2 // setg dl @@ -37282,19 +38590,19 @@ LBB7_97: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_97 + JNE LBB7_104 LONG $0x01c38349 // add r11, 1 -LBB7_99: +LBB7_106: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB7_100 + JL LBB7_107 QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 -LBB7_102: +LBB7_109: LONG $0x2e394466 // cmp word [rsi], r13w LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w @@ -37450,16 +38758,16 @@ LBB7_102: LONG $0x04c28348 // add rdx, 4 QUAD $0x000000f024948948 // mov qword [rsp + 240], rdx QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 - JNE LBB7_102 + JNE LBB7_109 QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB7_105 - JMP LBB7_192 + JL LBB7_112 + JMP LBB7_191 -LBB7_148: +LBB7_155: WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -37469,10 +38777,10 @@ LBB7_148: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_152 + JE LBB7_159 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB7_150: +LBB7_157: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] WORD $0x9f0f; BYTE $0xd2 // setg dl @@ -37493,19 +38801,19 @@ LBB7_150: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_150 + JNE LBB7_157 LONG $0x01c38349 // add r11, 1 -LBB7_152: +LBB7_159: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB7_153 + JL LBB7_160 QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 -LBB7_155: +LBB7_162: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] LONG $0x086e394c // cmp qword [rsi + 8], r13 @@ -37657,228 +38965,266 @@ LBB7_155: WORD $0x0841; BYTE $0xc8 // or r8b, cl LONG $0x027a8844 // mov byte [rdx + 2], r15b LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x000000f024948948 // mov qword [rsp + 240], rdx - QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 - JNE LBB7_155 - QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB7_158 - JMP LBB7_192 - -LBB7_164: - LONG $0x1f7a8d4d // lea r15, [r10 + 31] - WORD $0x854d; BYTE $0xd2 // test r10, r10 - 
LONG $0xfa490f4d // cmovns r15, r10 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - LONG $0x0210fac5 // vmovss xmm0, dword [rdx] - WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_168 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d - -LBB7_166: - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - LONG $0x04768d48 // lea rsi, [rsi + 4] - WORD $0xd219 // sbb edx, edx - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax - LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3b // movzx r9d, byte [r11 + rdi] - WORD $0x3044; BYTE $0xca // xor dl, r9b - QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] - WORD $0xc189 // mov ecx, eax - WORD $0x2944; BYTE $0xc1 // sub ecx, r8d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0xd320 // and bl, dl - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3b1c8841 // mov byte [r11 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_166 - LONG $0x01c38349 // add r11, 1 - -LBB7_168: - LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fa8349 // cmp r10, 32 - JL LBB7_169 - QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 - QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 - LONG $0x247c894c; BYTE $0x78 // mov qword [rsp + 120], r15 - QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 - -LBB7_171: - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - QUAD $0x000000802494920f // setb byte [rsp + 128] - LONG $0x462ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rsi + 4] - LONG $0xd1920f41 // setb r9b - LONG $0x462ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rsi + 8] - LONG $0xd6920f41 // setb r14b - LONG $0x462ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rsi + 12] - LONG $0xd5920f41 // setb r13b - LONG $0x462ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rsi + 16] - LONG $0x2454920f; BYTE $0x58 // setb byte [rsp + 88] - LONG $0x462ef8c5; BYTE $0x14 // vucomiss xmm0, dword [rsi + 20] - LONG $0x2454920f; BYTE $0x30 // setb byte [rsp + 48] - LONG $0x462ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rsi + 24] - WORD $0x920f; BYTE $0xd0 // setb al - LONG $0x462ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rsi + 28] - WORD $0x920f; BYTE $0xd3 // setb bl - LONG $0x462ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rsi + 32] - LONG $0x2454920f; BYTE $0x70 // setb byte [rsp + 112] - LONG $0x462ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rsi + 36] - WORD $0x920f; BYTE $0xd2 // setb dl - LONG $0x462ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rsi + 40] - LONG $0xd7920f40 // setb dil - LONG $0x462ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rsi + 44] - LONG $0xd2920f41 // setb r10b - LONG $0x462ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rsi + 48] - LONG $0xd3920f41 // setb r11b - LONG $0x462ef8c5; BYTE $0x34 // vucomiss xmm0, dword [rsi + 52] - LONG $0xd4920f41 // setb r12b - LONG $0x462ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rsi + 56] - LONG $0x2454920f; BYTE $0x48 // setb byte [rsp + 72] - LONG $0x462ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rsi + 60] - WORD $0x920f; BYTE $0xd1 // setb cl - LONG $0x462ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rsi + 64] - LONG $0x2454920f; BYTE $0x40 // setb byte [rsp + 64] - LONG $0x462ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rsi + 68] - LONG $0x2454920f; BYTE $0x68 // setb byte [rsp + 104] - LONG $0x462ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rsi + 72] - LONG $0x2454920f; BYTE $0x50 // setb byte [rsp + 
80] - LONG $0x462ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rsi + 76] - LONG $0x2454920f; BYTE $0x60 // setb byte [rsp + 96] - LONG $0x462ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rsi + 80] - LONG $0x2454920f; BYTE $0x28 // setb byte [rsp + 40] - LONG $0x462ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rsi + 84] - LONG $0x2454920f; BYTE $0x38 // setb byte [rsp + 56] - LONG $0x462ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rsi + 88] - LONG $0x2454920f; BYTE $0x18 // setb byte [rsp + 24] - LONG $0x462ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rsi + 92] - LONG $0xd7920f41 // setb r15b - LONG $0x462ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rsi + 96] - QUAD $0x000001402494920f // setb byte [rsp + 320] - LONG $0x462ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rsi + 100] - LONG $0x2454920f; BYTE $0x20 // setb byte [rsp + 32] - LONG $0x462ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rsi + 104] - LONG $0x2454920f; BYTE $0x10 // setb byte [rsp + 16] - LONG $0x462ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rsi + 108] - LONG $0x2454920f; BYTE $0x08 // setb byte [rsp + 8] - LONG $0x462ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rsi + 112] - QUAD $0x000001202494920f // setb byte [rsp + 288] - LONG $0x462ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rsi + 116] - QUAD $0x000001002494920f // setb byte [rsp + 256] - LONG $0x462ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rsi + 120] - LONG $0x2454920f; BYTE $0x04 // setb byte [rsp + 4] - LONG $0x462ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rsi + 124] - LONG $0xd0920f41 // setb r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x00000080248c0244 // add r9b, byte [rsp + 128] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x30 // movzx edx, byte [rsp + 48] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x48 // movzx edi, byte [rsp + 72] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xc000 // add al, al - LONG $0x40244402 // add al, byte [rsp + 64] - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x28 // movzx edx, byte [rsp + 40] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x38 // movzx edx, byte [rsp + 56] - WORD 
$0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000f024948b48 // mov rdx, qword [rsp + 240] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x18 // movzx ebx, byte [rsp + 24] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xc900 // add cl, cl - LONG $0x40248c02; WORD $0x0001; BYTE $0x00 // add cl, byte [rsp + 320] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000100248cb60f // movzx ecx, byte [rsp + 256] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 + LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c28348 // add rdx, 4 QUAD $0x000000f024948948 // mov qword [rsp + 240], rdx - LONG $0x24448348; WORD $0xff78 // add qword [rsp + 120], -1 - JNE LBB7_171 + QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 + JNE LBB7_162 QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] + QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB7_174 - JMP LBB7_192 + JL LBB7_165 + JMP LBB7_191 -LBB7_37: +LBB7_171: + LONG $0x1f728d4d // lea r14, [r10 + 31] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xf2490f4d // cmovns r14, r10 + LONG $0x07418d41 // lea eax, [r9 + 7] + WORD $0x8545; BYTE $0xc9 // test r9d, r9d + LONG $0xc1490f41 // cmovns eax, r9d + WORD $0xe083; BYTE $0xf8 // and eax, -8 + LONG $0x0210fac5 // vmovss xmm0, dword [rdx] + WORD $0x2941; BYTE $0xc1 // sub r9d, eax + JE LBB7_175 + WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + +LBB7_173: + LONG $0x0e10fac5 // vmovss xmm1, dword [rsi] + LONG $0x04c68348 // add rsi, 4 + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + WORD $0x970f; BYTE $0xd2 // seta dl + WORD $0xdaf6 // neg dl + LONG $0x07788d48 // lea rdi, [rax + 7] + WORD $0x8548; BYTE $0xc0 // test rax, rax + LONG $0xf8490f48 // cmovns rdi, rax + LONG $0x03ffc148 // sar rdi, 3 + LONG $0x0cb60f45; BYTE $0x3b // movzx r9d, byte [r11 + rdi] + WORD $0x3044; BYTE $0xca // xor dl, r9b + QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] + WORD $0xc189 // mov ecx, eax + WORD $0x2944; BYTE $0xc1 // sub ecx, r8d + LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 + WORD $0xe3d3 // shl ebx, cl + WORD $0xd320 // and bl, dl + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x3b1c8841 // mov byte [r11 + rdi], bl + LONG $0x01c08348 // add 
rax, 1 + LONG $0x08f88348 // cmp rax, 8 + JNE LBB7_173 + LONG $0x01c38349 // add r11, 1 + +LBB7_175: + LONG $0x05fec149 // sar r14, 5 + LONG $0x20fa8349 // cmp r10, 32 + JL LBB7_179 + QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 + QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 + LONG $0x2474894c; BYTE $0x78 // mov qword [rsp + 120], r14 + QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 + +LBB7_177: + LONG $0x0e10fac5 // vmovss xmm1, dword [rsi] + LONG $0x5610fac5; BYTE $0x04 // vmovss xmm2, dword [rsi + 4] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + QUAD $0x000001202494970f // seta byte [rsp + 288] + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + WORD $0x970f; BYTE $0xd3 // seta bl + LONG $0x4e10fac5; BYTE $0x08 // vmovss xmm1, dword [rsi + 8] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x0c // vmovss xmm1, dword [rsi + 12] + LONG $0xd5970f41 // seta r13b + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0xd4970f41 // seta r12b + LONG $0x4e10fac5; BYTE $0x10 // vmovss xmm1, dword [rsi + 16] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x14 // vmovss xmm1, dword [rsi + 20] + LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] + LONG $0x4e10fac5; BYTE $0x18 // vmovss xmm1, dword [rsi + 24] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x1c // vmovss xmm1, dword [rsi + 28] + LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] + LONG $0x4e10fac5; BYTE $0x20 // vmovss xmm1, dword [rsi + 32] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x24 // vmovss xmm1, dword [rsi + 36] + QUAD $0x000001002494970f // seta byte [rsp + 256] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + LONG $0x4e10fac5; BYTE $0x28 // vmovss xmm1, dword [rsi + 40] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x2c // vmovss xmm1, dword [rsi + 44] + LONG $0x2454970f; BYTE $0x10 // seta byte [rsp + 16] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + QUAD $0x000001402494970f // seta byte [rsp + 320] + LONG $0x4e10fac5; BYTE $0x30 // vmovss xmm1, dword [rsi + 48] + LONG $0x5e10fac5; BYTE $0x34 // vmovss xmm3, dword [rsi + 52] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x6e10fac5; BYTE $0x38 // vmovss xmm5, dword [rsi + 56] + LONG $0x7610fac5; BYTE $0x3c // vmovss xmm6, dword [rsi + 60] + LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] + LONG $0x46107ac5; BYTE $0x40 // vmovss xmm8, dword [rsi + 64] + LONG $0x4e107ac5; BYTE $0x44 // vmovss xmm9, dword [rsi + 68] + LONG $0xd82ef8c5 // vucomiss xmm3, xmm0 + LONG $0x56107ac5; BYTE $0x48 // vmovss xmm10, dword [rsi + 72] + LONG $0x5e107ac5; BYTE $0x4c // vmovss xmm11, dword [rsi + 76] + LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] + LONG $0x66107ac5; BYTE $0x50 // vmovss xmm12, dword [rsi + 80] + LONG $0x6e107ac5; BYTE $0x54 // vmovss xmm13, dword [rsi + 84] + LONG $0xe82ef8c5 // vucomiss xmm5, xmm0 + LONG $0x76107ac5; BYTE $0x58 // vmovss xmm14, dword [rsi + 88] + LONG $0x5610fac5; BYTE $0x5c // vmovss xmm2, dword [rsi + 92] + LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] + LONG $0x5e10fac5; BYTE $0x60 // vmovss xmm3, dword [rsi + 96] + LONG $0x6610fac5; BYTE $0x64 // vmovss xmm4, dword [rsi + 100] + LONG $0xf02ef8c5 // vucomiss xmm6, xmm0 + LONG $0x7610fac5; BYTE $0x68 // vmovss xmm6, dword [rsi + 104] + 
LONG $0x7e10fac5; BYTE $0x6c // vmovss xmm7, dword [rsi + 108] + LONG $0xd3970f41 // seta r11b + LONG $0x4e10fac5; BYTE $0x70 // vmovss xmm1, dword [rsi + 112] + LONG $0x6e10fac5; BYTE $0x74 // vmovss xmm5, dword [rsi + 116] + LONG $0xc02e78c5 // vucomiss xmm8, xmm0 + QUAD $0x000000802494970f // seta byte [rsp + 128] + LONG $0xc82e78c5 // vucomiss xmm9, xmm0 + WORD $0x970f; BYTE $0xd1 // seta cl + LONG $0xd02e78c5 // vucomiss xmm10, xmm0 + LONG $0xd7970f40 // seta dil + LONG $0xd82e78c5 // vucomiss xmm11, xmm0 + LONG $0xd0970f41 // seta r8b + LONG $0xe02e78c5 // vucomiss xmm12, xmm0 + LONG $0xd2970f41 // seta r10b + LONG $0xe82e78c5 // vucomiss xmm13, xmm0 + LONG $0xd6970f41 // seta r14b + LONG $0xf02e78c5 // vucomiss xmm14, xmm0 + LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + LONG $0xd1970f41 // seta r9b + LONG $0xd82ef8c5 // vucomiss xmm3, xmm0 + LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] + LONG $0xe02ef8c5 // vucomiss xmm4, xmm0 + LONG $0xd7970f41 // seta r15b + LONG $0xf02ef8c5 // vucomiss xmm6, xmm0 + LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] + LONG $0xf82ef8c5 // vucomiss xmm7, xmm0 + LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] + LONG $0xe82ef8c5 // vucomiss xmm5, xmm0 + LONG $0x4e10fac5; BYTE $0x78 // vmovss xmm1, dword [rsi + 120] + LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454970f; BYTE $0x04 // seta byte [rsp + 4] + LONG $0x4e10fac5; BYTE $0x7c // vmovss xmm1, dword [rsi + 124] + LONG $0x80ee8348 // sub rsi, -128 + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + WORD $0x970f; BYTE $0xd2 // seta dl + WORD $0xdb00 // add bl, bl + LONG $0x20249c02; WORD $0x0001; BYTE $0x00 // add bl, byte [rsp + 288] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + WORD $0x8941; BYTE $0xdc // mov r12d, ebx + LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + LONG $0x64b60f44; WORD $0x3024 // movzx r12d, byte [rsp + 48] + LONG $0x06e4c041 // shl r12b, 6 + LONG $0x6cb60f44; WORD $0x1824 // movzx r13d, byte [rsp + 24] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + WORD $0xc000 // add al, al + LONG $0x00248402; WORD $0x0001; BYTE $0x00 // add al, byte [rsp + 256] + LONG $0x64b60f44; WORD $0x1024 // movzx r12d, byte [rsp + 16] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + WORD $0x8944; BYTE $0xe0 // mov eax, r12d + QUAD $0x00014024a4b60f44; BYTE $0x00 // movzx r12d, byte [rsp + 320] + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0xc308 // or bl, al + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e3c041 // shl r11b, 7 + WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0xc900 // add cl, cl + 
LONG $0x80248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 128] + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x04e2c041 // shl r10b, 4 + WORD $0x0845; BYTE $0xc2 // or r10b, r8b + LONG $0x05e6c041 // shl r14b, 5 + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0841; BYTE $0xdb // or r11b, bl + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e1c041 // shl r9b, 7 + WORD $0x0841; BYTE $0xc1 // or r9b, al + QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] + WORD $0x8844; BYTE $0x28 // mov byte [rax], r13b + WORD $0x0845; BYTE $0xf1 // or r9b, r14b + WORD $0x0045; BYTE $0xff // add r15b, r15b + LONG $0x247c0244; BYTE $0x58 // add r15b, byte [rsp + 88] + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0x0844; BYTE $0xf9 // or cl, r15b + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xd908 // or cl, bl + LONG $0x01588844 // mov byte [rax + 1], r11b + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + LONG $0x02488844 // mov byte [rax + 2], r9b + WORD $0xca08 // or dl, cl + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl + LONG $0x04c08348 // add rax, 4 + QUAD $0x000000f024848948 // mov qword [rsp + 240], rax + LONG $0x24448348; WORD $0xff78 // add qword [rsp + 120], -1 + JNE LBB7_177 + QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] + QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] + QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] + +LBB7_179: + LONG $0x05e6c149 // shl r14, 5 + WORD $0x394d; BYTE $0xd6 // cmp r14, r10 + JGE LBB7_191 + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0x294d; BYTE $0xf0 // sub r8, r14 + WORD $0xf749; BYTE $0xd6 // not r14 + WORD $0x014d; BYTE $0xd6 // add r14, r10 + JNE LBB7_184 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB7_182 + +LBB7_42: WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -37888,10 +39234,10 @@ LBB7_37: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_41 + JE LBB7_46 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB7_39: +LBB7_44: WORD $0x3a44; BYTE $0x36 // cmp r14b, byte [rsi] LONG $0x01768d48 // lea rsi, [rsi + 1] WORD $0xd219 // sbb edx, edx @@ -37911,38 +39257,38 @@ LBB7_39: LONG $0x3b1c8841 // mov byte [r11 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_39 + JNE LBB7_44 LONG $0x01c38349 // add r11, 1 -LBB7_41: +LBB7_46: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB7_42 + JL LBB7_47 LONG $0x20ff8349 // cmp r15, 32 LONG $0x24748944; BYTE $0x04 // mov dword [rsp + 4], r14d QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 QUAD $0x0000017824bc894c // mov qword [rsp + 376], r15 - JB LBB7_44 + JB LBB7_49 WORD $0x894c; BYTE 
$0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc3 // cmp r11, rax - JAE LBB7_47 + JAE LBB7_52 LONG $0xbb048d4b // lea rax, [r11 + 4*r15] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB7_47 + JAE LBB7_52 -LBB7_44: +LBB7_49: WORD $0xc031 // xor eax, eax QUAD $0x0000016824848948 // mov qword [rsp + 360], rax WORD $0x8949; BYTE $0xf4 // mov r12, rsi QUAD $0x00000160249c894c // mov qword [rsp + 352], r11 -LBB7_50: +LBB7_55: QUAD $0x0000016824bc2b4c // sub r15, qword [rsp + 360] LONG $0x247c894c; BYTE $0x78 // mov qword [rsp + 120], r15 -LBB7_51: +LBB7_56: WORD $0x894c; BYTE $0xe1 // mov rcx, r12 LONG $0x24343845 // cmp byte [r12], r14b QUAD $0x000001402494970f // seta byte [rsp + 320] @@ -38105,12 +39451,12 @@ LBB7_51: LONG $0x04c68348 // add rsi, 4 QUAD $0x0000016024b48948 // mov qword [rsp + 352], rsi LONG $0x24448348; WORD $0xff78 // add qword [rsp + 120], -1 - JNE LBB7_51 + JNE LBB7_56 QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] QUAD $0x0000017824bc8b4c // mov r15, qword [rsp + 376] - JMP LBB7_53 + JMP LBB7_58 -LBB7_122: +LBB7_129: WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -38120,10 +39466,10 @@ LBB7_122: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_126 + JE LBB7_133 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB7_124: +LBB7_131: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x04768d48 // lea rsi, [rsi + 4] WORD $0x9f0f; BYTE $0xd2 // setg dl @@ -38144,18 +39490,18 @@ LBB7_124: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_124 + JNE LBB7_131 LONG $0x01c38349 // add r11, 1 -LBB7_126: +LBB7_133: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB7_127 + JL LBB7_134 QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 -LBB7_129: +LBB7_136: QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] @@ -38310,174 +39656,249 @@ LBB7_129: LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c38349 // add r11, 4 QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 - JNE LBB7_129 + JNE LBB7_136 WORD $0x894d; BYTE $0xde // mov r14, r11 QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB7_132 - JMP LBB7_192 + JL LBB7_139 + JMP LBB7_191 LBB7_18: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 - -LBB7_116: - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf8 // sub r8, r15 - WORD $0xf749; BYTE $0xd7 // not r15 - WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_120 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_118 - -LBB7_36: - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 + JGE LBB7_191 -LBB7_184: +LBB7_123: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_186 + JNE 
LBB7_127 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_188 + JMP LBB7_125 LBB7_9: QUAD $0x00000160249c894c // mov qword [rsp + 352], r11 WORD $0x8949; BYTE $0xf4 // mov r12, rsi -LBB7_71: +LBB7_76: LONG $0x05e5c149 // shl r13, 5 WORD $0x394d; BYTE $0xfd // cmp r13, r15 - JGE LBB7_192 + JGE LBB7_191 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xe8 // sub r8, r13 WORD $0xf749; BYTE $0xd5 // not r13 WORD $0x014d; BYTE $0xfd // add r13, r15 - JNE LBB7_74 + JNE LBB7_79 WORD $0xf631 // xor esi, esi - JMP LBB7_77 + JMP LBB7_82 LBB7_27: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 + JGE LBB7_191 -LBB7_142: +LBB7_149: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_146 + JNE LBB7_153 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_144 - -LBB7_84: - WORD $0x894d; BYTE $0xde // mov r14, r11 - LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 + JMP LBB7_151 LBB7_89: - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf8 // sub r8, r15 - WORD $0xf749; BYTE $0xd7 // not r15 - WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_93 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_91 - -LBB7_100: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 + JGE LBB7_191 -LBB7_105: +LBB7_94: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_110 + JNE LBB7_100 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_107 + JMP LBB7_96 -LBB7_153: +LBB7_107: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 + JGE LBB7_191 -LBB7_158: +LBB7_112: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_162 + JNE LBB7_117 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_160 + JMP LBB7_114 -LBB7_169: +LBB7_160: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 + JGE LBB7_191 -LBB7_174: +LBB7_165: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_178 + JNE LBB7_169 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_176 + JMP LBB7_167 -LBB7_42: +LBB7_47: QUAD $0x00000160249c894c // mov qword [rsp + 352], r11 WORD $0x8949; BYTE $0xf4 // mov r12, rsi -LBB7_53: +LBB7_58: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 + JGE LBB7_191 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_56 + JNE LBB7_61 WORD $0xc031 // xor eax, eax - JMP LBB7_59 + JMP LBB7_64 -LBB7_127: +LBB7_134: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB7_192 + JGE LBB7_191 -LBB7_132: +LBB7_139: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB7_136 + JNE 
LBB7_143 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_134 + JMP LBB7_141 -LBB7_120: +LBB7_186: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + WORD $0x894d; BYTE $0xde // mov r14, r11 + +LBB7_187: + LONG $0x0e10fbc5 // vmovsd xmm1, qword [rsi] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xd7 // mov rdi, r10 + LONG $0x03efc148 // shr rdi, 3 + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + LONG $0x14b60f41; BYTE $0x3e // movzx edx, byte [r14 + rdi] + WORD $0xd030 // xor al, dl + WORD $0xc320 // and bl, al + WORD $0xd330 // xor bl, dl + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x02c28349 // add r10, 2 + LONG $0x4e10fbc5; BYTE $0x08 // vmovsd xmm1, qword [rsi + 8] + LONG $0x10c68348 // add rsi, 16 + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al + WORD $0xd830 // xor al, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b2 // mov dl, 1 + WORD $0xe2d2 // shl dl, cl + WORD $0xc220 // and dl, al + WORD $0xda30 // xor dl, bl + LONG $0x3e148841 // mov byte [r14 + rdi], dl + WORD $0x394d; BYTE $0xd1 // cmp r9, r10 + JNE LBB7_187 + +LBB7_188: + LONG $0x01c0f641 // test r8b, 1 + JE LBB7_191 + LONG $0x0e10fbc5 // vmovsd xmm1, qword [rsi] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + JMP LBB7_190 + +LBB7_184: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + WORD $0x894d; BYTE $0xde // mov r14, r11 + +LBB7_185: + LONG $0x0e10fac5 // vmovss xmm1, dword [rsi] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xd7 // mov rdi, r10 + LONG $0x03efc148 // shr rdi, 3 + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + LONG $0x14b60f41; BYTE $0x3e // movzx edx, byte [r14 + rdi] + WORD $0xd030 // xor al, dl + WORD $0xc320 // and bl, al + WORD $0xd330 // xor bl, dl + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x02c28349 // add r10, 2 + LONG $0x4e10fac5; BYTE $0x04 // vmovss xmm1, dword [rsi + 4] + LONG $0x08c68348 // add rsi, 8 + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al + WORD $0xd830 // xor al, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b2 // mov dl, 1 + WORD $0xe2d2 // shl dl, cl + WORD $0xc220 // and dl, al + WORD $0xda30 // xor dl, bl + LONG $0x3e148841 // mov byte [r14 + rdi], dl + WORD $0x394d; BYTE $0xd1 // cmp r9, r10 + JNE LBB7_185 + +LBB7_182: + LONG $0x01c0f641 // test r8b, 1 + JE LBB7_191 + LONG $0x0e10fac5 // vmovss xmm1, dword [rsi] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + +LBB7_190: + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xd2 // mov rdx, r10 + LONG $0x03eac148 // shr rdx, 3 + LONG $0x13348a41 // mov sil, byte [r11 + rdx] + LONG $0x07e28041 // and r10b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf0 // xor al, sil + WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x131c8841 // mov byte [r11 + rdx], bl + JMP LBB7_191 + +LBB7_127: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD 
$0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB7_121: +LBB7_128: WORD $0x3b44; BYTE $0x2e // cmp r13d, dword [rsi] WORD $0xff19 // sbb edi, edi WORD $0x894c; BYTE $0xda // mov rdx, r11 @@ -38503,61 +39924,21 @@ LBB7_121: WORD $0xc330 // xor bl, al LONG $0x171c8841 // mov byte [r15 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB7_121 + JNE LBB7_128 -LBB7_118: +LBB7_125: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 + JE LBB7_191 WORD $0x3b44; BYTE $0x2e // cmp r13d, dword [rsi] - JMP LBB7_190 + JMP LBB7_98 -LBB7_186: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d - WORD $0x894d; BYTE $0xf7 // mov r15, r14 - -LBB7_187: - LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - WORD $0xc019 // sbb eax, eax - WORD $0x894c; BYTE $0xdf // mov rdi, r11 - LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3f // movzx r9d, byte [r15 + rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0x8944; BYTE $0xd9 // mov ecx, r11d - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3f1c8841 // mov byte [r15 + rdi], bl - LONG $0x02c38349 // add r11, 2 - LONG $0x462ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rsi + 8] - LONG $0x10768d48 // lea rsi, [rsi + 16] - WORD $0xc019 // sbb eax, eax - WORD $0xd830 // xor al, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b2 // mov dl, 1 - WORD $0xe2d2 // shl dl, cl - WORD $0xc220 // and dl, al - WORD $0xda30 // xor dl, bl - LONG $0x3f148841 // mov byte [r15 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_187 - -LBB7_188: - LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 - LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - JMP LBB7_190 - -LBB7_74: +LBB7_79: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi QUAD $0x00000160249c8b4c // mov r11, qword [rsp + 352] -LBB7_75: +LBB7_80: LONG $0x34343845 // cmp byte [r12 + rsi], r14b WORD $0x9f0f; BYTE $0xd3 // setg bl WORD $0xdbf6 // neg bl @@ -38584,12 +39965,12 @@ LBB7_75: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB7_75 + JNE LBB7_80 WORD $0x0149; BYTE $0xf4 // add r12, rsi -LBB7_77: +LBB7_82: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 + JE LBB7_191 LONG $0x24343845 // cmp byte [r12], r14b WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al @@ -38605,15 +39986,15 @@ LBB7_77: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB7_192 + JMP LBB7_191 -LBB7_146: +LBB7_153: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB7_147: +LBB7_154: WORD $0x3b4c; BYTE $0x2e // cmp r13, qword [rsi] WORD $0xff19 // sbb edi, edi WORD $0x894c; BYTE $0xda // mov rdx, r11 @@ -38639,21 +40020,21 @@ LBB7_147: WORD $0xc330 // xor bl, al LONG $0x171c8841 // mov byte [r15 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB7_147 + JNE LBB7_154 -LBB7_144: +LBB7_151: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 + JE LBB7_191 WORD $0x3b4c; BYTE $0x2e // cmp r13, qword [rsi] - JMP LBB7_190 + JMP LBB7_98 -LBB7_93: +LBB7_100: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, 
r14 -LBB7_94: +LBB7_101: LONG $0x2e3b4466 // cmp r13w, word [rsi] WORD $0xff19 // sbb edi, edi WORD $0x894c; BYTE $0xda // mov rdx, r11 @@ -38679,21 +40060,33 @@ LBB7_94: WORD $0xc330 // xor bl, al LONG $0x171c8841 // mov byte [r15 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB7_94 + JNE LBB7_101 -LBB7_91: +LBB7_96: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 + JE LBB7_191 LONG $0x2e3b4466 // cmp r13w, word [rsi] - JMP LBB7_190 -LBB7_110: +LBB7_98: + WORD $0xc019 // sbb eax, eax + WORD $0x894c; BYTE $0xda // mov rdx, r11 + LONG $0x03eac148 // shr rdx, 3 + LONG $0x16348a41 // mov sil, byte [r14 + rdx] + LONG $0x07e38041 // and r11b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf0 // xor al, sil + WORD $0xc320 // and bl, al + JMP LBB7_99 + +LBB7_117: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB7_111: +LBB7_118: LONG $0x2e394466 // cmp word [rsi], r13w WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al @@ -38721,21 +40114,21 @@ LBB7_111: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_111 + JNE LBB7_118 -LBB7_107: +LBB7_114: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 + JE LBB7_191 LONG $0x2e394466 // cmp word [rsi], r13w - JMP LBB7_109 + JMP LBB7_116 -LBB7_162: +LBB7_169: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB7_163: +LBB7_170: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al @@ -38763,73 +40156,21 @@ LBB7_163: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_163 + JNE LBB7_170 -LBB7_160: +LBB7_167: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 + JE LBB7_191 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - JMP LBB7_109 - -LBB7_178: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d - WORD $0x894d; BYTE $0xf7 // mov r15, r14 + JMP LBB7_116 -LBB7_179: - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - WORD $0xc019 // sbb eax, eax - WORD $0x894c; BYTE $0xdf // mov rdi, r11 - LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3f // movzx r9d, byte [r15 + rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0x8944; BYTE $0xd9 // mov ecx, r11d - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3f1c8841 // mov byte [r15 + rdi], bl - LONG $0x02c38349 // add r11, 2 - LONG $0x462ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rsi + 4] - LONG $0x08768d48 // lea rsi, [rsi + 8] - WORD $0xc019 // sbb eax, eax - WORD $0xd830 // xor al, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b2 // mov dl, 1 - WORD $0xe2d2 // shl dl, cl - WORD $0xc220 // and dl, al - WORD $0xda30 // xor dl, bl - LONG $0x3f148841 // mov byte [r15 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_179 - -LBB7_176: - LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - -LBB7_190: - WORD $0xc019 // sbb eax, eax - WORD $0x894c; BYTE $0xda // mov rdx, r11 - LONG $0x03eac148 // shr rdx, 3 - LONG $0x16348a41 // mov sil, byte [r14 + 
rdx] - LONG $0x07e38041 // and r11b, 7 - WORD $0x01b3 // mov bl, 1 - WORD $0x8944; BYTE $0xd9 // mov ecx, r11d - WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf0 // xor al, sil - WORD $0xc320 // and bl, al - JMP LBB7_191 - -LBB7_56: +LBB7_61: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0xc031 // xor eax, eax QUAD $0x0000016024948b4c // mov r10, qword [rsp + 352] -LBB7_57: +LBB7_62: LONG $0x04343a45 // cmp r14b, byte [r12 + rax] WORD $0xf619 // sbb esi, esi WORD $0x8948; BYTE $0xc7 // mov rdi, rax @@ -38854,12 +40195,12 @@ LBB7_57: WORD $0xd330 // xor bl, dl LONG $0x3a1c8841 // mov byte [r10 + rdi], bl WORD $0x3949; BYTE $0xc1 // cmp r9, rax - JNE LBB7_57 + JNE LBB7_62 WORD $0x0149; BYTE $0xc4 // add r12, rax -LBB7_59: +LBB7_64: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 + JE LBB7_191 LONG $0x24343a45 // cmp r14b, byte [r12] WORD $0xd219 // sbb edx, edx WORD $0x8948; BYTE $0xc6 // mov rsi, rax @@ -38874,15 +40215,15 @@ LBB7_59: WORD $0xd320 // and bl, dl WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x301c8841 // mov byte [r8 + rsi], bl - JMP LBB7_192 + JMP LBB7_191 -LBB7_136: +LBB7_143: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB7_137: +LBB7_144: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al @@ -38910,14 +40251,14 @@ LBB7_137: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_137 + JNE LBB7_144 -LBB7_134: +LBB7_141: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_192 + JE LBB7_191 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d -LBB7_109: +LBB7_116: WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xda // mov rdx, r11 @@ -38930,16 +40271,16 @@ LBB7_109: WORD $0x3040; BYTE $0xf0 // xor al, sil WORD $0xc320 // and bl, al -LBB7_191: +LBB7_99: WORD $0x3040; BYTE $0xf3 // xor bl, sil LONG $0x161c8841 // mov byte [r14 + rdx], bl -LBB7_192: +LBB7_191: MOVQ 1344(SP), SP VZEROUPPER RET -LBB7_65: +LBB7_70: LONG $0xe0e58349 // and r13, -32 WORD $0x894c; BYTE $0xe8 // mov rax, r13 LONG $0x05e0c148 // shl rax, 5 @@ -38954,7 +40295,7 @@ LBB7_65: WORD $0xc031 // xor eax, eax QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 -LBB7_66: +LBB7_71: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000017024848948 // mov qword [rsp + 368], rax LONG $0x05e3c148 // shl rbx, 5 @@ -41078,16 +42419,16 @@ LBB7_66: LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000180248c3b48 // cmp rcx, qword [rsp + 384] - JNE LBB7_66 + JNE LBB7_71 QUAD $0x0000016824ac8b4c // mov r13, qword [rsp + 360] QUAD $0x0000018024ac3b4c // cmp r13, qword [rsp + 384] QUAD $0x000000f824bc8b4c // mov r15, qword [rsp + 248] LONG $0x24748b44; BYTE $0x04 // mov r14d, dword [rsp + 4] QUAD $0x0000024024a48b4c // mov r12, qword [rsp + 576] - JNE LBB7_68 - JMP LBB7_71 + JNE LBB7_73 + JMP LBB7_76 -LBB7_47: +LBB7_52: LONG $0xe0e78349 // and r15, -32 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 @@ -41103,7 +42444,7 @@ LBB7_47: QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 QUAD $0x00024024b46f7dc5; BYTE $0x00 // vmovdqa ymm14, yword [rsp + 576] -LBB7_48: +LBB7_53: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000017024848948 // mov qword [rsp + 368], rax LONG $0x05e3c148 // shl rbx, 5 @@ -43275,14 +44616,14 @@ LBB7_48: LONG $0x20c18348 // add rcx, 32 WORD 
$0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000168248c3b48 // cmp rcx, qword [rsp + 360] - JNE LBB7_48 + JNE LBB7_53 QUAD $0x0000017824bc8b4c // mov r15, qword [rsp + 376] QUAD $0x0000016824bc3b4c // cmp r15, qword [rsp + 360] QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] LONG $0x24748b44; BYTE $0x04 // mov r14d, dword [rsp + 4] QUAD $0x000001f824a48b4c // mov r12, qword [rsp + 504] - JNE LBB7_50 - JMP LBB7_53 + JNE LBB7_55 + JMP LBB7_58 DATA LCDATA6<>+0x000(SB)/8, $0x0101010101010101 DATA LCDATA6<>+0x008(SB)/8, $0x0101010101010101 @@ -53149,15 +54490,15 @@ TEXT ·_comparison_greater_equal_arr_scalar_avx2(SB), $1384-48 WORD $0x894d; BYTE $0xc2 // mov r10, r8 WORD $0x8949; BYTE $0xcb // mov r11, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JG LBB10_13 + JG LBB10_15 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JLE LBB10_25 + JLE LBB10_2 WORD $0xff83; BYTE $0x04 // cmp edi, 4 - JE LBB10_48 + JE LBB10_81 WORD $0xff83; BYTE $0x05 // cmp edi, 5 - JE LBB10_56 + JE LBB10_97 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB10_175 + JNE LBB10_194 WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -53167,10 +54508,10 @@ TEXT ·_comparison_greater_equal_arr_scalar_avx2(SB), $1384-48 LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_9 + JE LBB10_13 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_7: +LBB10_11: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x04768d48 // lea rsi, [rsi + 4] LONG $0x000000ba; BYTE $0x00 // mov edx, 0 @@ -53191,23 +54532,23 @@ LBB10_7: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_7 + JNE LBB10_11 LONG $0x01c38349 // add r11, 1 -LBB10_9: +LBB10_13: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_100 + JL LBB10_14 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB10_11: +LBB10_115: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d QUAD $0x000000982494930f // setae byte [rsp + 152] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7930f40 // setae dil + LONG $0xd2930f41 // setae r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6930f41 // setae r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d @@ -53223,11 +54564,11 @@ LBB10_11: LONG $0x206e3944 // cmp dword [rsi + 32], r13d LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0xd7930f40 // setae dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1930f41 // setae r9b + LONG $0xd0930f41 // setae r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2930f41 // setae r10b + LONG $0xd1930f41 // setae r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3930f41 // setae r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d @@ -53267,67 +54608,68 @@ LBB10_11: LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x2454930f; BYTE $0x1c // setae byte [rsp + 28] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0930f41 // setae r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x930f; BYTE $0xd2 // setae dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add 
r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x70 // add dil, byte [rsp + 112] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 
// or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xc900 // add cl, cl @@ -53350,33 +54692,33 @@ LBB10_11: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB10_11 + JNE LBB10_115 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB10_101 - JMP LBB10_175 + JL LBB10_118 + JMP LBB10_194 -LBB10_13: +LBB10_15: WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JLE LBB10_38 + JLE LBB10_16 WORD $0xff83; BYTE $0x09 // cmp edi, 9 - JE LBB10_64 + JE LBB10_150 WORD $0xff83; BYTE $0x0b // cmp edi, 11 - JE LBB10_72 + JE LBB10_166 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB10_175 + JNE LBB10_194 LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xfa490f4d // cmovns r15, r10 @@ -53386,14 +54728,15 @@ LBB10_13: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x0210fbc5 // vmovsd xmm0, qword [rdx] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_21 + JE LBB10_31 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_19: - LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - WORD $0x960f; BYTE $0xd2 // setbe dl +LBB10_29: + LONG $0x0e10fbc5 // vmovsd xmm1, qword [rsi] LONG $0x08c68348 // add rsi, 8 - WORD $0xdaf6 // neg dl + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x000000ba; BYTE $0x00 // mov edx, 0 + WORD $0xd280; BYTE $0xff // adc dl, -1 LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax @@ -53410,186 +54753,214 @@ LBB10_19: LONG $0x3b1c8841 // mov byte [r11 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_19 + JNE LBB10_29 LONG $0x01c38349 // add r11, 1 -LBB10_21: +LBB10_31: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_103 + JL LBB10_32 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 QUAD $0x00000110249c894c // mov qword [rsp + 
272], r11 -LBB10_23: - LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - QUAD $0x000000a02494960f // setbe byte [rsp + 160] - LONG $0x462ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rsi + 8] - LONG $0xd1960f41 // setbe r9b - LONG $0x462ef9c5; BYTE $0x10 // vucomisd xmm0, qword [rsi + 16] - LONG $0xd6960f41 // setbe r14b - LONG $0x462ef9c5; BYTE $0x18 // vucomisd xmm0, qword [rsi + 24] - LONG $0xd5960f41 // setbe r13b - LONG $0x462ef9c5; BYTE $0x20 // vucomisd xmm0, qword [rsi + 32] - QUAD $0x000000902494960f // setbe byte [rsp + 144] - LONG $0x462ef9c5; BYTE $0x28 // vucomisd xmm0, qword [rsi + 40] - LONG $0x2454960f; BYTE $0x60 // setbe byte [rsp + 96] - LONG $0x462ef9c5; BYTE $0x30 // vucomisd xmm0, qword [rsi + 48] - WORD $0x960f; BYTE $0xd0 // setbe al - LONG $0x462ef9c5; BYTE $0x38 // vucomisd xmm0, qword [rsi + 56] - WORD $0x960f; BYTE $0xd3 // setbe bl - LONG $0x462ef9c5; BYTE $0x40 // vucomisd xmm0, qword [rsi + 64] - LONG $0x2454960f; BYTE $0x78 // setbe byte [rsp + 120] - LONG $0x462ef9c5; BYTE $0x48 // vucomisd xmm0, qword [rsi + 72] - WORD $0x960f; BYTE $0xd2 // setbe dl - LONG $0x462ef9c5; BYTE $0x50 // vucomisd xmm0, qword [rsi + 80] - LONG $0xd7960f40 // setbe dil - LONG $0x462ef9c5; BYTE $0x58 // vucomisd xmm0, qword [rsi + 88] - LONG $0xd2960f41 // setbe r10b - LONG $0x462ef9c5; BYTE $0x60 // vucomisd xmm0, qword [rsi + 96] - LONG $0xd3960f41 // setbe r11b - LONG $0x462ef9c5; BYTE $0x68 // vucomisd xmm0, qword [rsi + 104] - LONG $0xd4960f41 // setbe r12b - LONG $0x462ef9c5; BYTE $0x70 // vucomisd xmm0, qword [rsi + 112] - QUAD $0x000000802494960f // setbe byte [rsp + 128] - LONG $0x462ef9c5; BYTE $0x78 // vucomisd xmm0, qword [rsi + 120] - WORD $0x960f; BYTE $0xd1 // setbe cl - QUAD $0x00000080862ef9c5 // vucomisd xmm0, qword [rsi + 128] - LONG $0x2454960f; BYTE $0x50 // setbe byte [rsp + 80] - QUAD $0x00000088862ef9c5 // vucomisd xmm0, qword [rsi + 136] - LONG $0x2454960f; BYTE $0x70 // setbe byte [rsp + 112] - QUAD $0x00000090862ef9c5 // vucomisd xmm0, qword [rsi + 144] - QUAD $0x000000882494960f // setbe byte [rsp + 136] - QUAD $0x00000098862ef9c5 // vucomisd xmm0, qword [rsi + 152] - LONG $0x2454960f; BYTE $0x48 // setbe byte [rsp + 72] - QUAD $0x000000a0862ef9c5 // vucomisd xmm0, qword [rsi + 160] - LONG $0x2454960f; BYTE $0x58 // setbe byte [rsp + 88] - QUAD $0x000000a8862ef9c5 // vucomisd xmm0, qword [rsi + 168] - LONG $0x2454960f; BYTE $0x68 // setbe byte [rsp + 104] - QUAD $0x000000b0862ef9c5 // vucomisd xmm0, qword [rsi + 176] - LONG $0x2454960f; BYTE $0x40 // setbe byte [rsp + 64] - QUAD $0x000000b8862ef9c5 // vucomisd xmm0, qword [rsi + 184] - LONG $0xd7960f41 // setbe r15b - QUAD $0x000000c0862ef9c5 // vucomisd xmm0, qword [rsi + 192] - LONG $0x2454960f; BYTE $0x20 // setbe byte [rsp + 32] - QUAD $0x000000c8862ef9c5 // vucomisd xmm0, qword [rsi + 200] - LONG $0x2454960f; BYTE $0x28 // setbe byte [rsp + 40] - QUAD $0x000000d0862ef9c5 // vucomisd xmm0, qword [rsi + 208] - LONG $0x2454960f; BYTE $0x30 // setbe byte [rsp + 48] - QUAD $0x000000d8862ef9c5 // vucomisd xmm0, qword [rsi + 216] - LONG $0x2454960f; BYTE $0x38 // setbe byte [rsp + 56] - QUAD $0x000000e0862ef9c5 // vucomisd xmm0, qword [rsi + 224] - QUAD $0x000001402494960f // setbe byte [rsp + 320] - QUAD $0x000000e8862ef9c5 // vucomisd xmm0, qword [rsi + 232] - QUAD $0x000001202494960f // setbe byte [rsp + 288] - QUAD $0x000000f0862ef9c5 // vucomisd xmm0, qword [rsi + 240] - LONG $0x2454960f; BYTE $0x1c // setbe byte [rsp + 28] - QUAD $0x000000f8862ef9c5 // vucomisd xmm0, qword [rsi + 248] - LONG 
$0xd0960f41 // setbe r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x000000a0248c0244 // add r9b, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b +LBB10_183: + LONG $0x0e10fbc5 // vmovsd xmm1, qword [rsi] + LONG $0x5610fbc5; BYTE $0x08 // vmovsd xmm2, qword [rsi + 8] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454930f; BYTE $0x78 // setae byte [rsp + 120] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0x4e10fbc5; BYTE $0x10 // vmovsd xmm1, qword [rsi + 16] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0xd7930f40 // setae dil + LONG $0x4e10fbc5; BYTE $0x18 // vmovsd xmm1, qword [rsi + 24] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0xd0930f41 // setae r8b + LONG $0x4e10fbc5; BYTE $0x20 // vmovsd xmm1, qword [rsi + 32] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0xd1930f41 // setae r9b + LONG $0x4e10fbc5; BYTE $0x28 // vmovsd xmm1, qword [rsi + 40] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000a02494930f // setae byte [rsp + 160] + LONG $0x4e10fbc5; BYTE $0x30 // vmovsd xmm1, qword [rsi + 48] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000802494930f // setae byte [rsp + 128] + LONG $0x4e10fbc5; BYTE $0x38 // vmovsd xmm1, qword [rsi + 56] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] + LONG $0x4e10fbc5; BYTE $0x40 // vmovsd xmm1, qword [rsi + 64] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454930f; BYTE $0x60 // setae byte [rsp + 96] + LONG $0x4e10fbc5; BYTE $0x48 // vmovsd xmm1, qword [rsi + 72] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0xd2930f41 // setae r10b + LONG $0x4e10fbc5; BYTE $0x50 // vmovsd xmm1, qword [rsi + 80] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + WORD $0x930f; BYTE $0xd3 // setae bl + LONG $0x4e10fbc5; BYTE $0x58 // vmovsd xmm1, qword [rsi + 88] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0xd7930f41 // setae r15b + LONG $0x4e10fbc5; BYTE $0x60 // vmovsd xmm1, qword [rsi + 96] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + WORD $0x930f; BYTE $0xd0 // setae al + LONG $0x4e10fbc5; BYTE $0x68 // vmovsd xmm1, qword [rsi + 104] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000902494930f // setae byte [rsp + 144] + LONG $0x4e10fbc5; BYTE $0x70 // vmovsd xmm1, qword [rsi + 112] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454930f; BYTE $0x58 // setae byte [rsp + 88] + LONG $0x4e10fbc5; BYTE $0x78 // vmovsd xmm1, qword [rsi + 120] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454930f; BYTE $0x48 // setae byte [rsp + 72] + QUAD $0x000000808e10fbc5 // vmovsd xmm1, qword [rsi + 128] + QUAD $0x000000889610fbc5 // vmovsd xmm2, qword [rsi + 136] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000908e10fbc5 // vmovsd xmm1, qword [rsi + 144] + LONG $0x2454930f; BYTE $0x28 // setae byte [rsp + 40] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + QUAD $0x000000989610fbc5 // vmovsd xmm2, qword [rsi + 152] + LONG $0xd3930f41 // setae r11b + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000a08e10fbc5 // vmovsd xmm1, qword [rsi + 160] + LONG $0xd6930f41 // setae r14b + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + QUAD $0x000000a89610fbc5 // vmovsd xmm2, qword [rsi + 168] + LONG $0xd4930f41 // setae r12b + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000b08e10fbc5 // vmovsd xmm1, qword [rsi + 176] + QUAD $0x000000882494930f // setae byte 
[rsp + 136] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + QUAD $0x000000b89610fbc5 // vmovsd xmm2, qword [rsi + 184] + LONG $0x2454930f; BYTE $0x50 // setae byte [rsp + 80] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000c08e10fbc5 // vmovsd xmm1, qword [rsi + 192] + LONG $0x2454930f; BYTE $0x68 // setae byte [rsp + 104] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + QUAD $0x000000c89610fbc5 // vmovsd xmm2, qword [rsi + 200] + LONG $0xd5930f41 // setae r13b + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000d08e10fbc5 // vmovsd xmm1, qword [rsi + 208] + QUAD $0x000001202494930f // setae byte [rsp + 288] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + QUAD $0x000000d89610fbc5 // vmovsd xmm2, qword [rsi + 216] + LONG $0x2454930f; BYTE $0x40 // setae byte [rsp + 64] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000e08e10fbc5 // vmovsd xmm1, qword [rsi + 224] + LONG $0x2454930f; BYTE $0x30 // setae byte [rsp + 48] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + QUAD $0x000000e89610fbc5 // vmovsd xmm2, qword [rsi + 232] + LONG $0x2454930f; BYTE $0x38 // setae byte [rsp + 56] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + QUAD $0x000000f08e10fbc5 // vmovsd xmm1, qword [rsi + 240] + LONG $0x2454930f; BYTE $0x20 // setae byte [rsp + 32] + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + QUAD $0x000000f89610fbc5 // vmovsd xmm2, qword [rsi + 248] + QUAD $0x000001402494930f // setae byte [rsp + 320] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x2454930f; BYTE $0x1c // setae byte [rsp + 28] + LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + LONG $0xd02ef9c5 // vucomisd xmm2, xmm0 + WORD $0x930f; BYTE $0xd1 // setae cl WORD $0xd200 // add dl, dl LONG $0x78245402 // add dl, byte [rsp + 120] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x04e1c041 // shl r9b, 4 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + QUAD $0x000000a02494b60f // movzx edx, byte [rsp + 160] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000008024bcb60f // movzx edi, byte [rsp + 128] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xc000 // add al, al - LONG $0x50244402 // add al, byte [rsp + 80] - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 + QUAD $0x0000802484b60f44; BYTE $0x00 // movzx r8d, byte [rsp + 128] + LONG $0x06e0c041 // shl r8b, 6 + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + LONG $0x24540244; BYTE $0x60 // add r10b, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + LONG 
$0x03e7c041 // shl r15b, 3 + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xf8 // or al, r15b + WORD $0x0840; BYTE $0xd7 // or dil, dl + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] + WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + WORD $0xc308 // or bl, al + WORD $0x0045; BYTE $0xdb // add r11b, r11b + LONG $0x245c0244; BYTE $0x28 // add r11b, byte [rsp + 40] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xde // or r14b, r11b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xf4 // or r12b, r14b + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0x8941; BYTE $0xc0 // mov r8d, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0844; BYTE $0xc0 // or al, r8b + WORD $0xd308 // or bl, dl LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx + WORD $0xe2c0; BYTE $0x06 // shl dl, 6 + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xd5 // or r13b, dl QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl + WORD $0x8840; BYTE $0x3a // mov byte [rdx], dil + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xc000 // add al, al + LONG $0x20248402; WORD $0x0001; BYTE $0x00 // add al, byte [rsp + 288] + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or 
al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0x5a88; BYTE $0x01 // mov byte [rdx + 1], bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + LONG $0x026a8844 // mov byte [rdx + 2], r13b + WORD $0xc108 // or cl, al + WORD $0x4a88; BYTE $0x03 // mov byte [rdx + 3], cl LONG $0x04c28348 // add rdx, 4 QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB10_23 + JNE LBB10_183 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB10_104 - JMP LBB10_175 + JL LBB10_186 + JMP LBB10_194 -LBB10_25: +LBB10_2: WORD $0xff83; BYTE $0x02 // cmp edi, 2 - JE LBB10_80 + JE LBB10_33 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB10_175 + JNE LBB10_194 WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -53599,11 +54970,11 @@ LBB10_25: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_128 + JE LBB10_5 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d WORD $0x894d; BYTE $0xdd // mov r13, r11 -LBB10_29: +LBB10_59: WORD $0x3844; BYTE $0x36 // cmp byte [rsi], r14b LONG $0x01768d48 // lea rsi, [rsi + 1] WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -53624,40 +54995,40 @@ LBB10_29: LONG $0x3d5c8841; BYTE $0x00 // mov byte [r13 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_29 + JNE LBB10_59 LONG $0x01c58349 // add r13, 1 LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_129 + JL LBB10_62 -LBB10_31: +LBB10_63: LONG $0x20ff8349 // cmp r15, 32 LONG $0x24748944; BYTE $0x1c // mov dword [rsp + 28], r14d QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x0000026024bc894c // mov qword [rsp + 608], r15 - JB LBB10_34 + JB LBB10_64 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc5 // cmp r13, rax - JAE LBB10_182 + JAE LBB10_67 QUAD $0x00000000bd048d4a // lea rax, [4*r15] WORD $0x014c; BYTE $0xe8 // add rax, r13 WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB10_182 + JAE LBB10_67 -LBB10_34: +LBB10_64: WORD $0xc031 // xor eax, eax QUAD $0x000001a024848948 // mov qword [rsp + 416], rax WORD $0x8949; BYTE $0xf4 // mov r12, rsi QUAD $0x0000016824ac894c // mov qword [rsp + 360], r13 -LBB10_35: +LBB10_70: WORD $0x894d; BYTE $0xfd // mov r13, r15 QUAD $0x000001a024ac2b4c // sub r13, qword [rsp + 416] QUAD $0x0000009824ac894c // mov qword [rsp + 152], r13 -LBB10_36: +LBB10_71: WORD $0x894c; BYTE $0xe1 // mov rcx, r12 LONG $0x24343845 // cmp byte [r12], r14b LONG $0x24549d0f; BYTE 
$0x20 // setge byte [rsp + 32] @@ -53820,16 +55191,16 @@ LBB10_36: LONG $0x04c68348 // add rsi, 4 QUAD $0x0000016824b48948 // mov qword [rsp + 360], rsi QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB10_36 + JNE LBB10_71 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x0000026024bc8b4c // mov r15, qword [rsp + 608] - JMP LBB10_130 + JMP LBB10_73 -LBB10_38: +LBB10_16: WORD $0xff83; BYTE $0x07 // cmp edi, 7 - JE LBB10_92 + JE LBB10_124 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB10_175 + JNE LBB10_194 WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -53839,10 +55210,10 @@ LBB10_38: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_44 + JE LBB10_22 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_42: +LBB10_20: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] LONG $0x000000ba; BYTE $0x00 // mov edx, 0 @@ -53863,23 +55234,23 @@ LBB10_42: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_42 + JNE LBB10_20 LONG $0x01c38349 // add r11, 1 -LBB10_44: +LBB10_22: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_106 + JL LBB10_23 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 -LBB10_46: +LBB10_141: QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 QUAD $0x000000982494930f // setae byte [rsp + 152] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7930f40 // setae dil + LONG $0xd2930f41 // setae r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6930f41 // setae r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 @@ -53895,11 +55266,11 @@ LBB10_46: LONG $0x406e394c // cmp qword [rsi + 64], r13 LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0xd7930f40 // setae dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1930f41 // setae r9b + LONG $0xd0930f41 // setae r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2930f41 // setae r10b + LONG $0xd1930f41 // setae r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3930f41 // setae r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 @@ -53939,32 +55310,33 @@ LBB10_46: LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x2454930f; BYTE $0x1c // setae byte [rsp + 28] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0930f41 // setae r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x930f; BYTE $0xd2 // setae dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x70 // add dil, byte [rsp + 112] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 
3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] @@ -53972,73 +55344,73 @@ LBB10_46: LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1b // mov byte [r11], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, 
byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x1c // movzx edx, byte [rsp + 28] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xc000 // add al, al + LONG $0x20244402 // add al, byte [rsp + 32] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x03438845 // mov byte [r11 + 3], r8b + LONG $0x03538841 // mov byte [r11 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c38349 // add r11, 4 QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB10_46 + JNE LBB10_141 WORD $0x894d; BYTE $0xde // mov r14, r11 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB10_107 - JMP LBB10_175 + JL LBB10_144 + JMP LBB10_194 -LBB10_48: +LBB10_81: LONG $0x2ab70f44 // movzx r13d, word [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -54048,10 +55420,10 @@ LBB10_48: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_52 + JE LBB10_85 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_50: +LBB10_83: LONG $0x2e394466 // cmp word [rsi], r13w LONG $0x02768d48 // lea rsi, [rsi + 2] LONG $0x000000ba; BYTE $0x00 // mov edx, 0 @@ -54072,23 +55444,23 @@ LBB10_50: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_50 + JNE LBB10_83 LONG $0x01c38349 // add r11, 1 -LBB10_52: +LBB10_85: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_109 + JL LBB10_86 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB10_54: 
+LBB10_88: LONG $0x2e394466 // cmp word [rsi], r13w WORD $0x930f; BYTE $0xd0 // setae al LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0xd7930f40 // setae dil + LONG $0xd2930f41 // setae r10b LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w LONG $0xd6930f41 // setae r14b LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w @@ -54104,11 +55476,11 @@ LBB10_54: LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w - WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0xd7930f40 // setae dil LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w - LONG $0xd1930f41 // setae r9b + LONG $0xd0930f41 // setae r8b LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w - LONG $0xd2930f41 // setae r10b + LONG $0xd1930f41 // setae r9b LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w LONG $0xd3930f41 // setae r11b LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w @@ -54148,68 +55520,69 @@ LBB10_54: LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w LONG $0x2454930f; BYTE $0x1c // setae byte [rsp + 28] LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w - LONG $0xd0930f41 // setae r8b - WORD $0x0040; BYTE $0xff // add dil, dil - WORD $0x0840; BYTE $0xc7 // or dil, al + WORD $0x930f; BYTE $0xd2 // setae dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x70 // add dil, byte [rsp + 112] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // 
add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xc900 // add cl, cl @@ -54232,25 +55605,25 @@ LBB10_54: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x40c68348 // add rsi, 64 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB10_54 + JNE LBB10_88 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB10_110 - JMP LBB10_175 + JL LBB10_91 + JMP LBB10_194 -LBB10_56: +LBB10_97: LONG $0x2ab70f44 // movzx r13d, word [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -54260,10 
+55633,10 @@ LBB10_56: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_60 + JE LBB10_101 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_58: +LBB10_99: LONG $0x2e394466 // cmp word [rsi], r13w LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -54284,23 +55657,23 @@ LBB10_58: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_58 + JNE LBB10_99 LONG $0x01c38349 // add r11, 1 -LBB10_60: +LBB10_101: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_112 + JL LBB10_102 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB10_62: +LBB10_104: LONG $0x2e394466 // cmp word [rsi], r13w QUAD $0x0000009824949d0f // setge byte [rsp + 152] LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0xd79d0f40 // setge dil + LONG $0xd29d0f41 // setge r10b LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w LONG $0xd69d0f41 // setge r14b LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w @@ -54316,11 +55689,11 @@ LBB10_62: LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w - WORD $0x9d0f; BYTE $0xd2 // setge dl + LONG $0xd79d0f40 // setge dil LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w - LONG $0xd19d0f41 // setge r9b + LONG $0xd09d0f41 // setge r8b LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w - LONG $0xd29d0f41 // setge r10b + LONG $0xd19d0f41 // setge r9b LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w LONG $0xd39d0f41 // setge r11b LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w @@ -54360,67 +55733,68 @@ LBB10_62: LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w LONG $0x24549d0f; BYTE $0x1c // setge byte [rsp + 28] LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w - LONG $0xd09d0f41 // setge r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x9d0f; BYTE $0xd2 // setge dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x70 // add dil, byte [rsp + 112] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000902484b60f 
// movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xc900 // add cl, cl @@ -54443,25 +55817,25 @@ LBB10_62: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + 
WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x40c68348 // add rsi, 64 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB10_62 + JNE LBB10_104 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB10_113 - JMP LBB10_175 + JL LBB10_107 + JMP LBB10_194 -LBB10_64: +LBB10_150: WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -54471,10 +55845,10 @@ LBB10_64: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_68 + JE LBB10_154 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_66: +LBB10_152: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -54495,23 +55869,23 @@ LBB10_66: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_66 + JNE LBB10_152 LONG $0x01c38349 // add r11, 1 -LBB10_68: +LBB10_154: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_115 + JL LBB10_155 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB10_70: +LBB10_157: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 QUAD $0x0000009824949d0f // setge byte [rsp + 152] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd79d0f40 // setge dil + LONG $0xd29d0f41 // setge r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd69d0f41 // setge r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 @@ -54527,11 +55901,11 @@ LBB10_70: LONG $0x406e394c // cmp qword [rsi + 64], r13 LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x9d0f; BYTE $0xd2 // setge dl + LONG $0xd79d0f40 // setge dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd19d0f41 // setge r9b + LONG $0xd09d0f41 // setge r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd29d0f41 // setge r10b + LONG $0xd19d0f41 // setge r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd39d0f41 // setge r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 @@ -54571,67 +55945,68 @@ LBB10_70: LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x24549d0f; BYTE $0x1c // setge byte [rsp + 28] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd09d0f41 // setge r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x9d0f; BYTE $0xd2 // setge dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] + WORD $0x0845; BYTE $0xd6 // or 
r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x70 // add dil, byte [rsp + 112] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x0000011024848b48 // mov rax, qword [rsp + 272] + WORD $0x1888 // mov byte [rax], 
bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl + WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xc900 // add cl, cl @@ -54654,25 +56029,25 @@ LBB10_70: WORD $0xd908 // or cl, bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + WORD $0xca08 // or dl, cl + LONG $0x02788844 // mov byte [rax + 2], r15b + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c28348 // add rdx, 4 - QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000011024848948 // mov qword [rsp + 272], rax QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB10_70 + JNE LBB10_157 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB10_116 - JMP LBB10_175 + JL LBB10_160 + JMP LBB10_194 -LBB10_72: +LBB10_166: LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xfa490f4d // cmovns r15, r10 @@ -54682,14 +56057,15 @@ LBB10_72: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x0210fac5 // vmovss xmm0, dword [rdx] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_76 + JE LBB10_170 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_74: - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - WORD $0x960f; BYTE $0xd2 // setbe dl +LBB10_168: + LONG $0x0e10fac5 // vmovss xmm1, dword [rsi] LONG $0x04c68348 // add rsi, 4 - WORD $0xdaf6 // neg dl + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x000000ba; BYTE $0x00 // mov edx, 0 + WORD $0xd280; BYTE $0xff // adc dl, -1 LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax @@ -54706,182 +56082,210 @@ LBB10_74: LONG $0x3b1c8841 // mov byte [r11 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_74 + JNE LBB10_168 LONG $0x01c38349 // add r11, 1 -LBB10_76: +LBB10_170: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_118 + JL LBB10_171 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB10_78: - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - QUAD $0x000000a02494960f // setbe byte [rsp + 160] - LONG $0x462ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rsi + 4] - LONG $0xd1960f41 // setbe r9b - LONG $0x462ef8c5; BYTE $0x08 // vucomiss xmm0, dword [rsi + 8] - LONG $0xd6960f41 // setbe r14b - LONG $0x462ef8c5; BYTE $0x0c // vucomiss xmm0, dword [rsi + 12] - LONG $0xd5960f41 // setbe r13b - LONG $0x462ef8c5; BYTE $0x10 // vucomiss xmm0, dword [rsi + 16] - QUAD $0x000000902494960f // setbe byte [rsp + 144] - LONG $0x462ef8c5; BYTE $0x14 // vucomiss 
xmm0, dword [rsi + 20] - LONG $0x2454960f; BYTE $0x60 // setbe byte [rsp + 96] - LONG $0x462ef8c5; BYTE $0x18 // vucomiss xmm0, dword [rsi + 24] - WORD $0x960f; BYTE $0xd0 // setbe al - LONG $0x462ef8c5; BYTE $0x1c // vucomiss xmm0, dword [rsi + 28] - WORD $0x960f; BYTE $0xd3 // setbe bl - LONG $0x462ef8c5; BYTE $0x20 // vucomiss xmm0, dword [rsi + 32] - LONG $0x2454960f; BYTE $0x78 // setbe byte [rsp + 120] - LONG $0x462ef8c5; BYTE $0x24 // vucomiss xmm0, dword [rsi + 36] - WORD $0x960f; BYTE $0xd2 // setbe dl - LONG $0x462ef8c5; BYTE $0x28 // vucomiss xmm0, dword [rsi + 40] - LONG $0xd7960f40 // setbe dil - LONG $0x462ef8c5; BYTE $0x2c // vucomiss xmm0, dword [rsi + 44] - LONG $0xd2960f41 // setbe r10b - LONG $0x462ef8c5; BYTE $0x30 // vucomiss xmm0, dword [rsi + 48] - LONG $0xd3960f41 // setbe r11b - LONG $0x462ef8c5; BYTE $0x34 // vucomiss xmm0, dword [rsi + 52] - LONG $0xd4960f41 // setbe r12b - LONG $0x462ef8c5; BYTE $0x38 // vucomiss xmm0, dword [rsi + 56] - QUAD $0x000000802494960f // setbe byte [rsp + 128] - LONG $0x462ef8c5; BYTE $0x3c // vucomiss xmm0, dword [rsi + 60] - WORD $0x960f; BYTE $0xd1 // setbe cl - LONG $0x462ef8c5; BYTE $0x40 // vucomiss xmm0, dword [rsi + 64] - LONG $0x2454960f; BYTE $0x50 // setbe byte [rsp + 80] - LONG $0x462ef8c5; BYTE $0x44 // vucomiss xmm0, dword [rsi + 68] - LONG $0x2454960f; BYTE $0x70 // setbe byte [rsp + 112] - LONG $0x462ef8c5; BYTE $0x48 // vucomiss xmm0, dword [rsi + 72] - QUAD $0x000000882494960f // setbe byte [rsp + 136] - LONG $0x462ef8c5; BYTE $0x4c // vucomiss xmm0, dword [rsi + 76] - LONG $0x2454960f; BYTE $0x48 // setbe byte [rsp + 72] - LONG $0x462ef8c5; BYTE $0x50 // vucomiss xmm0, dword [rsi + 80] - LONG $0x2454960f; BYTE $0x58 // setbe byte [rsp + 88] - LONG $0x462ef8c5; BYTE $0x54 // vucomiss xmm0, dword [rsi + 84] - LONG $0x2454960f; BYTE $0x68 // setbe byte [rsp + 104] - LONG $0x462ef8c5; BYTE $0x58 // vucomiss xmm0, dword [rsi + 88] - LONG $0x2454960f; BYTE $0x40 // setbe byte [rsp + 64] - LONG $0x462ef8c5; BYTE $0x5c // vucomiss xmm0, dword [rsi + 92] - LONG $0xd7960f41 // setbe r15b - LONG $0x462ef8c5; BYTE $0x60 // vucomiss xmm0, dword [rsi + 96] - LONG $0x2454960f; BYTE $0x20 // setbe byte [rsp + 32] - LONG $0x462ef8c5; BYTE $0x64 // vucomiss xmm0, dword [rsi + 100] - LONG $0x2454960f; BYTE $0x28 // setbe byte [rsp + 40] - LONG $0x462ef8c5; BYTE $0x68 // vucomiss xmm0, dword [rsi + 104] - LONG $0x2454960f; BYTE $0x30 // setbe byte [rsp + 48] - LONG $0x462ef8c5; BYTE $0x6c // vucomiss xmm0, dword [rsi + 108] - LONG $0x2454960f; BYTE $0x38 // setbe byte [rsp + 56] - LONG $0x462ef8c5; BYTE $0x70 // vucomiss xmm0, dword [rsi + 112] - QUAD $0x000001402494960f // setbe byte [rsp + 320] - LONG $0x462ef8c5; BYTE $0x74 // vucomiss xmm0, dword [rsi + 116] - QUAD $0x000001202494960f // setbe byte [rsp + 288] - LONG $0x462ef8c5; BYTE $0x78 // vucomiss xmm0, dword [rsi + 120] - LONG $0x2454960f; BYTE $0x1c // setbe byte [rsp + 28] - LONG $0x462ef8c5; BYTE $0x7c // vucomiss xmm0, dword [rsi + 124] - LONG $0xd0960f41 // setbe r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x000000a0248c0244 // add r9b, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b +LBB10_173: + LONG $0x0e10fac5 // vmovss xmm1, dword [rsi] + LONG $0x5610fac5; BYTE $0x04 // vmovss xmm2, dword [rsi + 4] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454930f; BYTE $0x78 // setae byte [rsp + 120] + LONG 
$0xd02ef8c5 // vucomiss xmm2, xmm0 + WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0x4e10fac5; BYTE $0x08 // vmovss xmm1, dword [rsi + 8] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0xd7930f40 // setae dil + LONG $0x4e10fac5; BYTE $0x0c // vmovss xmm1, dword [rsi + 12] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0xd0930f41 // setae r8b + LONG $0x4e10fac5; BYTE $0x10 // vmovss xmm1, dword [rsi + 16] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0xd1930f41 // setae r9b + LONG $0x4e10fac5; BYTE $0x14 // vmovss xmm1, dword [rsi + 20] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + QUAD $0x000000a02494930f // setae byte [rsp + 160] + LONG $0x4e10fac5; BYTE $0x18 // vmovss xmm1, dword [rsi + 24] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + QUAD $0x000000802494930f // setae byte [rsp + 128] + LONG $0x4e10fac5; BYTE $0x1c // vmovss xmm1, dword [rsi + 28] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] + LONG $0x4e10fac5; BYTE $0x20 // vmovss xmm1, dword [rsi + 32] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454930f; BYTE $0x60 // setae byte [rsp + 96] + LONG $0x4e10fac5; BYTE $0x24 // vmovss xmm1, dword [rsi + 36] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0xd2930f41 // setae r10b + LONG $0x4e10fac5; BYTE $0x28 // vmovss xmm1, dword [rsi + 40] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + WORD $0x930f; BYTE $0xd3 // setae bl + LONG $0x4e10fac5; BYTE $0x2c // vmovss xmm1, dword [rsi + 44] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0xd7930f41 // setae r15b + LONG $0x4e10fac5; BYTE $0x30 // vmovss xmm1, dword [rsi + 48] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + WORD $0x930f; BYTE $0xd0 // setae al + LONG $0x4e10fac5; BYTE $0x34 // vmovss xmm1, dword [rsi + 52] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + QUAD $0x000000902494930f // setae byte [rsp + 144] + LONG $0x4e10fac5; BYTE $0x38 // vmovss xmm1, dword [rsi + 56] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454930f; BYTE $0x58 // setae byte [rsp + 88] + LONG $0x4e10fac5; BYTE $0x3c // vmovss xmm1, dword [rsi + 60] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454930f; BYTE $0x48 // setae byte [rsp + 72] + LONG $0x4e10fac5; BYTE $0x40 // vmovss xmm1, dword [rsi + 64] + LONG $0x5610fac5; BYTE $0x44 // vmovss xmm2, dword [rsi + 68] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x48 // vmovss xmm1, dword [rsi + 72] + LONG $0x2454930f; BYTE $0x28 // setae byte [rsp + 40] + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + LONG $0x5610fac5; BYTE $0x4c // vmovss xmm2, dword [rsi + 76] + LONG $0xd3930f41 // setae r11b + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x50 // vmovss xmm1, dword [rsi + 80] + LONG $0xd6930f41 // setae r14b + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + LONG $0x5610fac5; BYTE $0x54 // vmovss xmm2, dword [rsi + 84] + LONG $0xd4930f41 // setae r12b + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x58 // vmovss xmm1, dword [rsi + 88] + QUAD $0x000000882494930f // setae byte [rsp + 136] + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + LONG $0x5610fac5; BYTE $0x5c // vmovss xmm2, dword [rsi + 92] + LONG $0x2454930f; BYTE $0x50 // setae byte [rsp + 80] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x60 // vmovss xmm1, dword [rsi + 96] + LONG $0x2454930f; BYTE $0x68 // setae byte [rsp + 104] + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + LONG $0x5610fac5; BYTE $0x64 // vmovss xmm2, dword [rsi + 100] + LONG $0xd5930f41 // setae r13b + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 
+ LONG $0x4e10fac5; BYTE $0x68 // vmovss xmm1, dword [rsi + 104] + QUAD $0x000001202494930f // setae byte [rsp + 288] + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + LONG $0x5610fac5; BYTE $0x6c // vmovss xmm2, dword [rsi + 108] + LONG $0x2454930f; BYTE $0x40 // setae byte [rsp + 64] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x70 // vmovss xmm1, dword [rsi + 112] + LONG $0x2454930f; BYTE $0x30 // setae byte [rsp + 48] + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + LONG $0x5610fac5; BYTE $0x74 // vmovss xmm2, dword [rsi + 116] + LONG $0x2454930f; BYTE $0x38 // setae byte [rsp + 56] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x4e10fac5; BYTE $0x78 // vmovss xmm1, dword [rsi + 120] + LONG $0x2454930f; BYTE $0x20 // setae byte [rsp + 32] + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + LONG $0x5610fac5; BYTE $0x7c // vmovss xmm2, dword [rsi + 124] + QUAD $0x000001402494930f // setae byte [rsp + 320] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x2454930f; BYTE $0x1c // setae byte [rsp + 28] + LONG $0x80ee8348 // sub rsi, -128 + LONG $0xd02ef8c5 // vucomiss xmm2, xmm0 + WORD $0x930f; BYTE $0xd1 // setae cl WORD $0xd200 // add dl, dl LONG $0x78245402 // add dl, byte [rsp + 120] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x04e1c041 // shl r9b, 4 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + QUAD $0x000000a02494b60f // movzx edx, byte [rsp + 160] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000008024bcb60f // movzx edi, byte [rsp + 128] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xc000 // add al, al - LONG $0x50244402 // add al, byte [rsp + 80] - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 + QUAD $0x0000802484b60f44; BYTE $0x00 // movzx r8d, byte [rsp + 128] + LONG $0x06e0c041 // shl r8b, 6 + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + LONG $0x24540244; BYTE $0x60 // add r10b, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + LONG $0x03e7c041 // shl r15b, 3 + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xf8 // or al, r15b + WORD $0x0840; BYTE $0xd7 // or dil, dl + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] + WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, 
edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] + WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + WORD $0xc308 // or bl, al + WORD $0x0045; BYTE $0xdb // add r11b, r11b + LONG $0x245c0244; BYTE $0x28 // add r11b, byte [rsp + 40] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xde // or r14b, r11b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xf4 // or r12b, r14b + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0x8941; BYTE $0xc0 // mov r8d, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0844; BYTE $0xc0 // or al, r8b + WORD $0xd308 // or bl, dl LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx + WORD $0xe2c0; BYTE $0x06 // shl dl, 6 + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xd5 // or r13b, dl QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - WORD $0x1a88 // mov byte [rdx], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4a88; BYTE $0x01 // mov byte [rdx + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl + WORD $0x8840; BYTE $0x3a // mov byte [rdx], dil + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xc000 // add al, al + LONG $0x20248402; WORD $0x0001; BYTE $0x00 // add al, byte [rsp + 288] + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0x5a88; BYTE $0x01 // mov byte [rdx + 1], bl LONG $0x245cb60f; BYTE $0x1c // movzx ebx, byte [rsp + 28] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG 
$0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd8 // or r8b, bl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027a8844 // mov byte [rdx + 2], r15b - LONG $0x03428844 // mov byte [rdx + 3], r8b - LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + LONG $0x026a8844 // mov byte [rdx + 2], r13b + WORD $0xc108 // or cl, al + WORD $0x4a88; BYTE $0x03 // mov byte [rdx + 3], cl LONG $0x04c28348 // add rdx, 4 QUAD $0x0000011024948948 // mov qword [rsp + 272], rdx QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB10_78 + JNE LBB10_173 QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x000000a824bc8b4c // mov r15, qword [rsp + 168] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB10_119 - JMP LBB10_175 + JL LBB10_176 + JMP LBB10_194 -LBB10_80: +LBB10_33: WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -54891,10 +56295,10 @@ LBB10_80: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_84 + JE LBB10_37 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_82: +LBB10_35: WORD $0x3844; BYTE $0x36 // cmp byte [rsi], r14b LONG $0x01768d48 // lea rsi, [rsi + 1] LONG $0x000000ba; BYTE $0x00 // mov edx, 0 @@ -54915,38 +56319,38 @@ LBB10_82: LONG $0x3b1c8841 // mov byte [r11 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_82 + JNE LBB10_35 LONG $0x01c38349 // add r11, 1 -LBB10_84: +LBB10_37: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_121 + JL LBB10_38 LONG $0x20ff8349 // cmp r15, 32 LONG $0x24748944; BYTE $0x1c // mov dword [rsp + 28], r14d QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x0000017024bc894c // mov qword [rsp + 368], r15 - JB LBB10_88 + JB LBB10_40 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc3 // cmp r11, rax - JAE LBB10_185 + JAE LBB10_43 LONG $0xbb048d4b // lea rax, [r11 + 4*r15] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB10_185 + JAE LBB10_43 -LBB10_88: +LBB10_40: WORD $0xc031 // xor eax, eax QUAD $0x0000018024848948 // mov qword [rsp + 384], rax WORD $0x8949; BYTE $0xf4 // mov r12, rsi QUAD $0x00000168249c894c // mov qword [rsp + 360], r11 -LBB10_89: +LBB10_46: QUAD $0x0000018024bc2b4c // sub r15, qword [rsp + 384] QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 -LBB10_90: +LBB10_47: WORD $0x894c; BYTE $0xe1 // mov rcx, r12 LONG $0x24343845 // cmp byte [r12], r14b LONG $0x2454930f; BYTE $0x20 // setae byte [rsp + 32] @@ -55109,12 +56513,12 @@ LBB10_90: LONG $0x04c68348 // add rsi, 4 QUAD $0x0000016824b48948 // mov qword [rsp + 360], rsi QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB10_90 + JNE LBB10_47 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x0000017024bc8b4c // mov r15, qword [rsp + 368] - JMP LBB10_122 + JMP LBB10_49 -LBB10_92: +LBB10_124: WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -55124,10 +56528,10 @@ LBB10_92: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_96 + JE LBB10_128 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d 
-LBB10_94: +LBB10_126: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x04768d48 // lea rsi, [rsi + 4] WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -55148,23 +56552,23 @@ LBB10_94: LONG $0x1b3c8841 // mov byte [r11 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_94 + JNE LBB10_126 LONG $0x01c38349 // add r11, 1 -LBB10_96: +LBB10_128: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_125 + JL LBB10_129 QUAD $0x000001182494894c // mov qword [rsp + 280], r10 QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 QUAD $0x000000a824bc894c // mov qword [rsp + 168], r15 -LBB10_98: +LBB10_131: QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d QUAD $0x0000009824949d0f // setge byte [rsp + 152] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd79d0f40 // setge dil + LONG $0xd29d0f41 // setge r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd69d0f41 // setge r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d @@ -55180,11 +56584,11 @@ LBB10_98: LONG $0x206e3944 // cmp dword [rsi + 32], r13d LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x9d0f; BYTE $0xd2 // setge dl + LONG $0xd79d0f40 // setge dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd19d0f41 // setge r9b + LONG $0xd09d0f41 // setge r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd29d0f41 // setge r10b + LONG $0xd19d0f41 // setge r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd39d0f41 // setge r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d @@ -55224,32 +56628,33 @@ LBB10_98: LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x24549d0f; BYTE $0x1c // setge byte [rsp + 28] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd09d0f41 // setge r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x9d0f; BYTE $0xd2 // setge dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x70245402 // add dl, byte [rsp + 112] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + LONG $0x247c0240; BYTE $0x70 // add dil, byte [rsp + 112] QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // 
shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] @@ -55257,237 +56662,237 @@ LBB10_98: LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1b // mov byte [r11], bl LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014b8841 // mov byte [r11 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000140248cb60f // movzx ecx, byte [rsp + 320] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x1c // movzx edx, byte [rsp + 28] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x28 // 
movzx eax, byte [rsp + 40] + WORD $0xc000 // add al, al + LONG $0x20244402 // add al, byte [rsp + 32] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001402484b60f // movzx eax, byte [rsp + 320] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + QUAD $0x000001202484b60f // movzx eax, byte [rsp + 288] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x1c // movzx ecx, byte [rsp + 28] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027b8845 // mov byte [r11 + 2], r15b - LONG $0x03438845 // mov byte [r11 + 3], r8b + LONG $0x03538841 // mov byte [r11 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c38349 // add r11, 4 QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB10_98 + JNE LBB10_131 WORD $0x894d; BYTE $0xde // mov r14, r11 QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JL LBB10_126 - JMP LBB10_175 + JL LBB10_134 + JMP LBB10_194 -LBB10_100: +LBB10_14: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 -LBB10_101: +LBB10_118: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_133 + JNE LBB10_122 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_135 + JMP LBB10_120 -LBB10_103: +LBB10_32: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 -LBB10_104: +LBB10_186: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_137 + JNE LBB10_188 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_139 + JMP LBB10_190 -LBB10_106: +LBB10_23: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 -LBB10_107: +LBB10_144: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_141 + JNE LBB10_148 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_143 + JMP LBB10_146 -LBB10_109: +LBB10_86: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 -LBB10_110: +LBB10_91: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_145 + JNE LBB10_95 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_147 + JMP LBB10_93 -LBB10_112: +LBB10_102: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 -LBB10_113: +LBB10_107: 
WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_150 + JNE LBB10_112 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_152 + JMP LBB10_109 -LBB10_115: +LBB10_155: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 -LBB10_116: +LBB10_160: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_154 + JNE LBB10_164 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_156 + JMP LBB10_162 -LBB10_118: +LBB10_171: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 -LBB10_119: +LBB10_176: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_158 + JNE LBB10_180 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_160 + JMP LBB10_178 -LBB10_121: +LBB10_38: QUAD $0x00000168249c894c // mov qword [rsp + 360], r11 WORD $0x8949; BYTE $0xf4 // mov r12, rsi -LBB10_122: +LBB10_49: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_163 + JNE LBB10_52 WORD $0xf631 // xor esi, esi - JMP LBB10_166 + JMP LBB10_55 -LBB10_125: +LBB10_129: WORD $0x894d; BYTE $0xde // mov r14, r11 LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 -LBB10_126: +LBB10_134: WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_168 + JNE LBB10_138 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_170 + JMP LBB10_136 -LBB10_128: +LBB10_5: WORD $0x894d; BYTE $0xdd // mov r13, r11 LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JGE LBB10_31 + JGE LBB10_63 -LBB10_129: +LBB10_62: QUAD $0x0000016824ac894c // mov qword [rsp + 360], r13 WORD $0x8949; BYTE $0xf4 // mov r12, rsi -LBB10_130: +LBB10_73: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_175 + JGE LBB10_194 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_176 + JNE LBB10_76 WORD $0xf631 // xor esi, esi - JMP LBB10_179 + JMP LBB10_79 -LBB10_133: +LBB10_122: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_134: +LBB10_123: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x000000bf; BYTE $0x00 // mov edi, 0 LONG $0xffd78040 // adc dil, -1 @@ -55515,41 +56920,43 @@ LBB10_134: WORD $0xc330 // xor bl, al LONG $0x171c8841 // mov byte [r15 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB10_134 + JNE LBB10_123 -LBB10_135: +LBB10_120: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 + JE LBB10_194 WORD $0xc031 // xor eax, eax WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - JMP LBB10_149 + JMP LBB10_192 -LBB10_137: +LBB10_188: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG 
$0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_138: - LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - WORD $0x960f; BYTE $0xd0 // setbe al - WORD $0xd8f6 // neg al +LBB10_189: + LONG $0x0e10fbc5 // vmovsd xmm1, qword [rsi] + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x000000b8; BYTE $0x00 // mov eax, 0 + WORD $0xff14 // adc al, -1 WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 LONG $0x0cb60f45; BYTE $0x3f // movzx r9d, byte [r15 + rdi] + WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b LONG $0x3f1c8841 // mov byte [r15 + rdi], bl LONG $0x02c38349 // add r11, 2 - LONG $0x462ef9c5; BYTE $0x08 // vucomisd xmm0, qword [rsi + 8] - WORD $0x960f; BYTE $0xd0 // setbe al + LONG $0x4e10fbc5; BYTE $0x08 // vmovsd xmm1, qword [rsi + 8] LONG $0x10c68348 // add rsi, 16 - WORD $0xd8f6 // neg al + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + LONG $0x000000b8; BYTE $0x00 // mov eax, 0 + WORD $0xff14 // adc al, -1 WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 WORD $0x01b2 // mov dl, 1 @@ -55558,21 +56965,23 @@ LBB10_138: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB10_138 + JNE LBB10_189 -LBB10_139: +LBB10_190: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 - LONG $0x062ef9c5 // vucomisd xmm0, qword [rsi] - JMP LBB10_162 + JE LBB10_194 + LONG $0x0e10fbc5 // vmovsd xmm1, qword [rsi] + WORD $0xc031 // xor eax, eax + LONG $0xc82ef9c5 // vucomisd xmm1, xmm0 + JMP LBB10_192 -LBB10_141: +LBB10_148: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_142: +LBB10_149: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x000000bf; BYTE $0x00 // mov edi, 0 LONG $0xffd78040 // adc dil, -1 @@ -55600,22 +57009,22 @@ LBB10_142: WORD $0xc330 // xor bl, al LONG $0x171c8841 // mov byte [r15 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB10_142 + JNE LBB10_149 -LBB10_143: +LBB10_146: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 + JE LBB10_194 WORD $0xc031 // xor eax, eax WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - JMP LBB10_149 + JMP LBB10_192 -LBB10_145: +LBB10_95: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_146: +LBB10_96: LONG $0x2e394466 // cmp word [rsi], r13w LONG $0x000000bf; BYTE $0x00 // mov edi, 0 LONG $0xffd78040 // adc dil, -1 @@ -55643,34 +57052,22 @@ LBB10_146: WORD $0xc330 // xor bl, al LONG $0x171c8841 // mov byte [r15 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB10_146 + JNE LBB10_96 -LBB10_147: +LBB10_93: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 + JE LBB10_194 WORD $0xc031 // xor eax, eax LONG $0x2e394466 // cmp word [rsi], r13w + JMP LBB10_192 -LBB10_149: - WORD $0xff14 // adc al, -1 - WORD $0x894c; BYTE $0xda // mov rdx, r11 - LONG $0x03eac148 // shr rdx, 3 - LONG $0x16348a41 // mov sil, byte [r14 + rdx] - LONG $0x07e38041 // and r11b, 7 - WORD $0x01b3 // mov bl, 1 - WORD $0x8944; BYTE $0xd9 // mov ecx, r11d - WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf0 // xor al, sil - WORD $0xc320 // and bl, al - JMP 
LBB10_174 - -LBB10_150: +LBB10_112: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_151: +LBB10_113: LONG $0x2e394466 // cmp word [rsi], r13w WORD $0x9d0f; BYTE $0xd0 // setge al WORD $0xd8f6 // neg al @@ -55698,21 +57095,21 @@ LBB10_151: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB10_151 + JNE LBB10_113 -LBB10_152: +LBB10_109: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 + JE LBB10_194 LONG $0x2e394466 // cmp word [rsi], r13w - JMP LBB10_172 + JMP LBB10_111 -LBB10_154: +LBB10_164: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_155: +LBB10_165: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 WORD $0x9d0f; BYTE $0xd0 // setge al WORD $0xd8f6 // neg al @@ -55740,40 +57137,42 @@ LBB10_155: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB10_155 + JNE LBB10_165 -LBB10_156: +LBB10_162: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 + JE LBB10_194 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - JMP LBB10_172 + JMP LBB10_111 -LBB10_158: +LBB10_180: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_159: - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] - WORD $0x960f; BYTE $0xd0 // setbe al - WORD $0xd8f6 // neg al +LBB10_181: + LONG $0x0e10fac5 // vmovss xmm1, dword [rsi] + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x000000b8; BYTE $0x00 // mov eax, 0 + WORD $0xff14 // adc al, -1 WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 LONG $0x0cb60f45; BYTE $0x3f // movzx r9d, byte [r15 + rdi] + WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b LONG $0x3f1c8841 // mov byte [r15 + rdi], bl LONG $0x02c38349 // add r11, 2 - LONG $0x462ef8c5; BYTE $0x04 // vucomiss xmm0, dword [rsi + 4] - WORD $0x960f; BYTE $0xd0 // setbe al + LONG $0x4e10fac5; BYTE $0x04 // vmovss xmm1, dword [rsi + 4] LONG $0x08c68348 // add rsi, 8 - WORD $0xd8f6 // neg al + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 + LONG $0x000000b8; BYTE $0x00 // mov eax, 0 + WORD $0xff14 // adc al, -1 WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 WORD $0x01b2 // mov dl, 1 @@ -55782,24 +57181,35 @@ LBB10_159: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB10_159 + JNE LBB10_181 -LBB10_160: +LBB10_178: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 - LONG $0x062ef8c5 // vucomiss xmm0, dword [rsi] + JE LBB10_194 + LONG $0x0e10fac5 // vmovss xmm1, dword [rsi] + WORD $0xc031 // xor eax, eax + LONG $0xc82ef8c5 // vucomiss xmm1, xmm0 -LBB10_162: - WORD $0x960f; BYTE $0xd0 // setbe al - JMP LBB10_173 +LBB10_192: + WORD $0xff14 // adc al, -1 + WORD $0x894c; BYTE $0xda // mov rdx, r11 + LONG $0x03eac148 // shr rdx, 3 + LONG $0x16348a41 // mov sil, byte [r14 + rdx] + LONG $0x07e38041 // and r11b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf0 // 
xor al, sil + WORD $0xc320 // and bl, al + JMP LBB10_193 -LBB10_163: +LBB10_52: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi QUAD $0x00000168249c8b4c // mov r11, qword [rsp + 360] -LBB10_164: +LBB10_53: LONG $0x34343845 // cmp byte [r12 + rsi], r14b LONG $0x000000bb; BYTE $0x00 // mov ebx, 0 WORD $0xd380; BYTE $0xff // adc bl, -1 @@ -55826,12 +57236,12 @@ LBB10_164: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB10_164 + JNE LBB10_53 WORD $0x0149; BYTE $0xf4 // add r12, rsi -LBB10_166: +LBB10_55: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 + JE LBB10_194 WORD $0xc031 // xor eax, eax LONG $0x24343845 // cmp byte [r12], r14b WORD $0xff14 // adc al, -1 @@ -55845,15 +57255,15 @@ LBB10_166: WORD $0xe3d2 // shl bl, cl WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al - JMP LBB10_181 + JMP LBB10_57 -LBB10_168: +LBB10_138: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_169: +LBB10_139: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d WORD $0x9d0f; BYTE $0xd0 // setge al WORD $0xd8f6 // neg al @@ -55881,17 +57291,15 @@ LBB10_169: WORD $0xda30 // xor dl, bl LONG $0x3f148841 // mov byte [r15 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB10_169 + JNE LBB10_139 -LBB10_170: +LBB10_136: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 + JE LBB10_194 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d -LBB10_172: +LBB10_111: WORD $0x9d0f; BYTE $0xd0 // setge al - -LBB10_173: WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 @@ -55903,22 +57311,22 @@ LBB10_173: WORD $0x3040; BYTE $0xf0 // xor al, sil WORD $0xc320 // and bl, al -LBB10_174: +LBB10_193: WORD $0x3040; BYTE $0xf3 // xor bl, sil LONG $0x161c8841 // mov byte [r14 + rdx], bl -LBB10_175: +LBB10_194: MOVQ 1344(SP), SP VZEROUPPER RET -LBB10_176: +LBB10_76: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi QUAD $0x00000168249c8b4c // mov r11, qword [rsp + 360] -LBB10_177: +LBB10_77: LONG $0x34343845 // cmp byte [r12 + rsi], r14b WORD $0x9d0f; BYTE $0xd3 // setge bl WORD $0xdbf6 // neg bl @@ -55945,12 +57353,12 @@ LBB10_177: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB10_177 + JNE LBB10_77 WORD $0x0149; BYTE $0xf4 // add r12, rsi -LBB10_179: +LBB10_79: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_175 + JE LBB10_194 LONG $0x24343845 // cmp byte [r12], r14b WORD $0x9d0f; BYTE $0xd0 // setge al WORD $0xd8f6 // neg al @@ -55965,12 +57373,12 @@ LBB10_179: WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al -LBB10_181: +LBB10_57: WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB10_175 + JMP LBB10_194 -LBB10_182: +LBB10_67: LONG $0xe0e78349 // and r15, -32 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 @@ -55986,7 +57394,7 @@ LBB10_182: WORD $0xc031 // xor eax, eax QUAD $0x0000011024ac894c // mov qword [rsp + 272], r13 -LBB10_183: +LBB10_68: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000017824848948 // mov qword [rsp + 376], rax LONG $0x05e3c148 // shl rbx, 5 @@ -58077,16 +59485,16 @@ LBB10_183: LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x000001a0248c3b48 // cmp rcx, qword [rsp + 416] - JNE 
LBB10_183 + JNE LBB10_68 QUAD $0x0000026024bc8b4c // mov r15, qword [rsp + 608] QUAD $0x000001a024bc3b4c // cmp r15, qword [rsp + 416] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] LONG $0x24748b44; BYTE $0x1c // mov r14d, dword [rsp + 28] QUAD $0x0000017024a48b4c // mov r12, qword [rsp + 368] - JNE LBB10_35 - JMP LBB10_130 + JNE LBB10_70 + JMP LBB10_73 -LBB10_185: +LBB10_43: LONG $0xe0e78349 // and r15, -32 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 @@ -58101,7 +59509,7 @@ LBB10_185: WORD $0xc031 // xor eax, eax QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 -LBB10_186: +LBB10_44: WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0000017824848948 // mov qword [rsp + 376], rax LONG $0x05e3c148 // shl rbx, 5 @@ -60238,14 +61646,14 @@ LBB10_186: LONG $0x20c18348 // add rcx, 32 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x00000180248c3b48 // cmp rcx, qword [rsp + 384] - JNE LBB10_186 + JNE LBB10_44 QUAD $0x0000017024bc8b4c // mov r15, qword [rsp + 368] QUAD $0x0000018024bc3b4c // cmp r15, qword [rsp + 384] QUAD $0x0000011824948b4c // mov r10, qword [rsp + 280] LONG $0x24748b44; BYTE $0x1c // mov r14d, dword [rsp + 28] QUAD $0x0000021824a48b4c // mov r12, qword [rsp + 536] - JNE LBB10_89 - JMP LBB10_122 + JNE LBB10_46 + JMP LBB10_49 DATA LCDATA8<>+0x000(SB)/8, $0x0202020202020202 DATA LCDATA8<>+0x008(SB)/8, $0x0202020202020202 diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_noasm.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_noasm.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_noasm.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_noasm.go index 56abad42..61966769 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_noasm.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_noasm.go @@ -18,8 +18,8 @@ package kernels -import "github.com/apache/arrow/go/v14/arrow/compute/exec" +import "github.com/apache/arrow-go/v18/arrow" -func genCompareKernel[T exec.NumericTypes](op CompareOperator) *CompareData { +func genCompareKernel[T arrow.NumericType](op CompareOperator) *CompareData { return genGoCompareKernel(getCmpOp[T](op)) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go index 7cf96a41..24f05881 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s similarity index 84% rename from 
vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s index 00fdac38..635738a7 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s @@ -1,4 +1,4 @@ -//go:build go1.18 && !noasm && !appengine +//+build !noasm !appengine // AUTO-GENERATED BY C2GOASM -- DO NOT EDIT TEXT ·_comparison_equal_arr_arr_sse4(SB), $80-48 @@ -11,8 +11,9 @@ TEXT ·_comparison_equal_arr_arr_sse4(SB), $80-48 MOVQ offset+40(FP), R9 ADDQ $8, SP - WORD $0x894d; BYTE $0xc3 // mov r11, r8 - WORD $0x8949; BYTE $0xce // mov r14, rcx + WORD $0x8944; BYTE $0xc8 // mov eax, r9d + WORD $0x894d; BYTE $0xc6 // mov r14, r8 + WORD $0x8949; BYTE $0xcc // mov r12, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 JG LBB0_29 WORD $0xff83; BYTE $0x03 // cmp edi, 3 @@ -23,16 +24,16 @@ TEXT ·_comparison_equal_arr_arr_sse4(SB), $80-48 JE LBB0_79 WORD $0xff83; BYTE $0x06 // cmp edi, 6 JNE LBB0_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_22 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_20: WORD $0x0e8b // mov ecx, dword [rsi] @@ -45,7 +46,7 @@ LBB0_20: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -54,49 +55,49 @@ LBB0_20: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_20 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_22: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_26 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_24: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x068b // mov eax, dword [rsi] WORD $0x4e8b; BYTE $0x04 // mov ecx, dword [rsi + 4] WORD $0x023b // cmp eax, dword [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] WORD $0x4a3b; BYTE $0x04 // cmp ecx, dword [rdx + 4] - LONG $0x2454940f; 
BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] WORD $0x468b; BYTE $0x08 // mov eax, dword [rsi + 8] WORD $0x423b; BYTE $0x08 // cmp eax, dword [rdx + 8] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] WORD $0x468b; BYTE $0x0c // mov eax, dword [rsi + 12] WORD $0x423b; BYTE $0x0c // cmp eax, dword [rdx + 12] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] WORD $0x468b; BYTE $0x10 // mov eax, dword [rsi + 16] WORD $0x423b; BYTE $0x10 // cmp eax, dword [rdx + 16] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] WORD $0x468b; BYTE $0x14 // mov eax, dword [rsi + 20] WORD $0x423b; BYTE $0x14 // cmp eax, dword [rdx + 20] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] WORD $0x468b; BYTE $0x18 // mov eax, dword [rsi + 24] WORD $0x423b; BYTE $0x18 // cmp eax, dword [rdx + 24] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] WORD $0x468b; BYTE $0x1c // mov eax, dword [rsi + 28] WORD $0x423b; BYTE $0x1c // cmp eax, dword [rdx + 28] LONG $0xd5940f41 // sete r13b WORD $0x468b; BYTE $0x20 // mov eax, dword [rsi + 32] WORD $0x423b; BYTE $0x20 // cmp eax, dword [rdx + 32] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] WORD $0x468b; BYTE $0x24 // mov eax, dword [rsi + 36] WORD $0x423b; BYTE $0x24 // cmp eax, dword [rdx + 36] LONG $0xd0940f41 // sete r8b @@ -108,165 +109,165 @@ LBB0_24: LONG $0xd7940f41 // sete r15b WORD $0x468b; BYTE $0x30 // mov eax, dword [rsi + 48] WORD $0x423b; BYTE $0x30 // cmp eax, dword [rdx + 48] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] WORD $0x468b; BYTE $0x34 // mov eax, dword [rsi + 52] WORD $0x423b; BYTE $0x34 // cmp eax, dword [rdx + 52] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] WORD $0x468b; BYTE $0x38 // mov eax, dword [rsi + 56] WORD $0x423b; BYTE $0x38 // cmp eax, dword [rdx + 56] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] WORD $0x468b; BYTE $0x3c // mov eax, dword [rsi + 60] WORD $0x423b; BYTE $0x3c // cmp eax, dword [rdx + 60] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil WORD $0x468b; BYTE $0x40 // mov eax, dword [rsi + 64] - WORD $0x4e8b; BYTE $0x44 // mov ecx, dword [rsi + 68] + WORD $0x5e8b; BYTE $0x44 // mov ebx, dword [rsi + 68] WORD $0x423b; BYTE $0x40 // cmp eax, dword [rdx + 64] WORD $0x468b; BYTE $0x48 // mov eax, dword [rsi + 72] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - WORD $0x4a3b; BYTE $0x44 // cmp ecx, dword [rdx + 68] - WORD $0x4e8b; BYTE $0x4c // mov ecx, dword [rsi + 76] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + WORD $0x5a3b; BYTE $0x44 // cmp ebx, dword [rdx + 68] + WORD $0x5e8b; BYTE $0x4c // mov ebx, dword [rsi + 76] LONG $0xd2940f41 // sete r10b WORD $0x423b; BYTE $0x48 // cmp eax, dword [rdx + 72] WORD $0x468b; BYTE $0x50 // mov eax, dword [rsi + 80] LONG $0xd6940f41 // sete r14b - WORD $0x4a3b; BYTE $0x4c // cmp ecx, dword [rdx + 76] - WORD $0x4e8b; BYTE $0x54 // mov ecx, dword [rsi + 84] + WORD $0x5a3b; BYTE $0x4c // cmp ebx, dword [rdx + 76] + WORD $0x5e8b; BYTE $0x54 // mov ebx, dword [rsi + 84] 
LONG $0xd4940f41 // sete r12b WORD $0x423b; BYTE $0x50 // cmp eax, dword [rdx + 80] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - WORD $0x4a3b; BYTE $0x54 // cmp ecx, dword [rdx + 84] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + WORD $0x5a3b; BYTE $0x54 // cmp ebx, dword [rdx + 84] WORD $0x468b; BYTE $0x58 // mov eax, dword [rsi + 88] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] WORD $0x423b; BYTE $0x58 // cmp eax, dword [rdx + 88] WORD $0x468b; BYTE $0x5c // mov eax, dword [rsi + 92] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] WORD $0x423b; BYTE $0x5c // cmp eax, dword [rdx + 92] WORD $0x468b; BYTE $0x60 // mov eax, dword [rsi + 96] LONG $0xd1940f41 // sete r9b WORD $0x423b; BYTE $0x60 // cmp eax, dword [rdx + 96] WORD $0x468b; BYTE $0x64 // mov eax, dword [rsi + 100] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] WORD $0x423b; BYTE $0x64 // cmp eax, dword [rdx + 100] WORD $0x468b; BYTE $0x68 // mov eax, dword [rsi + 104] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] WORD $0x423b; BYTE $0x68 // cmp eax, dword [rdx + 104] WORD $0x468b; BYTE $0x6c // mov eax, dword [rsi + 108] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] WORD $0x423b; BYTE $0x6c // cmp eax, dword [rdx + 108] WORD $0x468b; BYTE $0x70 // mov eax, dword [rsi + 112] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] WORD $0x423b; BYTE $0x70 // cmp eax, dword [rdx + 112] WORD $0x468b; BYTE $0x74 // mov eax, dword [rsi + 116] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] WORD $0x423b; BYTE $0x74 // cmp eax, dword [rdx + 116] WORD $0x468b; BYTE $0x78 // mov eax, dword [rsi + 120] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] WORD $0x423b; BYTE $0x78 // cmp eax, dword [rdx + 120] WORD $0x468b; BYTE $0x7c // mov eax, dword [rsi + 124] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 WORD $0x423b; BYTE $0x7c // cmp eax, dword [rdx + 124] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE 
$0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - 
WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_24 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_26: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_28: @@ -277,16 +278,16 @@ LBB0_28: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_28 JMP LBB0_123 @@ -299,266 +300,361 @@ LBB0_29: JE LBB0_112 WORD $0xff83; BYTE $0x0c // cmp edi, 12 JNE LBB0_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + 
LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_50 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x634c; BYTE $0xd0 // movsxd r10, eax LBB0_48: LONG $0x06100ff2 // movsd xmm0, qword [rsi] LONG $0x08c68348 // add rsi, 8 LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] LONG $0x08528d48 // lea rdx, [rdx + 8] - LONG $0xd2940f41 // sete r10b - WORD $0xf641; BYTE $0xda // neg r10b - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax + WORD $0x9b0f; BYTE $0xd1 // setnp cl + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xcb20 // and bl, cl + WORD $0xdbf6 // neg bl + LONG $0x077a8d49 // lea rdi, [r10 + 7] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f49 // cmovns rdi, r10 LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] - WORD $0x3045; BYTE $0xc2 // xor r10b, r8b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc3 // xor bl, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] - WORD $0xc189 // mov ecx, eax + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0x2944; BYTE $0xc9 // sub ecx, r9d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0x2044; BYTE $0xd3 // and bl, r10b - WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x000001b8; BYTE $0x00 // mov eax, 1 + WORD $0xe0d3 // shl eax, cl + WORD $0xd820 // and al, bl + WORD $0x3044; BYTE $0xc0 // xor al, r8b + LONG $0x3c048841 // mov byte [r12 + rdi], al + LONG $0x01c28349 // add r10, 1 + LONG $0x08fa8349 // cmp r10, 8 JNE LBB0_48 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_50: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_54 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 + LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LBB0_52: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 LONG $0x06100ff2 // movsd xmm0, qword [rsi] - LONG $0x4e100ff2; BYTE $0x08 // movsd xmm1, qword [rsi + 8] LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] - LONG $0x4a2e0f66; BYTE $0x08 // ucomisd xmm1, qword [rdx + 8] - WORD $0x940f; BYTE $0xd0 // sete al + LONG $0x46100ff2; BYTE $0x08 // movsd xmm0, qword [rsi + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x15244c88 // mov byte [rsp + 21], cl + LONG $0x422e0f66; BYTE $0x08 // ucomisd xmm0, qword [rdx + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0d244c88 // mov byte [rsp + 13], cl LONG $0x46100ff2; BYTE $0x10 // movsd xmm0, qword [rsi + 16] LONG $0x422e0f66; BYTE $0x10 // ucomisd xmm0, qword [rdx + 16] + WORD $0x9b0f; BYTE $0xd0 // 
setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x14244c88 // mov byte [rsp + 20], cl LONG $0x46100ff2; BYTE $0x18 // movsd xmm0, qword [rsi + 24] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x422e0f66; BYTE $0x18 // ucomisd xmm0, qword [rdx + 24] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x17244c88 // mov byte [rsp + 23], cl LONG $0x46100ff2; BYTE $0x20 // movsd xmm0, qword [rsi + 32] LONG $0x422e0f66; BYTE $0x20 // ucomisd xmm0, qword [rdx + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x16244c88 // mov byte [rsp + 22], cl LONG $0x46100ff2; BYTE $0x28 // movsd xmm0, qword [rsi + 40] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x422e0f66; BYTE $0x28 // ucomisd xmm0, qword [rdx + 40] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x46100ff2; BYTE $0x30 // movsd xmm0, qword [rsi + 48] LONG $0x422e0f66; BYTE $0x30 // ucomisd xmm0, qword [rdx + 48] - LONG $0x46100ff2; BYTE $0x38 // movsd xmm0, qword [rsi + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al + LONG $0x46100ff2; BYTE $0x38 // movsd xmm0, qword [rsi + 56] LONG $0x422e0f66; BYTE $0x38 // ucomisd xmm0, qword [rdx + 56] - LONG $0xd7940f41 // sete r15b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x03244c88 // mov byte [rsp + 3], cl LONG $0x46100ff2; BYTE $0x40 // movsd xmm0, qword [rsi + 64] LONG $0x422e0f66; BYTE $0x40 // ucomisd xmm0, qword [rdx + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x13244c88 // mov byte [rsp + 19], cl LONG $0x46100ff2; BYTE $0x48 // movsd xmm0, qword [rsi + 72] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x422e0f66; BYTE $0x48 // ucomisd xmm0, qword [rdx + 72] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x46100ff2; BYTE $0x50 // movsd xmm0, qword [rsi + 80] LONG $0x422e0f66; BYTE $0x50 // ucomisd xmm0, qword [rdx + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x12244c88 // mov byte [rsp + 18], cl LONG $0x46100ff2; BYTE $0x58 // movsd xmm0, qword [rsi + 88] - LONG $0xd1940f41 // sete r9b LONG $0x422e0f66; BYTE $0x58 // ucomisd xmm0, qword [rdx + 88] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x11244c88 // mov byte [rsp + 17], cl LONG $0x46100ff2; BYTE $0x60 // movsd xmm0, qword [rsi + 96] LONG $0x422e0f66; BYTE $0x60 // ucomisd xmm0, qword [rdx + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x46100ff2; BYTE $0x68 // movsd xmm0, qword [rsi + 104] - LONG $0xd2940f41 // sete r10b LONG $0x422e0f66; BYTE $0x68 // ucomisd xmm0, qword [rdx + 104] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG 
$0x0f244c88 // mov byte [rsp + 15], cl LONG $0x46100ff2; BYTE $0x70 // movsd xmm0, qword [rsi + 112] LONG $0x422e0f66; BYTE $0x70 // ucomisd xmm0, qword [rdx + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0c244c88 // mov byte [rsp + 12], cl LONG $0x46100ff2; BYTE $0x78 // movsd xmm0, qword [rsi + 120] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x422e0f66; BYTE $0x78 // ucomisd xmm0, qword [rdx + 120] - WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0b244c88 // mov byte [rsp + 11], cl QUAD $0x0000008086100ff2 // movsd xmm0, qword [rsi + 128] QUAD $0x00000080822e0f66 // ucomisd xmm0, qword [rdx + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0e244c88 // mov byte [rsp + 14], cl QUAD $0x0000008886100ff2 // movsd xmm0, qword [rsi + 136] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] QUAD $0x00000088822e0f66 // ucomisd xmm0, qword [rdx + 136] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0a244c88 // mov byte [rsp + 10], cl QUAD $0x0000009086100ff2 // movsd xmm0, qword [rsi + 144] - LONG $0xd6940f41 // sete r14b QUAD $0x00000090822e0f66 // ucomisd xmm0, qword [rdx + 144] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x09244c88 // mov byte [rsp + 9], cl QUAD $0x0000009886100ff2 // movsd xmm0, qword [rsi + 152] - LONG $0xd4940f41 // sete r12b QUAD $0x00000098822e0f66 // ucomisd xmm0, qword [rdx + 152] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al QUAD $0x000000a086100ff2 // movsd xmm0, qword [rsi + 160] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] QUAD $0x000000a0822e0f66 // ucomisd xmm0, qword [rdx + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl QUAD $0x000000a886100ff2 // movsd xmm0, qword [rsi + 168] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] QUAD $0x000000a8822e0f66 // ucomisd xmm0, qword [rdx + 168] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x07244c88 // mov byte [rsp + 7], cl QUAD $0x000000b086100ff2 // movsd xmm0, qword [rsi + 176] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] QUAD $0x000000b0822e0f66 // ucomisd xmm0, qword [rdx + 176] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x05244c88 // mov byte [rsp + 5], cl QUAD $0x000000b886100ff2 // movsd xmm0, qword [rsi + 184] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] QUAD $0x000000b8822e0f66 // ucomisd xmm0, qword [rdx + 184] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al QUAD $0x000000c086100ff2 // movsd xmm0, qword [rsi + 192] - LONG $0xd0940f41 // sete r8b QUAD $0x000000c0822e0f66 // ucomisd xmm0, qword [rdx + 192] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x06244c88 // mov byte [rsp + 6], cl QUAD $0x000000c886100ff2 // movsd xmm0, qword [rsi + 200] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] QUAD $0x000000c8822e0f66 // ucomisd 
xmm0, qword [rdx + 200] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl QUAD $0x000000d086100ff2 // movsd xmm0, qword [rsi + 208] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] QUAD $0x000000d0822e0f66 // ucomisd xmm0, qword [rdx + 208] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al QUAD $0x000000d886100ff2 // movsd xmm0, qword [rsi + 216] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] QUAD $0x000000d8822e0f66 // ucomisd xmm0, qword [rdx + 216] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al QUAD $0x000000e086100ff2 // movsd xmm0, qword [rsi + 224] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] QUAD $0x000000e0822e0f66 // ucomisd xmm0, qword [rdx + 224] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl QUAD $0x000000e886100ff2 // movsd xmm0, qword [rsi + 232] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] QUAD $0x000000e8822e0f66 // ucomisd xmm0, qword [rdx + 232] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl QUAD $0x000000f086100ff2 // movsd xmm0, qword [rsi + 240] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] QUAD $0x000000f0822e0f66 // ucomisd xmm0, qword [rdx + 240] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al QUAD $0x000000f886100ff2 // movsd xmm0, qword [rsi + 248] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 QUAD $0x000000f8822e0f66 // ucomisd xmm0, qword [rdx + 248] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x04244402 // add al, byte [rsp + 4] + LONG $0x15244402 // add al, byte [rsp + 21] + LONG $0x05e7c040 // shl dil, 5 LONG $0x06e5c041 // shl r13b, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0845; BYTE $0xef // or r15b, r13b - LONG $0x6cb60f44; WORD $0x0524 // movzx r13d, byte [rsp + 5] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0841; BYTE $0xc5 // or r13b, al - WORD $0x8944; BYTE $0xe8 // mov eax, r13d - WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD $0x8944; BYTE $0xef // mov edi, r13d + LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xc108 // or cl, al + WORD $0xd889 // mov eax, ebx + WORD $0xd800 // add al, bl + LONG $0x13244402 // add al, byte [rsp + 19] + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x03 // movzx eax, byte [rsp + 3] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x03244488 // mov byte [rsp + 3], al + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x17 // movzx eax, byte [rsp + 23] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + LONG $0x245cb60f; BYTE $0x11 // movzx ebx, byte [rsp + 17] 
+ WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] - LONG $0x03e5c041 // shl r13b, 3 + LONG $0x04e5c041 // shl r13b, 4 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - WORD $0x8941; BYTE $0xcd // mov r13d, ecx - LONG $0x03e3c041 // shl r11b, 3 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xda // or r10b, r11b - LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd0 // or al, r10b - LONG $0x4cb60f44; WORD $0x0624 // movzx r9d, byte [rsp + 6] - LONG $0x06e1c041 // shl r9b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xcb // or bl, r9b - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al - WORD $0x0045; BYTE $0xf6 // add r14b, r14b - LONG $0x24740244; BYTE $0x0e // add r14b, byte [rsp + 14] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x0f // movzx ebx, byte [rsp + 15] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x0a // movzx ebx, byte [rsp + 10] + WORD $0xdb00 // add bl, bl + LONG $0x0e245c02 // add bl, byte [rsp + 14] + LONG $0x244cb60f; BYTE $0x09 // movzx ecx, byte [rsp + 9] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0841; BYTE $0xce // or r14b, cl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x74b60f44; WORD $0x0324 // movzx r14d, byte [rsp + 3] + WORD $0x0845; BYTE $0xee // or r14b, r13b + LONG $0x6cb60f44; WORD $0x0724 // movzx r13d, byte [rsp + 7] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x05 // movzx ecx, byte [rsp + 5] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xcf // or r15b, cl + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte 
[rsp + 32] + WORD $0xc900 // add cl, cl + LONG $0x06244c02 // add cl, byte [rsp + 6] + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0841; BYTE $0xcb // or r11b, cl + LONG $0x03e2c041 // shl r10b, 3 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xd1 // or cl, r10b + LONG $0x24348845 // mov byte [r12], r14b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448841; BYTE $0x01 // mov byte [r12 + 1], al LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x14244402 // add al, byte [rsp + 20] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x13 // movzx ecx, byte [rsp + 19] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xc7 // or dil, al - WORD $0x0840; BYTE $0xcf // or dil, cl - LONG $0x02468845 // mov byte [r14 + 2], r8b - LONG $0x037e8841 // mov byte [r14 + 3], dil + LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b + LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 JNE LBB0_52 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] LBB0_54: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_56: + LONG $0x01498d4c // lea r9, [rcx + 1] LONG $0x04100ff2; BYTE $0xce // movsd xmm0, qword [rsi + 8*rcx] LONG $0x042e0f66; BYTE $0xca // ucomisd xmm0, qword [rdx + 8*rcx] - LONG $0x01418d4c // lea r8, [rcx + 1] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0xdbf6 // neg bl + WORD $0x9b0f; BYTE $0xd3 // setnp bl + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xd820 // and al, bl + WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc0 // xor al, r8b WORD $0xe180; BYTE $0x07 // and cl, 7 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // 
shl al, cl - WORD $0xd820 // and al, bl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xc3 // xor bl, r8b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + WORD $0x894c; BYTE $0xc9 // mov rcx, r9 + WORD $0x394d; BYTE $0xce // cmp r14, r9 JNE LBB0_56 JMP LBB0_123 @@ -567,16 +663,16 @@ LBB0_2: JE LBB0_57 WORD $0xff83; BYTE $0x03 // cmp edi, 3 JNE LBB0_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_8 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_6: WORD $0xb60f; BYTE $0x0e // movzx ecx, byte [rsi] @@ -589,7 +685,7 @@ LBB0_6: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -598,49 +694,49 @@ LBB0_6: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_6 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_8: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_12 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LBB0_10: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb60f; BYTE $0x06 // movzx eax, byte [rsi] LONG $0x014eb60f // movzx ecx, byte [rsi + 1] WORD $0x023a // cmp al, byte [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] WORD $0x4a3a; BYTE $0x01 // cmp cl, byte [rdx + 1] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x0246b60f // movzx eax, byte [rsi + 2] WORD $0x423a; BYTE $0x02 // cmp al, byte [rdx + 2] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x0346b60f // movzx eax, byte [rsi + 3] WORD $0x423a; BYTE $0x03 // cmp al, byte [rdx + 3] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x0446b60f // movzx eax, byte [rsi + 4] WORD $0x423a; BYTE $0x04 // cmp al, byte [rdx + 4] 
- LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x0546b60f // movzx eax, byte [rsi + 5] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x0646b60f // movzx eax, byte [rsi + 6] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0746b60f // movzx eax, byte [rsi + 7] WORD $0x423a; BYTE $0x07 // cmp al, byte [rdx + 7] LONG $0xd7940f41 // sete r15b LONG $0x0846b60f // movzx eax, byte [rsi + 8] WORD $0x423a; BYTE $0x08 // cmp al, byte [rdx + 8] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x0946b60f // movzx eax, byte [rsi + 9] WORD $0x423a; BYTE $0x09 // cmp al, byte [rdx + 9] LONG $0xd7940f40 // sete dil @@ -655,16 +751,16 @@ LBB0_10: LONG $0xd6940f41 // sete r14b LONG $0x0d46b60f // movzx eax, byte [rsi + 13] WORD $0x423a; BYTE $0x0d // cmp al, byte [rdx + 13] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x0e46b60f // movzx eax, byte [rsi + 14] WORD $0x423a; BYTE $0x0e // cmp al, byte [rdx + 14] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x0f46b60f // movzx eax, byte [rsi + 15] WORD $0x423a; BYTE $0x0f // cmp al, byte [rdx + 15] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd0940f41 // sete r8b LONG $0x1046b60f // movzx eax, byte [rsi + 16] WORD $0x423a; BYTE $0x10 // cmp al, byte [rdx + 16] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x1146b60f // movzx eax, byte [rsi + 17] WORD $0x423a; BYTE $0x11 // cmp al, byte [rdx + 17] LONG $0xd4940f41 // sete r12b @@ -673,144 +769,144 @@ LBB0_10: LONG $0xd5940f41 // sete r13b LONG $0x1346b60f // movzx eax, byte [rsi + 19] WORD $0x423a; BYTE $0x13 // cmp al, byte [rdx + 19] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] LONG $0x1446b60f // movzx eax, byte [rsi + 20] WORD $0x423a; BYTE $0x14 // cmp al, byte [rdx + 20] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x1546b60f // movzx eax, byte [rsi + 21] WORD $0x423a; BYTE $0x15 // cmp al, byte [rdx + 21] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] LONG $0x1646b60f // movzx eax, byte [rsi + 22] WORD $0x423a; BYTE $0x16 // cmp al, byte [rdx + 22] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x1746b60f // movzx eax, byte [rsi + 23] WORD $0x423a; BYTE $0x17 // cmp al, byte [rdx + 23] LONG $0xd1940f41 // sete r9b LONG $0x1846b60f // movzx eax, byte [rsi + 24] WORD $0x423a; BYTE $0x18 // cmp al, byte [rdx + 24] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x1946b60f // movzx eax, byte [rsi + 25] WORD $0x423a; BYTE $0x19 // cmp al, byte [rdx + 25] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x1a46b60f // movzx eax, byte [rsi + 26] WORD $0x423a; BYTE $0x1a // cmp al, byte [rdx + 26] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 
14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x1b46b60f // movzx eax, byte [rsi + 27] WORD $0x423a; BYTE $0x1b // cmp al, byte [rdx + 27] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x1c46b60f // movzx eax, byte [rsi + 28] WORD $0x423a; BYTE $0x1c // cmp al, byte [rdx + 28] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x1d46b60f // movzx eax, byte [rsi + 29] WORD $0x423a; BYTE $0x1d // cmp al, byte [rdx + 29] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x1e46b60f // movzx eax, byte [rsi + 30] WORD $0x423a; BYTE $0x1e // cmp al, byte [rdx + 30] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x1f46b60f // movzx eax, byte [rsi + 31] LONG $0x20c68348 // add rsi, 32 WORD $0x423a; BYTE $0x1f // cmp al, byte [rdx + 31] - LONG $0xd0940f41 // sete r8b - WORD $0xc900 // add cl, cl - LONG $0x28244c02 // add cl, byte [rsp + 40] - WORD $0xc889 // mov eax, ecx - LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xdb00 // add bl, bl + LONG $0x04245c02 // add bl, byte [rsp + 4] + WORD $0xd889 // mov eax, ebx + LONG $0x245cb60f; BYTE $0x05 // movzx ebx, byte [rsp + 5] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xcf // or r15b, cl - LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x245cb60f; BYTE $0x15 // movzx ebx, byte [rsp + 21] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x07 // add dil, byte [rsp + 7] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x247c0240; BYTE $0x08 // add dil, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e2c041 // shl r10b, 2 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0xcf89 // mov edi, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx LONG $0x03e3c041 // shl r11b, 3 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xde // or r14b, r11b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x247cb60f; BYTE $0x06 // movzx edi, byte [rsp + 6] + LONG $0x247cb60f; BYTE $0x07 // movzx edi, byte 
[rsp + 7] LONG $0x06e7c040 // shl dil, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x0841; BYTE $0xc0 // or r8b, al WORD $0x0045; BYTE $0xe4 // add r12b, r12b - LONG $0x24640244; BYTE $0x0d // add r12b, byte [rsp + 13] + LONG $0x24640244; BYTE $0x0e // add r12b, byte [rsp + 14] LONG $0x02e5c041 // shl r13b, 2 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0b // movzx ecx, byte [rsp + 11] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x243c8845 // mov byte [r12], r15b + LONG $0x245cb60f; BYTE $0x0c // movzx ebx, byte [rsp + 12] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x12 // movzx ecx, byte [rsp + 18] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x03468845 // mov byte [r14 + 
3], r8b + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x13 // movzx ebx, byte [rsp + 19] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x20c28348 // add rdx, 32 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 JNE LBB0_10 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] LBB0_12: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_14: @@ -821,16 +917,16 @@ LBB0_14: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_14 JMP LBB0_123 @@ -839,16 +935,16 @@ LBB0_30: JE LBB0_90 WORD $0xff83; BYTE $0x08 // cmp edi, 8 JNE LBB0_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_36 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_34: WORD $0x8b48; BYTE $0x0e // mov rcx, qword [rsi] @@ -861,7 +957,7 @@ LBB0_34: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -870,49 +966,49 @@ LBB0_34: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_34 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_36: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_40 - LONG $0x245c894c; 
BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_38: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x8b48; BYTE $0x06 // mov rax, qword [rsi] LONG $0x084e8b48 // mov rcx, qword [rsi + 8] WORD $0x3b48; BYTE $0x02 // cmp rax, qword [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x084a3b48 // cmp rcx, qword [rdx + 8] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x10468b48 // mov rax, qword [rsi + 16] LONG $0x10423b48 // cmp rax, qword [rdx + 16] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x18468b48 // mov rax, qword [rsi + 24] LONG $0x18423b48 // cmp rax, qword [rdx + 24] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x20468b48 // mov rax, qword [rsi + 32] LONG $0x20423b48 // cmp rax, qword [rdx + 32] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x28468b48 // mov rax, qword [rsi + 40] LONG $0x28423b48 // cmp rax, qword [rdx + 40] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x30468b48 // mov rax, qword [rsi + 48] LONG $0x30423b48 // cmp rax, qword [rdx + 48] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x38468b48 // mov rax, qword [rsi + 56] LONG $0x38423b48 // cmp rax, qword [rdx + 56] LONG $0xd5940f41 // sete r13b LONG $0x40468b48 // mov rax, qword [rsi + 64] LONG $0x40423b48 // cmp rax, qword [rdx + 64] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x48468b48 // mov rax, qword [rsi + 72] LONG $0x48423b48 // cmp rax, qword [rdx + 72] LONG $0xd0940f41 // sete r8b @@ -924,165 +1020,165 @@ LBB0_38: LONG $0xd7940f41 // sete r15b LONG $0x60468b48 // mov rax, qword [rsi + 96] LONG $0x60423b48 // cmp rax, qword [rdx + 96] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x68468b48 // mov rax, qword [rsi + 104] LONG $0x68423b48 // cmp rax, qword [rdx + 104] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x70468b48 // mov rax, qword [rsi + 112] LONG $0x70423b48 // cmp rax, qword [rdx + 112] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x78468b48 // mov rax, qword [rsi + 120] LONG $0x78423b48 // cmp rax, qword [rdx + 120] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil LONG $0x80868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 128] - LONG $0x888e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 136] + LONG $0x889e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 136] LONG $0x80823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 128] LONG $0x90868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 144] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - LONG $0x888a3b48; WORD $0x0000; 
BYTE $0x00 // cmp rcx, qword [rdx + 136] - LONG $0x988e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 152] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x889a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 136] + LONG $0x989e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 152] LONG $0xd2940f41 // sete r10b LONG $0x90823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 144] LONG $0xa0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 160] LONG $0xd6940f41 // sete r14b - LONG $0x988a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 152] - LONG $0xa88e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 168] + LONG $0x989a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 152] + LONG $0xa89e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 168] LONG $0xd4940f41 // sete r12b LONG $0xa0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 160] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - LONG $0xa88a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 168] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0xa89a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 168] LONG $0xb0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 176] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0xb0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 176] LONG $0xb8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 184] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0xb8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 184] LONG $0xc0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 192] LONG $0xd1940f41 // sete r9b LONG $0xc0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 192] LONG $0xc8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 200] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0xc8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 200] LONG $0xd0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 208] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0xd0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 208] LONG $0xd8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 216] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0xd8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 216] LONG $0xe0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 224] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0xe0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 224] LONG $0xe8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 232] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0xe8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 232] LONG $0xf0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 240] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0xf0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 240] LONG $0xf8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 248] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG 
$0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0xf8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 248] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // 
movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_38 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_40: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE 
$0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_42: @@ -1093,30 +1189,30 @@ LBB0_42: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_42 JMP LBB0_123 LBB0_68: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_72 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_70: WORD $0xb70f; BYTE $0x0e // movzx ecx, word [rsi] @@ -1129,7 +1225,7 @@ LBB0_70: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -1138,49 +1234,49 @@ LBB0_70: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_70 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_72: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_76 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_74: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb70f; BYTE $0x06 // movzx eax, word [rsi] LONG $0x024eb70f // movzx ecx, word [rsi + 2] WORD $0x3b66; BYTE $0x02 // cmp ax, word [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x024a3b66 // cmp cx, word [rdx + 2] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x0446b70f // movzx eax, word [rsi + 4] LONG $0x04423b66 // cmp ax, word [rdx + 4] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp 
+ 21] LONG $0x0646b70f // movzx eax, word [rsi + 6] LONG $0x06423b66 // cmp ax, word [rdx + 6] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x0846b70f // movzx eax, word [rsi + 8] LONG $0x08423b66 // cmp ax, word [rdx + 8] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x0a46b70f // movzx eax, word [rsi + 10] LONG $0x0a423b66 // cmp ax, word [rdx + 10] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x0c46b70f // movzx eax, word [rsi + 12] LONG $0x0c423b66 // cmp ax, word [rdx + 12] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0e46b70f // movzx eax, word [rsi + 14] LONG $0x0e423b66 // cmp ax, word [rdx + 14] LONG $0xd5940f41 // sete r13b LONG $0x1046b70f // movzx eax, word [rsi + 16] LONG $0x10423b66 // cmp ax, word [rdx + 16] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x1246b70f // movzx eax, word [rsi + 18] LONG $0x12423b66 // cmp ax, word [rdx + 18] LONG $0xd0940f41 // sete r8b @@ -1192,165 +1288,165 @@ LBB0_74: LONG $0xd7940f41 // sete r15b LONG $0x1846b70f // movzx eax, word [rsi + 24] LONG $0x18423b66 // cmp ax, word [rdx + 24] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x1a46b70f // movzx eax, word [rsi + 26] LONG $0x1a423b66 // cmp ax, word [rdx + 26] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x1c46b70f // movzx eax, word [rsi + 28] LONG $0x1c423b66 // cmp ax, word [rdx + 28] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x1e46b70f // movzx eax, word [rsi + 30] LONG $0x1e423b66 // cmp ax, word [rdx + 30] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil LONG $0x2046b70f // movzx eax, word [rsi + 32] - LONG $0x224eb70f // movzx ecx, word [rsi + 34] + LONG $0x225eb70f // movzx ebx, word [rsi + 34] LONG $0x20423b66 // cmp ax, word [rdx + 32] LONG $0x2446b70f // movzx eax, word [rsi + 36] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - LONG $0x224a3b66 // cmp cx, word [rdx + 34] - LONG $0x264eb70f // movzx ecx, word [rsi + 38] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x225a3b66 // cmp bx, word [rdx + 34] + LONG $0x265eb70f // movzx ebx, word [rsi + 38] LONG $0xd2940f41 // sete r10b LONG $0x24423b66 // cmp ax, word [rdx + 36] LONG $0x2846b70f // movzx eax, word [rsi + 40] LONG $0xd6940f41 // sete r14b - LONG $0x264a3b66 // cmp cx, word [rdx + 38] - LONG $0x2a4eb70f // movzx ecx, word [rsi + 42] + LONG $0x265a3b66 // cmp bx, word [rdx + 38] + LONG $0x2a5eb70f // movzx ebx, word [rsi + 42] LONG $0xd4940f41 // sete r12b LONG $0x28423b66 // cmp ax, word [rdx + 40] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - LONG $0x2a4a3b66 // cmp cx, word [rdx + 42] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2a5a3b66 // cmp bx, word [rdx + 42] LONG $0x2c46b70f // movzx eax, word [rsi + 44] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x2c423b66 // cmp ax, word [rdx + 44] LONG $0x2e46b70f // movzx eax, word [rsi + 46] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // 
sete byte [rsp + 13] LONG $0x2e423b66 // cmp ax, word [rdx + 46] LONG $0x3046b70f // movzx eax, word [rsi + 48] LONG $0xd1940f41 // sete r9b LONG $0x30423b66 // cmp ax, word [rdx + 48] LONG $0x3246b70f // movzx eax, word [rsi + 50] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x32423b66 // cmp ax, word [rdx + 50] LONG $0x3446b70f // movzx eax, word [rsi + 52] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x34423b66 // cmp ax, word [rdx + 52] LONG $0x3646b70f // movzx eax, word [rsi + 54] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x36423b66 // cmp ax, word [rdx + 54] LONG $0x3846b70f // movzx eax, word [rsi + 56] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x38423b66 // cmp ax, word [rdx + 56] LONG $0x3a46b70f // movzx eax, word [rsi + 58] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x3a423b66 // cmp ax, word [rdx + 58] LONG $0x3c46b70f // movzx eax, word [rsi + 60] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x3c423b66 // cmp ax, word [rdx + 60] LONG $0x3e46b70f // movzx eax, word [rsi + 62] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x40c68348 // add rsi, 64 LONG $0x3e423b66 // cmp ax, word [rdx + 62] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - 
LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte 
[rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x40c28348 // add rdx, 64 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_74 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_76: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_78: @@ -1361,30 +1457,30 @@ LBB0_78: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_78 JMP LBB0_123 LBB0_79: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_83 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_81: WORD $0xb70f; BYTE $0x0e // movzx ecx, word [rsi] @@ -1397,7 +1493,7 @@ LBB0_81: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -1406,49 +1502,49 @@ LBB0_81: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, 
r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_81 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_83: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_87 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_85: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb70f; BYTE $0x06 // movzx eax, word [rsi] LONG $0x024eb70f // movzx ecx, word [rsi + 2] WORD $0x3b66; BYTE $0x02 // cmp ax, word [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x024a3b66 // cmp cx, word [rdx + 2] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x0446b70f // movzx eax, word [rsi + 4] LONG $0x04423b66 // cmp ax, word [rdx + 4] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x0646b70f // movzx eax, word [rsi + 6] LONG $0x06423b66 // cmp ax, word [rdx + 6] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x0846b70f // movzx eax, word [rsi + 8] LONG $0x08423b66 // cmp ax, word [rdx + 8] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x0a46b70f // movzx eax, word [rsi + 10] LONG $0x0a423b66 // cmp ax, word [rdx + 10] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x0c46b70f // movzx eax, word [rsi + 12] LONG $0x0c423b66 // cmp ax, word [rdx + 12] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0e46b70f // movzx eax, word [rsi + 14] LONG $0x0e423b66 // cmp ax, word [rdx + 14] LONG $0xd5940f41 // sete r13b LONG $0x1046b70f // movzx eax, word [rsi + 16] LONG $0x10423b66 // cmp ax, word [rdx + 16] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x1246b70f // movzx eax, word [rsi + 18] LONG $0x12423b66 // cmp ax, word [rdx + 18] LONG $0xd0940f41 // sete r8b @@ -1460,165 +1556,165 @@ LBB0_85: LONG $0xd7940f41 // sete r15b LONG $0x1846b70f // movzx eax, word [rsi + 24] LONG $0x18423b66 // cmp ax, word [rdx + 24] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x1a46b70f // movzx eax, word [rsi + 26] LONG $0x1a423b66 // cmp ax, word [rdx + 26] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x1c46b70f // movzx eax, word [rsi + 28] LONG $0x1c423b66 // cmp ax, word [rdx + 28] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x1e46b70f // movzx eax, word [rsi + 30] LONG $0x1e423b66 // cmp ax, word [rdx + 30] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil LONG $0x2046b70f // movzx eax, word [rsi + 32] - LONG $0x224eb70f 
// movzx ecx, word [rsi + 34] + LONG $0x225eb70f // movzx ebx, word [rsi + 34] LONG $0x20423b66 // cmp ax, word [rdx + 32] LONG $0x2446b70f // movzx eax, word [rsi + 36] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - LONG $0x224a3b66 // cmp cx, word [rdx + 34] - LONG $0x264eb70f // movzx ecx, word [rsi + 38] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x225a3b66 // cmp bx, word [rdx + 34] + LONG $0x265eb70f // movzx ebx, word [rsi + 38] LONG $0xd2940f41 // sete r10b LONG $0x24423b66 // cmp ax, word [rdx + 36] LONG $0x2846b70f // movzx eax, word [rsi + 40] LONG $0xd6940f41 // sete r14b - LONG $0x264a3b66 // cmp cx, word [rdx + 38] - LONG $0x2a4eb70f // movzx ecx, word [rsi + 42] + LONG $0x265a3b66 // cmp bx, word [rdx + 38] + LONG $0x2a5eb70f // movzx ebx, word [rsi + 42] LONG $0xd4940f41 // sete r12b LONG $0x28423b66 // cmp ax, word [rdx + 40] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - LONG $0x2a4a3b66 // cmp cx, word [rdx + 42] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2a5a3b66 // cmp bx, word [rdx + 42] LONG $0x2c46b70f // movzx eax, word [rsi + 44] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x2c423b66 // cmp ax, word [rdx + 44] LONG $0x2e46b70f // movzx eax, word [rsi + 46] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x2e423b66 // cmp ax, word [rdx + 46] LONG $0x3046b70f // movzx eax, word [rsi + 48] LONG $0xd1940f41 // sete r9b LONG $0x30423b66 // cmp ax, word [rdx + 48] LONG $0x3246b70f // movzx eax, word [rsi + 50] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x32423b66 // cmp ax, word [rdx + 50] LONG $0x3446b70f // movzx eax, word [rsi + 52] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x34423b66 // cmp ax, word [rdx + 52] LONG $0x3646b70f // movzx eax, word [rsi + 54] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x36423b66 // cmp ax, word [rdx + 54] LONG $0x3846b70f // movzx eax, word [rsi + 56] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x38423b66 // cmp ax, word [rdx + 56] LONG $0x3a46b70f // movzx eax, word [rsi + 58] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x3a423b66 // cmp ax, word [rdx + 58] LONG $0x3c46b70f // movzx eax, word [rsi + 60] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x3c423b66 // cmp ax, word [rdx + 60] LONG $0x3e46b70f // movzx eax, word [rsi + 62] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x40c68348 // add rsi, 64 LONG $0x3e423b66 // cmp ax, word [rdx + 62] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; 
BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte 
[r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x40c28348 // add rdx, 64 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_85 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_87: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_89: @@ -1629,30 +1725,30 @@ LBB0_89: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD 
$0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_89 JMP LBB0_123 LBB0_101: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_105 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_103: WORD $0x8b48; BYTE $0x0e // mov rcx, qword [rsi] @@ -1665,7 +1761,7 @@ LBB0_103: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -1674,49 +1770,49 @@ LBB0_103: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_103 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_105: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_109 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_107: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x8b48; BYTE $0x06 // mov rax, qword [rsi] LONG $0x084e8b48 // mov rcx, qword [rsi + 8] WORD $0x3b48; BYTE $0x02 // cmp rax, qword [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x084a3b48 // cmp rcx, qword [rdx + 8] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x10468b48 // mov rax, qword [rsi + 16] LONG $0x10423b48 // cmp rax, qword [rdx + 16] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x18468b48 // mov rax, qword [rsi + 24] LONG $0x18423b48 // cmp rax, qword [rdx + 24] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x20468b48 // mov rax, qword [rsi + 32] LONG $0x20423b48 // cmp rax, qword [rdx + 32] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x28468b48 // mov rax, qword [rsi + 40] LONG $0x28423b48 // cmp rax, qword [rdx + 40] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x30468b48 // mov rax, qword [rsi + 48] LONG $0x30423b48 // cmp rax, qword 
[rdx + 48] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x38468b48 // mov rax, qword [rsi + 56] LONG $0x38423b48 // cmp rax, qword [rdx + 56] LONG $0xd5940f41 // sete r13b LONG $0x40468b48 // mov rax, qword [rsi + 64] LONG $0x40423b48 // cmp rax, qword [rdx + 64] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x48468b48 // mov rax, qword [rsi + 72] LONG $0x48423b48 // cmp rax, qword [rdx + 72] LONG $0xd0940f41 // sete r8b @@ -1728,165 +1824,165 @@ LBB0_107: LONG $0xd7940f41 // sete r15b LONG $0x60468b48 // mov rax, qword [rsi + 96] LONG $0x60423b48 // cmp rax, qword [rdx + 96] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x68468b48 // mov rax, qword [rsi + 104] LONG $0x68423b48 // cmp rax, qword [rdx + 104] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x70468b48 // mov rax, qword [rsi + 112] LONG $0x70423b48 // cmp rax, qword [rdx + 112] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x78468b48 // mov rax, qword [rsi + 120] LONG $0x78423b48 // cmp rax, qword [rdx + 120] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil LONG $0x80868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 128] - LONG $0x888e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 136] + LONG $0x889e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 136] LONG $0x80823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 128] LONG $0x90868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 144] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - LONG $0x888a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 136] - LONG $0x988e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 152] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x889a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 136] + LONG $0x989e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 152] LONG $0xd2940f41 // sete r10b LONG $0x90823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 144] LONG $0xa0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 160] LONG $0xd6940f41 // sete r14b - LONG $0x988a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 152] - LONG $0xa88e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 168] + LONG $0x989a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 152] + LONG $0xa89e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 168] LONG $0xd4940f41 // sete r12b LONG $0xa0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 160] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] - LONG $0xa88a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 168] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0xa89a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 168] LONG $0xb0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 176] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0xb0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 176] LONG $0xb8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 184] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0xb8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 184] 
LONG $0xc0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 192] LONG $0xd1940f41 // sete r9b LONG $0xc0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 192] LONG $0xc8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 200] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0xc8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 200] LONG $0xd0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 208] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0xd0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 208] LONG $0xd8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 216] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0xd8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 216] LONG $0xe0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 224] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0xe0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 224] LONG $0xe8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 232] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0xe8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 232] LONG $0xf0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 240] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0xf0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 240] LONG $0xf8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 248] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0xf8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 248] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // 
mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, 
bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_107 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_109: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_111: @@ -1897,294 +1993,389 @@ LBB0_111: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_111 JMP LBB0_123 LBB0_112: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_116 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x634c; BYTE $0xd0 // movsxd r10, eax LBB0_114: LONG $0x06100ff3 // movss xmm0, dword [rsi] LONG $0x04c68348 // add 
rsi, 4 WORD $0x2e0f; BYTE $0x02 // ucomiss xmm0, dword [rdx] LONG $0x04528d48 // lea rdx, [rdx + 4] - LONG $0xd2940f41 // sete r10b - WORD $0xf641; BYTE $0xda // neg r10b - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax + WORD $0x9b0f; BYTE $0xd1 // setnp cl + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xcb20 // and bl, cl + WORD $0xdbf6 // neg bl + LONG $0x077a8d49 // lea rdi, [r10 + 7] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f49 // cmovns rdi, r10 LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] - WORD $0x3045; BYTE $0xc2 // xor r10b, r8b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc3 // xor bl, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] - WORD $0xc189 // mov ecx, eax + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0x2944; BYTE $0xc9 // sub ecx, r9d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0x2044; BYTE $0xd3 // and bl, r10b - WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x000001b8; BYTE $0x00 // mov eax, 1 + WORD $0xe0d3 // shl eax, cl + WORD $0xd820 // and al, bl + WORD $0x3044; BYTE $0xc0 // xor al, r8b + LONG $0x3c048841 // mov byte [r12 + rdi], al + LONG $0x01c28349 // add r10, 1 + LONG $0x08fa8349 // cmp r10, 8 JNE LBB0_114 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_116: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_120 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 + LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LBB0_118: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 LONG $0x06100ff3 // movss xmm0, dword [rsi] - LONG $0x4e100ff3; BYTE $0x04 // movss xmm1, dword [rsi + 4] WORD $0x2e0f; BYTE $0x02 // ucomiss xmm0, dword [rdx] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] - LONG $0x044a2e0f // ucomiss xmm1, dword [rdx + 4] - WORD $0x940f; BYTE $0xd0 // sete al + LONG $0x46100ff3; BYTE $0x04 // movss xmm0, dword [rsi + 4] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x15244c88 // mov byte [rsp + 21], cl + LONG $0x04422e0f // ucomiss xmm0, dword [rdx + 4] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0d244c88 // mov byte [rsp + 13], cl LONG $0x46100ff3; BYTE $0x08 // movss xmm0, dword [rsi + 8] LONG $0x08422e0f // ucomiss xmm0, dword [rdx + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x14244c88 // mov byte [rsp + 20], cl LONG $0x46100ff3; BYTE $0x0c // movss xmm0, dword [rsi + 12] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0c422e0f // ucomiss xmm0, dword [rdx + 12] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x17244c88 // mov byte [rsp + 23], cl LONG $0x46100ff3; BYTE 
$0x10 // movss xmm0, dword [rsi + 16] LONG $0x10422e0f // ucomiss xmm0, dword [rdx + 16] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x16244c88 // mov byte [rsp + 22], cl LONG $0x46100ff3; BYTE $0x14 // movss xmm0, dword [rsi + 20] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x14422e0f // ucomiss xmm0, dword [rdx + 20] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x46100ff3; BYTE $0x18 // movss xmm0, dword [rsi + 24] LONG $0x18422e0f // ucomiss xmm0, dword [rdx + 24] - LONG $0x46100ff3; BYTE $0x1c // movss xmm0, dword [rsi + 28] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al + LONG $0x46100ff3; BYTE $0x1c // movss xmm0, dword [rsi + 28] LONG $0x1c422e0f // ucomiss xmm0, dword [rdx + 28] - LONG $0xd7940f41 // sete r15b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x03244c88 // mov byte [rsp + 3], cl LONG $0x46100ff3; BYTE $0x20 // movss xmm0, dword [rsi + 32] LONG $0x20422e0f // ucomiss xmm0, dword [rdx + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x13244c88 // mov byte [rsp + 19], cl LONG $0x46100ff3; BYTE $0x24 // movss xmm0, dword [rsi + 36] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x24422e0f // ucomiss xmm0, dword [rdx + 36] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x46100ff3; BYTE $0x28 // movss xmm0, dword [rsi + 40] LONG $0x28422e0f // ucomiss xmm0, dword [rdx + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x12244c88 // mov byte [rsp + 18], cl LONG $0x46100ff3; BYTE $0x2c // movss xmm0, dword [rsi + 44] - LONG $0xd1940f41 // sete r9b LONG $0x2c422e0f // ucomiss xmm0, dword [rdx + 44] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x11244c88 // mov byte [rsp + 17], cl LONG $0x46100ff3; BYTE $0x30 // movss xmm0, dword [rsi + 48] LONG $0x30422e0f // ucomiss xmm0, dword [rdx + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x46100ff3; BYTE $0x34 // movss xmm0, dword [rsi + 52] - LONG $0xd2940f41 // sete r10b LONG $0x34422e0f // ucomiss xmm0, dword [rdx + 52] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0f244c88 // mov byte [rsp + 15], cl LONG $0x46100ff3; BYTE $0x38 // movss xmm0, dword [rsi + 56] LONG $0x38422e0f // ucomiss xmm0, dword [rdx + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0c244c88 // mov byte [rsp + 12], cl LONG $0x46100ff3; BYTE $0x3c // movss xmm0, dword [rsi + 60] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x3c422e0f // ucomiss xmm0, dword [rdx + 60] - WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0b244c88 // mov byte [rsp + 11], 
cl LONG $0x46100ff3; BYTE $0x40 // movss xmm0, dword [rsi + 64] LONG $0x40422e0f // ucomiss xmm0, dword [rdx + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0e244c88 // mov byte [rsp + 14], cl LONG $0x46100ff3; BYTE $0x44 // movss xmm0, dword [rsi + 68] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x44422e0f // ucomiss xmm0, dword [rdx + 68] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x0a244c88 // mov byte [rsp + 10], cl LONG $0x46100ff3; BYTE $0x48 // movss xmm0, dword [rsi + 72] - LONG $0xd6940f41 // sete r14b LONG $0x48422e0f // ucomiss xmm0, dword [rdx + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x09244c88 // mov byte [rsp + 9], cl LONG $0x46100ff3; BYTE $0x4c // movss xmm0, dword [rsi + 76] - LONG $0xd4940f41 // sete r12b LONG $0x4c422e0f // ucomiss xmm0, dword [rdx + 76] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al LONG $0x46100ff3; BYTE $0x50 // movss xmm0, dword [rsi + 80] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] LONG $0x50422e0f // ucomiss xmm0, dword [rdx + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x46100ff3; BYTE $0x54 // movss xmm0, dword [rsi + 84] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x54422e0f // ucomiss xmm0, dword [rdx + 84] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x07244c88 // mov byte [rsp + 7], cl LONG $0x46100ff3; BYTE $0x58 // movss xmm0, dword [rsi + 88] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] LONG $0x58422e0f // ucomiss xmm0, dword [rdx + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x05244c88 // mov byte [rsp + 5], cl LONG $0x46100ff3; BYTE $0x5c // movss xmm0, dword [rsi + 92] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x5c422e0f // ucomiss xmm0, dword [rdx + 92] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al LONG $0x46100ff3; BYTE $0x60 // movss xmm0, dword [rsi + 96] - LONG $0xd0940f41 // sete r8b LONG $0x60422e0f // ucomiss xmm0, dword [rdx + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x06244c88 // mov byte [rsp + 6], cl LONG $0x46100ff3; BYTE $0x64 // movss xmm0, dword [rsi + 100] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x64422e0f // ucomiss xmm0, dword [rdx + 100] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x46100ff3; BYTE $0x68 // movss xmm0, dword [rsi + 104] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x68422e0f // ucomiss xmm0, dword [rdx + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al LONG $0x46100ff3; BYTE $0x6c // movss xmm0, dword [rsi + 108] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x6c422e0f // ucomiss xmm0, dword [rdx + 108] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al 
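The recurring setnp / sete / and triplets that these hunks add after every ucomiss (and, further down, ucomisd) make the equality kernels NaN-safe: an unordered comparison, meaning at least one NaN operand, sets the parity flag together with ZF, so a bare sete would report NaN as equal, while AND-ing in the setnp result keeps the bit only when the comparison is both ordered and equal. A minimal Go sketch of the semantics the packed kernel is expected to match; the function and variable names are illustrative, not taken from this repository:

package main

import (
	"fmt"
	"math"
)

// equalBitmapFloat32 packs left[i] == right[i] into a bitmap, one bit per
// element, in the same little-endian bit order the unrolled SSE4 loop writes
// 32 results (4 bytes) per iteration. Go's == on floats is ordered equality,
// so any NaN operand yields false, which is what the added setnp check
// enforces in the assembly.
func equalBitmapFloat32(left, right []float32) []byte {
	out := make([]byte, (len(left)+7)/8)
	for i := range left {
		if left[i] == right[i] { // false when either side is NaN
			out[i/8] |= 1 << (uint(i) % 8)
		}
	}
	return out
}

func main() {
	nan := float32(math.NaN())
	bm := equalBitmapFloat32(
		[]float32{1, nan, 3, nan},
		[]float32{1, nan, 3, 2},
	)
	fmt.Printf("%08b\n", bm[0]) // 00000101: NaN never compares equal
}
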
LONG $0x46100ff3; BYTE $0x70 // movss xmm0, dword [rsi + 112] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x70422e0f // ucomiss xmm0, dword [rdx + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl LONG $0x46100ff3; BYTE $0x74 // movss xmm0, dword [rsi + 116] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x74422e0f // ucomiss xmm0, dword [rdx + 116] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x46100ff3; BYTE $0x78 // movss xmm0, dword [rsi + 120] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x78422e0f // ucomiss xmm0, dword [rdx + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al LONG $0x46100ff3; BYTE $0x7c // movss xmm0, dword [rsi + 124] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 LONG $0x7c422e0f // ucomiss xmm0, dword [rdx + 124] - LONG $0xd7940f40 // sete dil - WORD $0xc000 // add al, al - LONG $0x04244402 // add al, byte [rsp + 4] - LONG $0x06e5c041 // shl r13b, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0845; BYTE $0xef // or r15b, r13b - LONG $0x6cb60f44; WORD $0x0524 // movzx r13d, byte [rsp + 5] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0841; BYTE $0xc5 // or r13b, al - WORD $0x8944; BYTE $0xe8 // mov eax, r13d - WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] - LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - WORD $0x8941; BYTE $0xcd // mov r13d, ecx - LONG $0x03e3c041 // shl r11b, 3 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xda // or r10b, r11b - LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd0 // or al, r10b - LONG $0x4cb60f44; WORD $0x0624 // movzx r9d, byte [rsp + 6] - LONG $0x06e1c041 // shl r9b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xcb // or bl, r9b - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al - WORD $0x0045; BYTE $0xf6 // add r14b, r14b - LONG $0x24740244; BYTE $0x0e // add r14b, byte [rsp + 14] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG 
$0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl - WORD $0x0841; BYTE $0xc0 // or r8b, al + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x14244402 // add al, byte [rsp + 20] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] + LONG $0x15244402 // add al, byte [rsp + 21] + LONG $0x05e7c040 // shl dil, 5 + LONG $0x06e5c041 // shl r13b, 6 + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD $0x8944; BYTE $0xef // mov edi, r13d + LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xc108 // or cl, al + WORD $0xd889 // mov eax, ebx + WORD $0xd800 // add al, bl + LONG $0x13244402 // add al, byte [rsp + 19] + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x03 // movzx eax, byte [rsp + 3] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x03244488 // mov byte [rsp + 3], al + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x17 // movzx eax, byte [rsp + 23] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] + LONG $0x245cb60f; BYTE $0x11 // movzx ebx, byte [rsp + 17] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil + LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] + LONG $0x04e5c041 // shl r13b, 4 + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x13 // movzx ecx, byte [rsp + 19] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x0f // movzx ebx, byte [rsp + 15] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xc7 // or dil, al - WORD $0x0840; BYTE $0xcf // or dil, cl - LONG $0x02468845 // mov byte [r14 + 2], r8b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x0a // movzx ebx, byte [rsp + 10] + WORD $0xdb00 // add bl, bl + LONG $0x0e245c02 // add bl, byte [rsp + 14] + LONG $0x244cb60f; BYTE $0x09 // movzx ecx, byte [rsp + 9] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0841; BYTE $0xcb // or r11b, cl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x5cb60f44; WORD $0x0324 // movzx r11d, byte [rsp + 3] + WORD $0x0845; BYTE $0xeb // or 
r11b, r13b + LONG $0x6cb60f44; WORD $0x0724 // movzx r13d, byte [rsp + 7] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x05 // movzx ecx, byte [rsp + 5] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x07e2c041 // shl r10b, 7 + WORD $0x0841; BYTE $0xca // or r10b, cl + WORD $0x0841; BYTE $0xda // or r10b, bl + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xc900 // add cl, cl + LONG $0x06244c02 // add cl, byte [rsp + 6] + LONG $0x02e7c041 // shl r15b, 2 + WORD $0x0841; BYTE $0xcf // or r15b, cl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xf1 // or cl, r14b + LONG $0x241c8845 // mov byte [r12], r11b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448841; BYTE $0x01 // mov byte [r12 + 1], al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b + WORD $0x0841; BYTE $0xc8 // or r8b, cl + LONG $0x24548845; BYTE $0x02 // mov byte [r12 + 2], r10b + LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 JNE LBB0_118 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] LBB0_120: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_122: + LONG $0x01498d4c // lea r9, [rcx + 1] LONG $0x04100ff3; BYTE $0x8e // movss xmm0, dword [rsi + 4*rcx] LONG $0x8a042e0f // ucomiss xmm0, dword [rdx + 4*rcx] - LONG $0x01418d4c // lea r8, [rcx + 1] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0xdbf6 // neg bl + WORD $0x9b0f; BYTE $0xd3 // setnp bl + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xd820 // and al, bl + WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc0 // xor al, r8b WORD $0xe180; BYTE $0x07 // and cl, 7 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd820 // and al, bl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xc3 // xor bl, r8b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + WORD $0x894c; BYTE $0xc9 // mov rcx, r9 + WORD $0x394d; BYTE $0xce // cmp r14, r9 JNE LBB0_122 JMP LBB0_123 LBB0_57: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD 
$0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_61 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_59: WORD $0xb60f; BYTE $0x0e // movzx ecx, byte [rsi] @@ -2197,7 +2388,7 @@ LBB0_59: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -2206,49 +2397,49 @@ LBB0_59: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_59 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_61: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_65 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LBB0_63: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb60f; BYTE $0x06 // movzx eax, byte [rsi] LONG $0x014eb60f // movzx ecx, byte [rsi + 1] WORD $0x023a // cmp al, byte [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] WORD $0x4a3a; BYTE $0x01 // cmp cl, byte [rdx + 1] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x0246b60f // movzx eax, byte [rsi + 2] WORD $0x423a; BYTE $0x02 // cmp al, byte [rdx + 2] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] LONG $0x0346b60f // movzx eax, byte [rsi + 3] WORD $0x423a; BYTE $0x03 // cmp al, byte [rdx + 3] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] LONG $0x0446b60f // movzx eax, byte [rsi + 4] WORD $0x423a; BYTE $0x04 // cmp al, byte [rdx + 4] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] LONG $0x0546b60f // movzx eax, byte [rsi + 5] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] LONG $0x0646b60f // movzx eax, byte [rsi + 6] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] LONG $0x0746b60f // movzx eax, byte [rsi + 7] WORD $0x423a; BYTE $0x07 // cmp al, byte [rdx + 7] LONG $0xd7940f41 // sete r15b LONG $0x0846b60f // movzx eax, byte [rsi + 8] WORD $0x423a; BYTE $0x08 // cmp al, byte [rdx + 8] - LONG 
$0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x0946b60f // movzx eax, byte [rsi + 9] WORD $0x423a; BYTE $0x09 // cmp al, byte [rdx + 9] LONG $0xd7940f40 // sete dil @@ -2263,16 +2454,16 @@ LBB0_63: LONG $0xd6940f41 // sete r14b LONG $0x0d46b60f // movzx eax, byte [rsi + 13] WORD $0x423a; BYTE $0x0d // cmp al, byte [rdx + 13] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] LONG $0x0e46b60f // movzx eax, byte [rsi + 14] WORD $0x423a; BYTE $0x0e // cmp al, byte [rdx + 14] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] LONG $0x0f46b60f // movzx eax, byte [rsi + 15] WORD $0x423a; BYTE $0x0f // cmp al, byte [rdx + 15] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd0940f41 // sete r8b LONG $0x1046b60f // movzx eax, byte [rsi + 16] WORD $0x423a; BYTE $0x10 // cmp al, byte [rdx + 16] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] LONG $0x1146b60f // movzx eax, byte [rsi + 17] WORD $0x423a; BYTE $0x11 // cmp al, byte [rdx + 17] LONG $0xd4940f41 // sete r12b @@ -2281,144 +2472,144 @@ LBB0_63: LONG $0xd5940f41 // sete r13b LONG $0x1346b60f // movzx eax, byte [rsi + 19] WORD $0x423a; BYTE $0x13 // cmp al, byte [rdx + 19] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] LONG $0x1446b60f // movzx eax, byte [rsi + 20] WORD $0x423a; BYTE $0x14 // cmp al, byte [rdx + 20] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] LONG $0x1546b60f // movzx eax, byte [rsi + 21] WORD $0x423a; BYTE $0x15 // cmp al, byte [rdx + 21] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] LONG $0x1646b60f // movzx eax, byte [rsi + 22] WORD $0x423a; BYTE $0x16 // cmp al, byte [rdx + 22] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] LONG $0x1746b60f // movzx eax, byte [rsi + 23] WORD $0x423a; BYTE $0x17 // cmp al, byte [rdx + 23] LONG $0xd1940f41 // sete r9b LONG $0x1846b60f // movzx eax, byte [rsi + 24] WORD $0x423a; BYTE $0x18 // cmp al, byte [rdx + 24] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] LONG $0x1946b60f // movzx eax, byte [rsi + 25] WORD $0x423a; BYTE $0x19 // cmp al, byte [rdx + 25] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] LONG $0x1a46b60f // movzx eax, byte [rsi + 26] WORD $0x423a; BYTE $0x1a // cmp al, byte [rdx + 26] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] LONG $0x1b46b60f // movzx eax, byte [rsi + 27] WORD $0x423a; BYTE $0x1b // cmp al, byte [rdx + 27] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x1c46b60f // movzx eax, byte [rsi + 28] WORD $0x423a; BYTE $0x1c // cmp al, byte [rdx + 28] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] LONG $0x1d46b60f // movzx eax, byte [rsi + 29] WORD $0x423a; BYTE $0x1d // cmp al, byte [rdx + 29] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x1e46b60f // movzx eax, byte [rsi + 30] WORD $0x423a; 
BYTE $0x1e // cmp al, byte [rdx + 30] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] LONG $0x1f46b60f // movzx eax, byte [rsi + 31] LONG $0x20c68348 // add rsi, 32 WORD $0x423a; BYTE $0x1f // cmp al, byte [rdx + 31] - LONG $0xd0940f41 // sete r8b - WORD $0xc900 // add cl, cl - LONG $0x28244c02 // add cl, byte [rsp + 40] - WORD $0xc889 // mov eax, ecx - LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xdb00 // add bl, bl + LONG $0x04245c02 // add bl, byte [rsp + 4] + WORD $0xd889 // mov eax, ebx + LONG $0x245cb60f; BYTE $0x05 // movzx ebx, byte [rsp + 5] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xcf // or r15b, cl - LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x245cb60f; BYTE $0x15 // movzx ebx, byte [rsp + 21] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x07 // add dil, byte [rsp + 7] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x247c0240; BYTE $0x08 // add dil, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e2c041 // shl r10b, 2 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0xcf89 // mov edi, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx LONG $0x03e3c041 // shl r11b, 3 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xde // or r14b, r11b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x247cb60f; BYTE $0x06 // movzx edi, byte [rsp + 6] + LONG $0x247cb60f; BYTE $0x07 // movzx edi, byte [rsp + 7] LONG $0x06e7c040 // shl dil, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x0841; BYTE $0xc0 // or r8b, al WORD $0x0045; BYTE $0xe4 // add r12b, r12b - LONG $0x24640244; BYTE $0x0d // add r12b, byte [rsp + 13] + LONG $0x24640244; BYTE $0x0e // add r12b, byte [rsp + 14] LONG $0x02e5c041 // shl r13b, 2 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x24648b4c; BYTE $0x30 // mov r12, 
qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0b // movzx ecx, byte [rsp + 11] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x243c8845 // mov byte [r12], r15b + LONG $0x245cb60f; BYTE $0x0c // movzx ebx, byte [rsp + 12] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x12 // movzx ecx, byte [rsp + 18] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x13 // movzx ebx, byte [rsp + 19] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x20c28348 // add rdx, 32 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 JNE LBB0_63 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword 
[rsp + 24] + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] LBB0_65: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_67: @@ -2429,30 +2620,30 @@ LBB0_67: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_67 JMP LBB0_123 LBB0_90: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB0_94 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB0_92: WORD $0x0e8b // mov ecx, dword [rsi] @@ -2465,7 +2656,7 @@ LBB0_92: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -2474,49 +2665,49 @@ LBB0_92: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB0_92 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB0_94: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB0_98 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB0_96: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x068b // mov eax, dword [rsi] WORD $0x4e8b; BYTE $0x04 // mov ecx, dword [rsi + 4] WORD $0x023b // cmp eax, dword [rdx] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] WORD $0x4a3b; BYTE $0x04 // cmp ecx, dword [rdx + 4] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] WORD $0x468b; BYTE $0x08 // mov eax, 
dword [rsi + 8] WORD $0x423b; BYTE $0x08 // cmp eax, dword [rdx + 8] - LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] + LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] WORD $0x468b; BYTE $0x0c // mov eax, dword [rsi + 12] WORD $0x423b; BYTE $0x0c // cmp eax, dword [rdx + 12] - LONG $0x2454940f; BYTE $0x15 // sete byte [rsp + 21] + LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] WORD $0x468b; BYTE $0x10 // mov eax, dword [rsi + 16] WORD $0x423b; BYTE $0x10 // cmp eax, dword [rdx + 16] - LONG $0x2454940f; BYTE $0x16 // sete byte [rsp + 22] + LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] WORD $0x468b; BYTE $0x14 // mov eax, dword [rsi + 20] WORD $0x423b; BYTE $0x14 // cmp eax, dword [rdx + 20] - LONG $0x2454940f; BYTE $0x17 // sete byte [rsp + 23] + LONG $0x2454940f; BYTE $0x03 // sete byte [rsp + 3] WORD $0x468b; BYTE $0x18 // mov eax, dword [rsi + 24] WORD $0x423b; BYTE $0x18 // cmp eax, dword [rdx + 24] - LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] + LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] WORD $0x468b; BYTE $0x1c // mov eax, dword [rsi + 28] WORD $0x423b; BYTE $0x1c // cmp eax, dword [rdx + 28] LONG $0xd5940f41 // sete r13b WORD $0x468b; BYTE $0x20 // mov eax, dword [rsi + 32] WORD $0x423b; BYTE $0x20 // cmp eax, dword [rdx + 32] - LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] WORD $0x468b; BYTE $0x24 // mov eax, dword [rsi + 36] WORD $0x423b; BYTE $0x24 // cmp eax, dword [rdx + 36] LONG $0xd0940f41 // sete r8b @@ -2528,165 +2719,165 @@ LBB0_96: LONG $0xd7940f41 // sete r15b WORD $0x468b; BYTE $0x30 // mov eax, dword [rsi + 48] WORD $0x423b; BYTE $0x30 // cmp eax, dword [rdx + 48] - LONG $0x2454940f; BYTE $0x05 // sete byte [rsp + 5] + LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] WORD $0x468b; BYTE $0x34 // mov eax, dword [rsi + 52] WORD $0x423b; BYTE $0x34 // cmp eax, dword [rdx + 52] - LONG $0x2454940f; BYTE $0x06 // sete byte [rsp + 6] + LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] WORD $0x468b; BYTE $0x38 // mov eax, dword [rsi + 56] WORD $0x423b; BYTE $0x38 // cmp eax, dword [rdx + 56] - LONG $0x2454940f; BYTE $0x07 // sete byte [rsp + 7] + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] WORD $0x468b; BYTE $0x3c // mov eax, dword [rsi + 60] WORD $0x423b; BYTE $0x3c // cmp eax, dword [rdx + 60] - WORD $0x940f; BYTE $0xd3 // sete bl + LONG $0xd7940f40 // sete dil WORD $0x468b; BYTE $0x40 // mov eax, dword [rsi + 64] - WORD $0x4e8b; BYTE $0x44 // mov ecx, dword [rsi + 68] + WORD $0x5e8b; BYTE $0x44 // mov ebx, dword [rsi + 68] WORD $0x423b; BYTE $0x40 // cmp eax, dword [rdx + 64] WORD $0x468b; BYTE $0x48 // mov eax, dword [rsi + 72] - LONG $0x2454940f; BYTE $0x0a // sete byte [rsp + 10] - WORD $0x4a3b; BYTE $0x44 // cmp ecx, dword [rdx + 68] - WORD $0x4e8b; BYTE $0x4c // mov ecx, dword [rsi + 76] + LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + WORD $0x5a3b; BYTE $0x44 // cmp ebx, dword [rdx + 68] + WORD $0x5e8b; BYTE $0x4c // mov ebx, dword [rsi + 76] LONG $0xd2940f41 // sete r10b WORD $0x423b; BYTE $0x48 // cmp eax, dword [rdx + 72] WORD $0x468b; BYTE $0x50 // mov eax, dword [rsi + 80] LONG $0xd6940f41 // sete r14b - WORD $0x4a3b; BYTE $0x4c // cmp ecx, dword [rdx + 76] - WORD $0x4e8b; BYTE $0x54 // mov ecx, dword [rsi + 84] + WORD $0x5a3b; BYTE $0x4c // cmp ebx, dword [rdx + 76] + WORD $0x5e8b; BYTE $0x54 // mov ebx, dword [rsi + 84] LONG $0xd4940f41 // sete r12b WORD $0x423b; BYTE $0x50 // cmp eax, dword [rdx + 80] - LONG $0x2454940f; BYTE $0x08 // sete 
byte [rsp + 8] - WORD $0x4a3b; BYTE $0x54 // cmp ecx, dword [rdx + 84] + LONG $0x2454940f; BYTE $0x09 // sete byte [rsp + 9] + WORD $0x5a3b; BYTE $0x54 // cmp ebx, dword [rdx + 84] WORD $0x468b; BYTE $0x58 // mov eax, dword [rsi + 88] - LONG $0x2454940f; BYTE $0x0b // sete byte [rsp + 11] + LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] WORD $0x423b; BYTE $0x58 // cmp eax, dword [rdx + 88] WORD $0x468b; BYTE $0x5c // mov eax, dword [rsi + 92] - LONG $0x2454940f; BYTE $0x0c // sete byte [rsp + 12] + LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] WORD $0x423b; BYTE $0x5c // cmp eax, dword [rdx + 92] WORD $0x468b; BYTE $0x60 // mov eax, dword [rsi + 96] LONG $0xd1940f41 // sete r9b WORD $0x423b; BYTE $0x60 // cmp eax, dword [rdx + 96] WORD $0x468b; BYTE $0x64 // mov eax, dword [rsi + 100] - LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] + LONG $0x2454940f; BYTE $0x14 // sete byte [rsp + 20] WORD $0x423b; BYTE $0x64 // cmp eax, dword [rdx + 100] WORD $0x468b; BYTE $0x68 // mov eax, dword [rsi + 104] - LONG $0x2454940f; BYTE $0x0d // sete byte [rsp + 13] + LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] WORD $0x423b; BYTE $0x68 // cmp eax, dword [rdx + 104] WORD $0x468b; BYTE $0x6c // mov eax, dword [rsi + 108] - LONG $0x2454940f; BYTE $0x0e // sete byte [rsp + 14] + LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] WORD $0x423b; BYTE $0x6c // cmp eax, dword [rdx + 108] WORD $0x468b; BYTE $0x70 // mov eax, dword [rsi + 112] - LONG $0x2454940f; BYTE $0x0f // sete byte [rsp + 15] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] WORD $0x423b; BYTE $0x70 // cmp eax, dword [rdx + 112] WORD $0x468b; BYTE $0x74 // mov eax, dword [rsi + 116] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] WORD $0x423b; BYTE $0x74 // cmp eax, dword [rdx + 116] WORD $0x468b; BYTE $0x78 // mov eax, dword [rsi + 120] - LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] + LONG $0x2454940f; BYTE $0x13 // sete byte [rsp + 19] WORD $0x423b; BYTE $0x78 // cmp eax, dword [rdx + 120] WORD $0x468b; BYTE $0x7c // mov eax, dword [rsi + 124] - LONG $0x2454940f; BYTE $0x11 // sete byte [rsp + 17] + LONG $0x2454940f; BYTE $0x12 // sete byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 WORD $0x423b; BYTE $0x7c // cmp eax, dword [rdx + 124] - LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x940f; BYTE $0xd1 // sete cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // 
shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al 
+ LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB0_96 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB0_98: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB0_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB0_100: @@ -2697,16 +2888,16 @@ LBB0_100: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB0_100 LBB0_123: @@ -2747,7 +2938,7 @@ DATA LCDATA1<>+0x0f0(SB)/8, $0x4040404040404040 DATA LCDATA1<>+0x0f8(SB)/8, $0x4040404040404040 GLOBL LCDATA1<>(SB), 8, $256 -TEXT ·_comparison_equal_arr_scalar_sse4(SB), $344-48 +TEXT ·_comparison_equal_arr_scalar_sse4(SB), $328-48 MOVQ typ+0(FP), DI MOVQ left+8(FP), SI @@ -2758,21 +2949,21 @@ TEXT ·_comparison_equal_arr_scalar_sse4(SB), $344-48 MOVQ SP, BP ADDQ $16, SP ANDQ $-16, SP - MOVQ BP, 320(SP) + MOVQ BP, 304(SP) LEAQ LCDATA1<>(SB), BP WORD $0x894d; BYTE $0xc2 // mov r10, r8 WORD $0x8949; BYTE $0xce // mov r14, rcx 
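The scalar tail loops (LBB0_122, LBB0_67, LBB0_100 above, and the LBB1 tails below) fold each comparison result into the output bitmap with a branch-free sete/neg/xor/and/xor sequence instead of a conditional read-modify-write. A small Go sketch of that single-bit update, assuming the same little-endian bit order; setBit and its parameter names are illustrative, not taken from this repository:

package main

import "fmt"

// setBit writes cond into bit i of bitmap without branching on the old byte,
// mirroring the sete/neg/xor/and/xor tails in the assembly: with full = 0xFF
// when cond holds and 0x00 otherwise, ((old ^ full) & mask) ^ old sets the
// masked bit when cond is true and clears it when cond is false, leaving the
// other seven bits untouched.
func setBit(bitmap []byte, i int, cond bool) {
	var full byte // the assembly derives this 0x00/0xFF value from sete + neg
	if cond {
		full = 0xFF
	}
	old := bitmap[i>>3]
	mask := byte(1) << (uint(i) & 7)
	bitmap[i>>3] = ((old ^ full) & mask) ^ old
}

func main() {
	bm := make([]byte, 1)
	setBit(bm, 2, true)
	setBit(bm, 5, true)
	setBit(bm, 2, false)
	fmt.Printf("%08b\n", bm[0]) // 00100000
}
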
WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JG LBB1_26 + JG LBB1_27 WORD $0xff83; BYTE $0x03 // cmp edi, 3 JLE LBB1_2 WORD $0xff83; BYTE $0x04 // cmp edi, 4 - JE LBB1_100 + JE LBB1_101 WORD $0xff83; BYTE $0x05 // cmp edi, 5 - JE LBB1_123 + JE LBB1_124 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB1_202 + JNE LBB1_199 WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -2794,6 +2985,7 @@ LBB1_15: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 + WORD $0x894d; BYTE $0xf1 // mov r9, r14 LONG $0x04b60f45; BYTE $0x1e // movzx r8d, byte [r14 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] @@ -2815,20 +3007,20 @@ LBB1_17: JL LBB1_21 QUAD $0x000000902494894c // mov qword [rsp + 144], r10 QUAD $0x00000098249c894c // mov qword [rsp + 152], r11 - QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 + QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 LBB1_19: QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - QUAD $0x000000e02494940f // sete byte [rsp + 224] + QUAD $0x000000d02494940f // sete byte [rsp + 208] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6940f41 // sete r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d - QUAD $0x000000d02494940f // sete byte [rsp + 208] + QUAD $0x000000b02494940f // sete byte [rsp + 176] LONG $0x106e3944 // cmp dword [rsi + 16], r13d - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] LONG $0x146e3944 // cmp dword [rsi + 20], r13d LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x186e3944 // cmp dword [rsi + 24], r13d @@ -2836,142 +3028,143 @@ LBB1_19: LONG $0x1c6e3944 // cmp dword [rsi + 28], r13d WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x206e3944 // cmp dword [rsi + 32], r13d - QUAD $0x000000a02494940f // sete byte [rsp + 160] + QUAD $0x000000c02494940f // sete byte [rsp + 192] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3940f41 // sete r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d LONG $0xd4940f41 // sete r12b LONG $0x386e3944 // cmp dword [rsi + 56], r13d - QUAD $0x000000b02494940f // sete byte [rsp + 176] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x3c6e3944 // cmp dword [rsi + 60], r13d WORD $0x940f; BYTE $0xd1 // sete cl LONG $0x406e3944 // cmp dword [rsi + 64], r13d - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] LONG $0x446e3944 // cmp dword [rsi + 68], r13d - QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] LONG $0x486e3944 // cmp dword [rsi + 72], r13d - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0x4c6e3944 // cmp dword [rsi + 76], r13d - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG 
$0x506e3944 // cmp dword [rsi + 80], r13d - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0x546e3944 // cmp dword [rsi + 84], r13d - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x586e3944 // cmp dword [rsi + 88], r13d LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x586e3944 // cmp dword [rsi + 88], r13d + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x5c6e3944 // cmp dword [rsi + 92], r13d LONG $0xd7940f41 // sete r15b LONG $0x606e3944 // cmp dword [rsi + 96], r13d - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x646e3944 // cmp dword [rsi + 100], r13d - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0x686e3944 // cmp dword [rsi + 104], r13d - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x706e3944 // cmp dword [rsi + 112], r13d + LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0x746e3944 // cmp dword [rsi + 116], r13d + LONG $0x706e3944 // cmp dword [rsi + 112], r13d LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x786e3944 // cmp dword [rsi + 120], r13d + LONG $0x746e3944 // cmp dword [rsi + 116], r13d LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0x786e3944 // cmp dword [rsi + 120], r13d + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000e024bc0240 // add dil, byte [rsp + 224] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000d024940244 // add r10b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE 
$0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x000000b024bcb60f // movzx edi, byte [rsp + 176] + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or 
r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xc000 // add al, al + LONG $0x10244402 // add al, byte [rsp + 16] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c68349 // add r14, 4 - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 JNE LBB1_19 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] @@ -2979,7 +3172,7 @@ LBB1_19: LBB1_21: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 @@ -2989,12 +3182,13 @@ LBB1_21: LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB1_147: +LBB1_145: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 + WORD $0x894d; BYTE $0xf7 // mov r15, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 @@ -3017,18 +3211,18 @@ LBB1_147: WORD $0xda30 // xor dl, bl LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB1_147 + JNE LBB1_145 JMP LBB1_24 -LBB1_26: +LBB1_27: WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JLE LBB1_27 + JLE LBB1_28 WORD $0xff83; BYTE $0x09 // cmp edi, 9 - JE LBB1_162 + JE LBB1_160 WORD $0xff83; BYTE $0x0b // cmp edi, 11 - JE LBB1_174 + JE LBB1_172 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB1_202 + JNE LBB1_199 LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xda490f4d // cmovns r11, r10 @@ -3038,18 +3232,21 @@ LBB1_26: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x02100ff2 // movsd xmm0, qword [rdx] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB1_49 + JE LBB1_50 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB1_47: +LBB1_48: LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] LONG $0x08768d48 // lea rsi, [rsi + 8] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xca20 // and dl, cl WORD $0xdaf6 // neg dl LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 + WORD $0x894d; BYTE $0xf7 // mov r15, r14 LONG 
$0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] @@ -3062,191 +3259,282 @@ LBB1_47: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB1_47 + JNE LBB1_48 LONG $0x01c68349 // add r14, 1 -LBB1_49: +LBB1_50: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB1_53 + JL LBB1_54 QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 - QUAD $0x000000e0249c894c // mov qword [rsp + 224], r11 + QUAD $0x00000110249c894c // mov qword [rsp + 272], r11 + QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 -LBB1_51: +LBB1_52: QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - QUAD $0x000000d02494940f // sete byte [rsp + 208] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x462e0f66; BYTE $0x08 // ucomisd xmm0, qword [rsi + 8] - LONG $0xd1940f41 // sete r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al LONG $0x462e0f66; BYTE $0x10 // ucomisd xmm0, qword [rsi + 16] - LONG $0xd6940f41 // sete r14b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x18244c88 // mov byte [rsp + 24], cl LONG $0x462e0f66; BYTE $0x18 // ucomisd xmm0, qword [rsi + 24] - LONG $0xd5940f41 // sete r13b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x462e0f66; BYTE $0x20 // ucomisd xmm0, qword [rsi + 32] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x462e0f66; BYTE $0x28 // ucomisd xmm0, qword [rsi + 40] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd4940f41 // sete r12b + WORD $0x2041; BYTE $0xc4 // and r12b, al LONG $0x462e0f66; BYTE $0x30 // ucomisd xmm0, qword [rsi + 48] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x462e0f66; BYTE $0x38 // ucomisd xmm0, qword [rsi + 56] - WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl LONG $0x462e0f66; BYTE $0x40 // ucomisd xmm0, qword [rsi + 64] - QUAD $0x000000b02494940f // sete byte [rsp + 176] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x462e0f66; BYTE $0x48 // ucomisd xmm0, qword [rsi + 72] - WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x462e0f66; BYTE $0x50 // ucomisd xmm0, qword [rsi + 80] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x462e0f66; BYTE 
$0x58 // ucomisd xmm0, qword [rsi + 88] - LONG $0xd2940f41 // sete r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x462e0f66; BYTE $0x60 // ucomisd xmm0, qword [rsi + 96] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x462e0f66; BYTE $0x68 // ucomisd xmm0, qword [rsi + 104] - LONG $0xd4940f41 // sete r12b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x462e0f66; BYTE $0x70 // ucomisd xmm0, qword [rsi + 112] - QUAD $0x000000802494940f // sete byte [rsp + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x462e0f66; BYTE $0x78 // ucomisd xmm0, qword [rsi + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl QUAD $0x00000080862e0f66 // ucomisd xmm0, qword [rsi + 128] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl QUAD $0x00000088862e0f66 // ucomisd xmm0, qword [rsi + 136] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl QUAD $0x00000090862e0f66 // ucomisd xmm0, qword [rsi + 144] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl QUAD $0x00000098862e0f66 // ucomisd xmm0, qword [rsi + 152] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl QUAD $0x000000a0862e0f66 // ucomisd xmm0, qword [rsi + 160] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xc0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 192], cl QUAD $0x000000a8862e0f66 // ucomisd xmm0, qword [rsi + 168] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl QUAD $0x000000b0862e0f66 // ucomisd xmm0, qword [rsi + 176] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - QUAD $0x000000b8862e0f66 // ucomisd xmm0, qword [rsi + 184] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al + QUAD $0x000000b8862e0f66 // ucomisd xmm0, qword [rsi + 184] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al QUAD $0x000000c0862e0f66 // ucomisd xmm0, qword [rsi + 192] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xd0248c88; WORD 
$0x0000; BYTE $0x00 // mov byte [rsp + 208], cl QUAD $0x000000c8862e0f66 // ucomisd xmm0, qword [rsi + 200] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al QUAD $0x000000d0862e0f66 // ucomisd xmm0, qword [rsi + 208] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al QUAD $0x000000d8862e0f66 // ucomisd xmm0, qword [rsi + 216] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al QUAD $0x000000e0862e0f66 // ucomisd xmm0, qword [rsi + 224] - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x98248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], cl QUAD $0x000000e8862e0f66 // ucomisd xmm0, qword [rsi + 232] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl QUAD $0x000000f0862e0f66 // ucomisd xmm0, qword [rsi + 240] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al QUAD $0x000000f8862e0f66 // ucomisd xmm0, qword [rsi + 248] - LONG $0xd0940f41 // sete r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x000000d0248c0244 // add r9b, byte [rsp + 208] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0xb0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 176] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al + WORD $0x0045; BYTE $0xed // add r13b, r13b + LONG $0x246c0244; BYTE $0x30 // add r13b, byte [rsp + 48] + WORD $0x8944; BYTE $0xe8 // mov eax, r13d LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000008024bcb60f // movzx edi, byte [rsp + 128] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xc000 // add al, al - LONG $0x68244402 // add al, byte [rsp + 104] - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - 
WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x8841; BYTE $0x1e // mov byte [r14], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] + LONG $0x6cb60f44; WORD $0x3824 // movzx r13d, byte [rsp + 56] + LONG $0x06e5c041 // shl r13b, 6 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x64b60f44; WORD $0x1824 // movzx r12d, byte [rsp + 24] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] WORD $0xc900 // add cl, cl LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x04244488 // mov byte [rsp + 4], al + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xcb08 // or bl, cl + LONG $0x6cb60f44; WORD $0x1024 // movzx r13d, byte [rsp + 16] + LONG $0x04e5c041 // shl r13b, 4 + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + LONG $0x08244488 // mov byte [rsp + 8], al + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x74b60f44; WORD $0x6024 // movzx r14d, byte [rsp + 96] + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xc6 // or r14b, al + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + WORD $0xc900 // add cl, cl + LONG $0x58244c02 // add cl, byte [rsp + 88] + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xcb08 // or bl, cl + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + QUAD $0x000000c0248cb60f // movzx ecx, byte [rsp + 192] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE 
$0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 + WORD $0xd908 // or cl, bl + WORD $0x8941; BYTE $0xcc // mov r12d, ecx + QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + WORD $0x0844; BYTE $0xe8 // or al, r13b + QUAD $0x000000b0249cb60f // movzx ebx, byte [rsp + 176] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c041 // shl r15b, 6 + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x24740a44; BYTE $0x08 // or r14b, byte [rsp + 8] LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0x0845; BYTE $0xf8 // or r8b, r15b + WORD $0x0845; BYTE $0xe0 // or r8b, r12b + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x000000d0249c0244 // add r11b, byte [rsp + 208] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + QUAD $0x00000098249cb60f // movzx ebx, byte [rsp + 152] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xcb // or bl, r9b + WORD $0x8941; BYTE $0xd9 // mov r9d, ebx + WORD $0x0188 // mov byte [rcx], al + QUAD $0x000000a0249cb60f // movzx ebx, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x01718844 // mov byte [rcx + 1], r14b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x0844; BYTE $0xca // or dl, r9b + LONG $0x02418844 // mov byte [rcx + 2], r8b + WORD $0x5188; BYTE $0x03 // mov byte [rcx + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c68349 // add r14, 4 - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 - JNE LBB1_51 + LONG $0x04c18348 // add rcx, 4 + WORD $0x8949; BYTE $0xce // mov r14, rcx + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 + JNE LBB1_52 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] + QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] -LBB1_53: +LBB1_54: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB1_197 + JNE LBB1_195 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB1_199 + JMP LBB1_197 LBB1_2: WORD $0xff83; BYTE $0x02 // cmp edi, 2 - JE LBB1_56 + JE LBB1_57 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB1_202 + JNE LBB1_199 WORD $0x8a44; BYTE $0x1a // mov r11b, byte [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -3268,6 +3556,7 @@ LBB1_6: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 + WORD $0x894d; BYTE $0xf4 // mov r12, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] @@ -3288,178 +3577,177 @@ LBB1_8: LONG $0x20fa8349 // cmp r10, 32 JL LBB1_9 LONG $0x10ff8349 // cmp r15, 16 - LONG $0x245c8844; BYTE $0x08 // mov byte [rsp + 8], r11b + LONG $0x245c8844; BYTE $0x04 
// mov byte [rsp + 4], r11b QUAD $0x000000902494894c // mov qword [rsp + 144], r10 QUAD $0x0000010024bc894c // mov qword [rsp + 256], r15 - JB LBB1_83 + JB LBB1_84 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB1_86 + JAE LBB1_87 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB1_86 + JAE LBB1_87 -LBB1_83: +LBB1_84: WORD $0xc031 // xor eax, eax - QUAD $0x000000f824848948 // mov qword [rsp + 248], rax - LONG $0x2474894c; BYTE $0x78 // mov qword [rsp + 120], r14 - -LBB1_89: - WORD $0x894d; BYTE $0xfe // mov r14, r15 - QUAD $0x000000f824b42b4c // sub r14, qword [rsp + 248] - QUAD $0x0000009824b4894c // mov qword [rsp + 152], r14 + QUAD $0x000000e824848948 // mov qword [rsp + 232], rax + LONG $0x2474894c; BYTE $0x58 // mov qword [rsp + 88], r14 LBB1_90: + QUAD $0x000000e824bc2b4c // sub r15, qword [rsp + 232] + QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + +LBB1_91: WORD $0x8948; BYTE $0xf1 // mov rcx, rsi WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b - QUAD $0x000000c02494940f // sete byte [rsp + 192] + QUAD $0x000000a02494940f // sete byte [rsp + 160] LONG $0x015e3844 // cmp byte [rsi + 1], r11b LONG $0xd6940f40 // sete sil LONG $0x02593844 // cmp byte [rcx + 2], r11b LONG $0xd7940f41 // sete r15b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x03 // cmp byte [rcx + 3], al LONG $0xd4940f41 // sete r12b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x04 // cmp byte [rcx + 4], al - QUAD $0x000000d02494940f // sete byte [rsp + 208] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + QUAD $0x000000b02494940f // sete byte [rsp + 176] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x05 // cmp byte [rcx + 5], al - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x06 // cmp byte [rcx + 6], al - QUAD $0x000000e02494940f // sete byte [rsp + 224] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + QUAD $0x000000d02494940f // sete byte [rsp + 208] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x07 // cmp byte [rcx + 7], al LONG $0xd1940f41 // sete r9b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x08 // cmp byte [rcx + 8], al - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + QUAD $0x000000c02494940f // sete byte [rsp + 192] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x09 // cmp byte [rcx + 9], al WORD $0x940f; BYTE $0xd2 // sete dl - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0a // cmp byte [rcx + 10], al LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0b // cmp byte [rcx + 11], al LONG $0xd2940f41 // sete r10b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, 
byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0c // cmp byte [rcx + 12], al LONG $0xd6940f41 // sete r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0d // cmp byte [rcx + 13], al LONG $0xd5940f41 // sete r13b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0e // cmp byte [rcx + 14], al - QUAD $0x000000b02494940f // sete byte [rsp + 176] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0f // cmp byte [rcx + 15], al LONG $0xd0940f41 // sete r8b - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x10 // cmp byte [rcx + 16], bl - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x11 // cmp byte [rcx + 17], bl - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x12 // cmp byte [rcx + 18], bl - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x13 // cmp byte [rcx + 19], bl LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x14 // cmp byte [rcx + 20], bl + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x13 // cmp byte [rcx + 19], bl LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x15 // cmp byte [rcx + 21], bl + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x14 // cmp byte [rcx + 20], bl LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x15 // cmp byte [rcx + 21], bl + LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x16 // cmp byte [rcx + 22], bl LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x17 // cmp byte [rcx + 23], bl LONG $0xd3940f41 // sete r11b - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x18 // cmp byte [rcx + 24], bl - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x19 // cmp byte [rcx + 25], bl - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x1a // 
cmp byte [rcx + 26], bl LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x1a // cmp byte [rcx + 26], bl + LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x1b // cmp byte [rcx + 27], bl - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x1c // cmp byte [rcx + 28], bl - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x1d // cmp byte [rcx + 29], bl LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x1d // cmp byte [rcx + 29], bl + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x1e // cmp byte [rcx + 30], bl QUAD $0x000000882494940f // sete byte [rsp + 136] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x1f // cmp byte [rcx + 31], bl WORD $0x940f; BYTE $0xd3 // sete bl WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000c024b40240 // add sil, byte [rsp + 192] - QUAD $0x000000e02484b60f // movzx eax, byte [rsp + 224] + QUAD $0x000000a024b40240 // add sil, byte [rsp + 160] + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e1c041 // shl r9b, 7 WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] + LONG $0xc0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 192] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b - LONG $0x7cb60f44; WORD $0x0824 // movzx r15d, byte [rsp + 8] + LONG $0x7cb60f44; WORD $0x0424 // movzx r15d, byte [rsp + 4] LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b LONG $0x03e2c041 // shl r10b, 3 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] + LONG $0x2454b60f; BYTE $0x38 // movzx edx, byte [rsp + 56] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0xc208 // or dl, al LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x05e5c041 // shl r13b, 5 WORD $0x0845; BYTE $0xf5 // or r13b, r14b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] + LONG $0x2474b60f; BYTE $0x70 // movzx esi, byte [rsp + 112] LONG $0x06e6c040 // shl sil, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xf0 // or r8b, sil WORD $0x0841; BYTE $0xd1 // or r9b, dl WORD $0x0845; BYTE $0xe8 // or r8b, r13b - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] + QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] WORD $0xd200 // add dl, dl - LONG $0x80249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 128] + LONG $0x78245402 
// add dl, byte [rsp + 120] WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] + LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x24548b48; BYTE $0x78 // mov rdx, qword [rsp + 120] + LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] WORD $0x8844; BYTE $0x0a // mov byte [rdx], r9b LONG $0x247cb60f; BYTE $0x48 // movzx edi, byte [rsp + 72] LONG $0x06e7c040 // shl dil, 6 @@ -3467,23 +3755,23 @@ LBB1_90: WORD $0x0841; BYTE $0xfb // or r11b, dil LONG $0x01428844 // mov byte [rdx + 1], r8b WORD $0x0841; BYTE $0xf3 // or r11b, sil - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x38244402 // add al, byte [rsp + 56] + LONG $0x20244402 // add al, byte [rsp + 32] WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf0 // or al, sil QUAD $0x0000008824b4b60f // movzx esi, byte [rsp + 136] @@ -3496,18 +3784,18 @@ LBB1_90: WORD $0x5a88; BYTE $0x03 // mov byte [rdx + 3], bl LONG $0x20718d48 // lea rsi, [rcx + 32] LONG $0x04c28348 // add rdx, 4 - LONG $0x24548948; BYTE $0x78 // mov qword [rsp + 120], rdx + LONG $0x24548948; BYTE $0x58 // mov qword [rsp + 88], rdx QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB1_90 + JNE LBB1_91 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] - JMP LBB1_92 + JMP LBB1_93 -LBB1_27: +LBB1_28: WORD $0xff83; BYTE $0x07 // cmp edi, 7 - JE LBB1_148 + JE LBB1_146 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB1_202 + JNE LBB1_199 WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -3517,10 +3805,10 @@ LBB1_27: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB1_33 
+ JE LBB1_34 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB1_31: +LBB1_32: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] WORD $0x940f; BYTE $0xd2 // sete dl @@ -3529,6 +3817,7 @@ LBB1_31: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 + WORD $0x894d; BYTE $0xf1 // mov r9, r14 LONG $0x04b60f45; BYTE $0x1e // movzx r8d, byte [r14 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] @@ -3541,29 +3830,29 @@ LBB1_31: LONG $0x1e3c8841 // mov byte [r14 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB1_31 + JNE LBB1_32 LONG $0x01c68349 // add r14, 1 -LBB1_33: +LBB1_34: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB1_37 + JL LBB1_38 QUAD $0x000000902494894c // mov qword [rsp + 144], r10 QUAD $0x00000098249c894c // mov qword [rsp + 152], r11 - QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 + QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 -LBB1_35: +LBB1_36: QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - QUAD $0x000000e02494940f // sete byte [rsp + 224] + QUAD $0x000000d02494940f // sete byte [rsp + 208] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6940f41 // sete r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 - QUAD $0x000000d02494940f // sete byte [rsp + 208] + QUAD $0x000000b02494940f // sete byte [rsp + 176] LONG $0x206e394c // cmp qword [rsi + 32], r13 - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] LONG $0x286e394c // cmp qword [rsi + 40], r13 LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x306e394c // cmp qword [rsi + 48], r13 @@ -3571,165 +3860,167 @@ LBB1_35: LONG $0x386e394c // cmp qword [rsi + 56], r13 WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x406e394c // cmp qword [rsi + 64], r13 - QUAD $0x000000a02494940f // sete byte [rsp + 160] + QUAD $0x000000c02494940f // sete byte [rsp + 192] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3940f41 // sete r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 LONG $0xd4940f41 // sete r12b LONG $0x706e394c // cmp qword [rsi + 112], r13 - QUAD $0x000000b02494940f // sete byte [rsp + 176] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x786e394c // cmp qword [rsi + 120], r13 WORD $0x940f; BYTE $0xd1 // sete cl LONG $0x80ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 128], r13 - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] LONG $0x88ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 136], r13 - QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] LONG $0x90ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 144], r13 - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0x98ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 152], r13 - 
LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0xa0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 160], r13 - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0xa8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 168], r13 - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0xb0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 176], r13 LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0xb0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 176], r13 + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0xb8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 184], r13 LONG $0xd7940f41 // sete r15b LONG $0xc0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 192], r13 - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0xc8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 200], r13 - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0xd0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 208], r13 - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0xe0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 224], r13 + LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0xe8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 232], r13 + LONG $0xe0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 224], r13 LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 + LONG $0xe8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 232], r13 LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000e024bc0240 // add dil, byte [rsp + 224] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000d024940244 // add r10b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - 
LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x000000b024bcb60f // movzx edi, byte [rsp + 176] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b + WORD $0x8941; BYTE $0xc2 // mov r10d, eax QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b + LONG $0x05e4c041 // shl r12b, 5 + WORD $0x0845; BYTE $0xdc // or r12b, r11b + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] + LONG $0x06e7c040 // shl dil, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0x0840; BYTE $0xf9 // or cl, dil + WORD $0xc308 // or bl, al + WORD $0x0844; BYTE $0xe1 // or cl, r12b + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG 
$0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xc000 // add al, al + LONG $0x10244402 // add al, byte [rsp + 16] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c68349 // add r14, 4 - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 - JNE LBB1_35 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 + JNE LBB1_36 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] -LBB1_37: +LBB1_38: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JE LBB1_39 + JE LBB1_40 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB1_161: +LBB1_159: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 + WORD $0x894d; BYTE $0xf7 // mov r15, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 @@ -3752,10 +4043,10 @@ LBB1_161: WORD $0xda30 // xor dl, bl LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB1_161 - JMP LBB1_40 + JNE LBB1_159 + JMP LBB1_41 -LBB1_56: +LBB1_57: WORD $0x8a44; BYTE $0x1a // mov r11b, byte [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -3765,10 +4056,10 @@ LBB1_56: LONG $0xc1490f41 // cmovns 
eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB1_60 + JE LBB1_61 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB1_58: +LBB1_59: WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b LONG $0x01768d48 // lea rsi, [rsi + 1] WORD $0x940f; BYTE $0xd2 // sete dl @@ -3777,6 +4068,7 @@ LBB1_58: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 + WORD $0x894d; BYTE $0xf4 // mov r12, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] @@ -3789,148 +4081,147 @@ LBB1_58: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB1_58 + JNE LBB1_59 LONG $0x01c68349 // add r14, 1 -LBB1_60: +LBB1_61: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB1_61 + JL LBB1_62 LONG $0x10ff8349 // cmp r15, 16 - LONG $0x245c8844; BYTE $0x08 // mov byte [rsp + 8], r11b + LONG $0x245c8844; BYTE $0x04 // mov byte [rsp + 4], r11b QUAD $0x000000902494894c // mov qword [rsp + 144], r10 QUAD $0x0000010024bc894c // mov qword [rsp + 256], r15 - JB LBB1_63 + JB LBB1_64 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB1_66 + JAE LBB1_67 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB1_66 + JAE LBB1_67 -LBB1_63: +LBB1_64: WORD $0xc031 // xor eax, eax - QUAD $0x000000f824848948 // mov qword [rsp + 248], rax - LONG $0x2474894c; BYTE $0x50 // mov qword [rsp + 80], r14 - -LBB1_69: - WORD $0x894d; BYTE $0xfe // mov r14, r15 - QUAD $0x000000f824b42b4c // sub r14, qword [rsp + 248] - QUAD $0x0000009824b4894c // mov qword [rsp + 152], r14 + QUAD $0x000000e824848948 // mov qword [rsp + 232], rax + LONG $0x2474894c; BYTE $0x38 // mov qword [rsp + 56], r14 LBB1_70: + QUAD $0x000000e824bc2b4c // sub r15, qword [rsp + 232] + QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + +LBB1_71: WORD $0x8948; BYTE $0xf1 // mov rcx, rsi WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b - QUAD $0x000000c02494940f // sete byte [rsp + 192] + QUAD $0x000000a02494940f // sete byte [rsp + 160] LONG $0x015e3844 // cmp byte [rsi + 1], r11b LONG $0xd6940f40 // sete sil LONG $0x02593844 // cmp byte [rcx + 2], r11b LONG $0xd7940f41 // sete r15b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x03 // cmp byte [rcx + 3], al LONG $0xd4940f41 // sete r12b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x04 // cmp byte [rcx + 4], al - QUAD $0x000000d02494940f // sete byte [rsp + 208] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + QUAD $0x000000b02494940f // sete byte [rsp + 176] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x05 // cmp byte [rcx + 5], al LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x06 // cmp byte [rcx + 6], al - QUAD $0x000000e02494940f // sete byte [rsp + 224] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + QUAD $0x000000d02494940f // sete byte [rsp + 208] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD 
$0x4138; BYTE $0x07 // cmp byte [rcx + 7], al LONG $0xd1940f41 // sete r9b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x08 // cmp byte [rcx + 8], al - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + QUAD $0x000000c02494940f // sete byte [rsp + 192] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x09 // cmp byte [rcx + 9], al WORD $0x940f; BYTE $0xd2 // sete dl - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0a // cmp byte [rcx + 10], al LONG $0xd7940f40 // sete dil - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0b // cmp byte [rcx + 11], al LONG $0xd2940f41 // sete r10b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0c // cmp byte [rcx + 12], al LONG $0xd6940f41 // sete r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0d // cmp byte [rcx + 13], al LONG $0xd5940f41 // sete r13b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0e // cmp byte [rcx + 14], al - QUAD $0x000000b02494940f // sete byte [rsp + 176] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] WORD $0x4138; BYTE $0x0f // cmp byte [rcx + 15], al LONG $0xd0940f41 // sete r8b - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x10 // cmp byte [rcx + 16], bl - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x11 // cmp byte [rcx + 17], bl - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x12 // cmp byte [rcx + 18], bl - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x13 // cmp byte [rcx + 19], bl LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x14 // cmp byte [rcx + 20], bl + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x13 // cmp byte [rcx + 19], bl LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x14 // cmp byte [rcx + 20], bl + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x15 // cmp byte [rcx + 21], bl LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; 
BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x16 // cmp byte [rcx + 22], bl - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x17 // cmp byte [rcx + 23], bl LONG $0xd3940f41 // sete r11b - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x18 // cmp byte [rcx + 24], bl - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x19 // cmp byte [rcx + 25], bl - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x1a // cmp byte [rcx + 26], bl LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x1a // cmp byte [rcx + 26], bl + LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x1b // cmp byte [rcx + 27], bl - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x1c // cmp byte [rcx + 28], bl - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0x5938; BYTE $0x1d // cmp byte [rcx + 29], bl LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] + WORD $0x5938; BYTE $0x1d // cmp byte [rcx + 29], bl + LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x1e // cmp byte [rcx + 30], bl QUAD $0x000000882494940f // sete byte [rsp + 136] - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x04 // movzx ebx, byte [rsp + 4] WORD $0x5938; BYTE $0x1f // cmp byte [rcx + 31], bl WORD $0x940f; BYTE $0xd3 // sete bl WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000c024b40240 // add sil, byte [rsp + 192] - QUAD $0x000000e02484b60f // movzx eax, byte [rsp + 224] + QUAD $0x000000a024b40240 // add sil, byte [rsp + 160] + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e1c041 // shl r9b, 7 WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] + LONG $0xc0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 192] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b - LONG $0x7cb60f44; WORD $0x0824 // movzx r15d, byte [rsp + 8] + LONG $0x7cb60f44; WORD $0x0424 // movzx r15d, byte [rsp + 4] LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE 
$0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b LONG $0x03e2c041 // shl r10b, 3 @@ -3942,25 +4233,25 @@ LBB1_70: WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x05e5c041 // shl r13b, 5 WORD $0x0845; BYTE $0xf5 // or r13b, r14b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] + LONG $0x2474b60f; BYTE $0x70 // movzx esi, byte [rsp + 112] LONG $0x06e6c040 // shl sil, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xf0 // or r8b, sil WORD $0x0841; BYTE $0xd1 // or r9b, dl WORD $0x0845; BYTE $0xe8 // or r8b, r13b - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] + QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] WORD $0xd200 // add dl, dl - LONG $0x80249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 128] + LONG $0x78245402 // add dl, byte [rsp + 120] WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] + LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx @@ -3968,31 +4259,31 @@ LBB1_70: WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] + LONG $0x24548b48; BYTE $0x38 // mov rdx, qword [rsp + 56] WORD $0x8844; BYTE $0x0a // mov byte [rdx], r9b - LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] + LONG $0x247cb60f; BYTE $0x58 // movzx edi, byte [rsp + 88] LONG $0x06e7c040 // shl dil, 6 LONG $0x07e3c041 // shl r11b, 7 WORD $0x0841; BYTE $0xfb // or r11b, dil LONG $0x01428844 // mov byte [rdx + 1], r8b WORD $0x0841; BYTE $0xf3 // or r11b, sil - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x38244402 // add al, byte [rsp + 56] + LONG $0x20244402 // add al, byte [rsp + 32] WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf0 // or al, sil QUAD $0x0000008824b4b60f // movzx esi, byte [rsp + 136] @@ -4005,14 +4296,14 @@ LBB1_70: WORD $0x5a88; BYTE $0x03 // mov byte [rdx + 3], bl LONG $0x20718d48 // lea rsi, [rcx + 32] LONG $0x04c28348 // add rdx, 4 - LONG $0x24548948; BYTE $0x50 // mov qword [rsp 
+ 80], rdx + LONG $0x24548948; BYTE $0x38 // mov qword [rsp + 56], rdx QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB1_70 + JNE LBB1_71 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] - JMP LBB1_72 + JMP LBB1_73 -LBB1_148: +LBB1_146: WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -4022,10 +4313,10 @@ LBB1_148: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB1_152 + JE LBB1_150 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB1_150: +LBB1_148: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x04768d48 // lea rsi, [rsi + 4] WORD $0x940f; BYTE $0xd2 // sete dl @@ -4034,6 +4325,7 @@ LBB1_150: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 + WORD $0x894d; BYTE $0xf1 // mov r9, r14 LONG $0x04b60f45; BYTE $0x1e // movzx r8d, byte [r14 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] @@ -4046,29 +4338,29 @@ LBB1_150: LONG $0x1e3c8841 // mov byte [r14 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB1_150 + JNE LBB1_148 LONG $0x01c68349 // add r14, 1 -LBB1_152: +LBB1_150: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB1_156 + JL LBB1_154 QUAD $0x000000902494894c // mov qword [rsp + 144], r10 QUAD $0x00000098249c894c // mov qword [rsp + 152], r11 - QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 + QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 -LBB1_154: +LBB1_152: QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - QUAD $0x000000e02494940f // sete byte [rsp + 224] + QUAD $0x000000d02494940f // sete byte [rsp + 208] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6940f41 // sete r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d - QUAD $0x000000d02494940f // sete byte [rsp + 208] + QUAD $0x000000b02494940f // sete byte [rsp + 176] LONG $0x106e3944 // cmp dword [rsi + 16], r13d - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] LONG $0x146e3944 // cmp dword [rsi + 20], r13d LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x186e3944 // cmp dword [rsi + 24], r13d @@ -4076,175 +4368,176 @@ LBB1_154: LONG $0x1c6e3944 // cmp dword [rsi + 28], r13d WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x206e3944 // cmp dword [rsi + 32], r13d - QUAD $0x000000a02494940f // sete byte [rsp + 160] + QUAD $0x000000c02494940f // sete byte [rsp + 192] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3940f41 // sete r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d LONG $0xd4940f41 // sete r12b LONG $0x386e3944 // cmp dword [rsi + 56], r13d - QUAD $0x000000b02494940f // sete byte [rsp + 176] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x3c6e3944 // cmp dword [rsi + 60], r13d WORD 
$0x940f; BYTE $0xd1 // sete cl LONG $0x406e3944 // cmp dword [rsi + 64], r13d - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] LONG $0x446e3944 // cmp dword [rsi + 68], r13d - QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] LONG $0x486e3944 // cmp dword [rsi + 72], r13d - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0x4c6e3944 // cmp dword [rsi + 76], r13d - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0x506e3944 // cmp dword [rsi + 80], r13d - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0x546e3944 // cmp dword [rsi + 84], r13d - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x586e3944 // cmp dword [rsi + 88], r13d LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0x586e3944 // cmp dword [rsi + 88], r13d + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x5c6e3944 // cmp dword [rsi + 92], r13d LONG $0xd7940f41 // sete r15b LONG $0x606e3944 // cmp dword [rsi + 96], r13d - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x646e3944 // cmp dword [rsi + 100], r13d - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0x686e3944 // cmp dword [rsi + 104], r13d - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x706e3944 // cmp dword [rsi + 112], r13d + LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0x746e3944 // cmp dword [rsi + 116], r13d + LONG $0x706e3944 // cmp dword [rsi + 112], r13d LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x786e3944 // cmp dword [rsi + 120], r13d + LONG $0x746e3944 // cmp dword [rsi + 116], r13d LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0x786e3944 // cmp dword [rsi + 120], r13d + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000e024bc0240 // add dil, byte [rsp + 224] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000d024940244 // add r10b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE 
$0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x000000b024bcb60f // movzx edi, byte [rsp + 176] + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD 
$0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xc000 // add al, al + LONG $0x10244402 // add al, byte [rsp + 16] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c68349 // add r14, 4 - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 - JNE LBB1_154 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 + JNE LBB1_152 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] -LBB1_156: +LBB1_154: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB1_158 + JNE LBB1_156 LBB1_23: WORD $0x3145; BYTE $0xdb // xor r11d, r11d JMP LBB1_24 -LBB1_100: - LONG $0x2ab70f44 // movzx r13d, word [rdx] - LONG $0x1f5a8d4d // lea r11, [r10 + 31] +LBB1_101: + LONG $0x1ab70f44 // movzx r11d, word [rdx] + LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xda490f4d // cmovns r11, r10 + LONG $0xfa490f4d // cmovns r15, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB1_104 + JE LBB1_105 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB1_102: - LONG $0x2e394466 // cmp word [rsi], r13w +LBB1_103: + LONG $0x1e394466 // cmp word [rsi], r11w LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x940f; BYTE $0xd2 // sete dl WORD $0xdaf6 // neg dl @@ -4252,6 +4545,7 @@ LBB1_102: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 + WORD $0x894d; 
BYTE $0xf4 // mov r12, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] @@ -4264,151 +4558,182 @@ LBB1_102: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB1_102 + JNE LBB1_103 LONG $0x01c68349 // add r14, 1 -LBB1_104: - LONG $0x05fbc149 // sar r11, 5 - LONG $0x20fa8349 // cmp r10, 32 - JL LBB1_105 - LONG $0x08fb8349 // cmp r11, 8 - QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x00000098249c894c // mov qword [rsp + 152], r11 - JB LBB1_107 - WORD $0x894c; BYTE $0xd8 // mov rax, r11 - LONG $0x06e0c148 // shl rax, 6 - WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB1_110 - LONG $0x9e048d4b // lea rax, [r14 + 4*r11] - WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB1_110 +LBB1_105: + LONG $0x05ffc149 // sar r15, 5 + LONG $0x20fa8349 // cmp r10, 32 + JL LBB1_106 + LONG $0x08ff8349 // cmp r15, 8 + LONG $0x245c8944; BYTE $0x04 // mov dword [rsp + 4], r11d + QUAD $0x000000902494894c // mov qword [rsp + 144], r10 + QUAD $0x000000f024bc894c // mov qword [rsp + 240], r15 + JB LBB1_108 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 + LONG $0x06e0c148 // shl rax, 6 + WORD $0x0148; BYTE $0xf0 // add rax, rsi + WORD $0x3949; BYTE $0xc6 // cmp r14, rax + JAE LBB1_111 + LONG $0xbe048d4b // lea rax, [r14 + 4*r15] + WORD $0x3948; BYTE $0xf0 // cmp rax, rsi + JBE LBB1_111 -LBB1_107: +LBB1_108: WORD $0xc031 // xor eax, eax - LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax + LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax LONG $0x2474894c; BYTE $0x08 // mov qword [rsp + 8], r14 -LBB1_113: - LONG $0x245c2b4c; BYTE $0x10 // sub r11, qword [rsp + 16] - QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 - LBB1_114: - WORD $0x8949; BYTE $0xf3 // mov r11, rsi - LONG $0x2e394466 // cmp word [rsi], r13w - QUAD $0x000000e02494940f // sete byte [rsp + 224] - LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w + LONG $0x247c2b4c; BYTE $0x20 // sub r15, qword [rsp + 32] + QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + +LBB1_115: + WORD $0x8949; BYTE $0xf2 // mov r10, rsi + LONG $0x1e394466 // cmp word [rsi], r11w + QUAD $0x000000a02494940f // sete byte [rsp + 160] + LONG $0x5e394466; BYTE $0x02 // cmp word [rsi + 2], r11w LONG $0xd0940f41 // sete r8b - LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w + LONG $0x5e394466; BYTE $0x04 // cmp word [rsi + 4], r11w LONG $0xd6940f41 // sete r14b - LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w - QUAD $0x000000d02494940f // sete byte [rsp + 208] - LONG $0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x06463966 // cmp word [rsi + 6], ax + LONG $0xd5940f41 // sete r13b + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x08463966 // cmp word [rsi + 8], ax + QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x0a463966 // cmp word [rsi + 10], ax LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] - LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w - WORD $0x940f; BYTE $0xd0 // sete al - LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x0c463966 // cmp word [rsi + 12], ax + QUAD $0x000000d02494940f // 
sete byte [rsp + 208] + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x0e463966 // cmp word [rsi + 14], ax WORD $0x940f; BYTE $0xd3 // sete bl - LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x10463966 // cmp word [rsi + 16], ax + QUAD $0x000000c02494940f // sete byte [rsp + 192] + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x12463966 // cmp word [rsi + 18], ax WORD $0x940f; BYTE $0xd1 // sete cl - LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x14463966 // cmp word [rsi + 20], ax LONG $0xd6940f40 // sete sil - LONG $0x6b394566; BYTE $0x16 // cmp word [r11 + 22], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x16 // cmp word [r10 + 22], ax LONG $0xd1940f41 // sete r9b - LONG $0x6b394566; BYTE $0x18 // cmp word [r11 + 24], r13w - LONG $0xd2940f41 // sete r10b - LONG $0x6b394566; BYTE $0x1a // cmp word [r11 + 26], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x18 // cmp word [r10 + 24], ax + LONG $0xd3940f41 // sete r11b + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x1a // cmp word [r10 + 26], ax LONG $0xd4940f41 // sete r12b - LONG $0x6b394566; BYTE $0x1c // cmp word [r11 + 28], r13w - QUAD $0x000000b02494940f // sete byte [rsp + 176] - LONG $0x6b394566; BYTE $0x1e // cmp word [r11 + 30], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x1c // cmp word [r10 + 28], ax + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x1e // cmp word [r10 + 30], ax LONG $0xd7940f40 // sete dil - LONG $0x6b394566; BYTE $0x20 // cmp word [r11 + 32], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x20 // cmp word [r10 + 32], dx LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x6b394566; BYTE $0x22 // cmp word [r11 + 34], r13w - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x6b394566; BYTE $0x24 // cmp word [r11 + 36], r13w - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x6b394566; BYTE $0x26 // cmp word [r11 + 38], r13w - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x6b394566; BYTE $0x28 // cmp word [r11 + 40], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x22 // cmp word [r10 + 34], dx + QUAD $0x000000b02494940f // sete byte [rsp + 176] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x24 // cmp word [r10 + 36], dx LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x6b394566; BYTE $0x2a // cmp word [r11 + 42], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x26 // cmp word [r10 + 38], dx + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x28 // cmp word [r10 + 40], dx LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x6b394566; BYTE $0x2c // cmp word [r11 + 44], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x2a // cmp word [r10 + 42], dx + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x2c // cmp word [r10 + 44], dx LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 
64] - LONG $0x6b394566; BYTE $0x2e // cmp word [r11 + 46], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x2e // cmp word [r10 + 46], dx LONG $0xd7940f41 // sete r15b - LONG $0x6b394566; BYTE $0x30 // cmp word [r11 + 48], r13w - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] - LONG $0x6b394566; BYTE $0x32 // cmp word [r11 + 50], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x30 // cmp word [r10 + 48], dx + LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x32 // cmp word [r10 + 50], dx LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x6b394566; BYTE $0x34 // cmp word [r11 + 52], r13w - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x6b394566; BYTE $0x36 // cmp word [r11 + 54], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x34 // cmp word [r10 + 52], dx + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x36 // cmp word [r10 + 54], dx LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x6b394566; BYTE $0x38 // cmp word [r11 + 56], r13w - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0x6b394566; BYTE $0x3a // cmp word [r11 + 58], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x38 // cmp word [r10 + 56], dx + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x3a // cmp word [r10 + 58], dx LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x6b394566; BYTE $0x3c // cmp word [r11 + 60], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x3c // cmp word [r10 + 60], dx QUAD $0x000000882494940f // sete byte [rsp + 136] - LONG $0x6b394566; BYTE $0x3e // cmp word [r11 + 62], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x3e // cmp word [r10 + 62], dx WORD $0x940f; BYTE $0xd2 // sete dl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000e024840244 // add r8b, byte [rsp + 224] + QUAD $0x000000a024840244 // add r8b, byte [rsp + 160] + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xc6 // or r14b, r8b WORD $0xc900 // add cl, cl - LONG $0xa0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 160] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + LONG $0xc0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 192] + LONG $0x03e5c041 // shl r13b, 3 + WORD $0x0845; BYTE $0xf5 // or r13b, r14b + LONG $0x0424448b // mov eax, dword [rsp + 4] LONG $0x02e6c040 // shl sil, 2 WORD $0x0840; BYTE $0xce // or sil, cl - LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al + WORD $0x0844; BYTE $0xe9 // or cl, r13b WORD $0x8941; BYTE $0xc8 // mov r8d, ecx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0841; BYTE $0xf1 // or r9b, sil - LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] + LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x04e2c041 // shl 
r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] + WORD $0x0845; BYTE $0xdc // or r12b, r11b + WORD $0x8941; BYTE $0xc3 // mov r11d, eax + LONG $0x2474b60f; BYTE $0x70 // movzx esi, byte [rsp + 112] LONG $0x06e6c040 // shl sil, 6 LONG $0x07e7c040 // shl dil, 7 WORD $0x0840; BYTE $0xf7 // or dil, sil WORD $0xcb08 // or bl, cl WORD $0x0844; BYTE $0xe7 // or dil, r12b - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] - WORD $0xc900 // add cl, cl - LONG $0x68244c02 // add cl, byte [rsp + 104] - WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xc000 // add al, al + LONG $0x68244402 // add al, byte [rsp + 104] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0x0840; BYTE $0xf1 // or cl, sil + WORD $0xc108 // or cl, al WORD $0xce89 // mov esi, ecx LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx @@ -4422,9 +4747,9 @@ LBB1_114: WORD $0x0841; BYTE $0xf7 // or r15b, sil LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] + LONG $0x18244402 // add al, byte [rsp + 24] WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax @@ -4432,7 +4757,7 @@ LBB1_114: WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax @@ -4446,17 +4771,17 @@ LBB1_114: WORD $0xc208 // or dl, al LONG $0x02798844 // mov byte [rcx + 2], r15b WORD $0x5188; BYTE $0x03 // mov byte [rcx + 3], dl - LONG $0x40738d49 // lea rsi, [r11 + 64] + LONG $0x40728d49 // lea rsi, [r10 + 64] LONG $0x04c18348 // add rcx, 4 LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 - JNE LBB1_114 + QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + JNE LBB1_115 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] - JMP LBB1_116 + QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] + JMP LBB1_117 -LBB1_123: - LONG $0x2ab70f44 // movzx r13d, word [rdx] +LBB1_124: + LONG $0x1ab70f44 // movzx r11d, word [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xfa490f4d // cmovns r15, r10 @@ -4465,11 +4790,11 @@ LBB1_123: 
LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB1_127 + JE LBB1_128 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB1_125: - LONG $0x2e394466 // cmp word [rsi], r13w +LBB1_126: + LONG $0x1e394466 // cmp word [rsi], r11w LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x940f; BYTE $0xd2 // sete dl WORD $0xdaf6 // neg dl @@ -4477,6 +4802,7 @@ LBB1_125: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 + WORD $0x894d; BYTE $0xf4 // mov r12, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] @@ -4489,153 +4815,182 @@ LBB1_125: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB1_125 + JNE LBB1_126 LONG $0x01c68349 // add r14, 1 -LBB1_127: - LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fa8349 // cmp r10, 32 - JL LBB1_128 - LONG $0x08ff8349 // cmp r15, 8 - QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 - JB LBB1_130 - WORD $0x894c; BYTE $0xf8 // mov rax, r15 - LONG $0x06e0c148 // shl rax, 6 - WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB1_133 - LONG $0xbe048d4b // lea rax, [r14 + 4*r15] - WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB1_133 +LBB1_128: + LONG $0x05ffc149 // sar r15, 5 + LONG $0x20fa8349 // cmp r10, 32 + JL LBB1_129 + LONG $0x08ff8349 // cmp r15, 8 + LONG $0x245c8944; BYTE $0x04 // mov dword [rsp + 4], r11d + QUAD $0x000000902494894c // mov qword [rsp + 144], r10 + QUAD $0x000000f024bc894c // mov qword [rsp + 240], r15 + JB LBB1_131 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 + LONG $0x06e0c148 // shl rax, 6 + WORD $0x0148; BYTE $0xf0 // add rax, rsi + WORD $0x3949; BYTE $0xc6 // cmp r14, rax + JAE LBB1_134 + LONG $0xbe048d4b // lea rax, [r14 + 4*r15] + WORD $0x3948; BYTE $0xf0 // cmp rax, rsi + JBE LBB1_134 -LBB1_130: +LBB1_131: WORD $0xc031 // xor eax, eax - LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax - WORD $0x894d; BYTE $0xf4 // mov r12, r14 - -LBB1_136: - LONG $0x2464894c; BYTE $0x08 // mov qword [rsp + 8], r12 - WORD $0x894d; BYTE $0xfe // mov r14, r15 - LONG $0x24742b4c; BYTE $0x10 // sub r14, qword [rsp + 16] - QUAD $0x000000c024b4894c // mov qword [rsp + 192], r14 + LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax + LONG $0x2474894c; BYTE $0x08 // mov qword [rsp + 8], r14 LBB1_137: - WORD $0x8949; BYTE $0xf3 // mov r11, rsi - LONG $0x2e394466 // cmp word [rsi], r13w - QUAD $0x000000e02494940f // sete byte [rsp + 224] - LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w + LONG $0x247c2b4c; BYTE $0x20 // sub r15, qword [rsp + 32] + QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + +LBB1_138: + WORD $0x8949; BYTE $0xf2 // mov r10, rsi + LONG $0x1e394466 // cmp word [rsi], r11w + QUAD $0x000000a02494940f // sete byte [rsp + 160] + LONG $0x5e394466; BYTE $0x02 // cmp word [rsi + 2], r11w LONG $0xd0940f41 // sete r8b - LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w + LONG $0x5e394466; BYTE $0x04 // cmp word [rsi + 4], r11w LONG $0xd6940f41 // sete r14b - LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w - QUAD $0x000000d02494940f // sete byte [rsp + 208] - LONG $0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x06463966 // cmp word [rsi + 6], ax + LONG $0xd5940f41 // 
sete r13b + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x08463966 // cmp word [rsi + 8], ax + QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x0a463966 // cmp word [rsi + 10], ax LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] - LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w - WORD $0x940f; BYTE $0xd0 // sete al - LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x0c463966 // cmp word [rsi + 12], ax + QUAD $0x000000d02494940f // sete byte [rsp + 208] + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x0e463966 // cmp word [rsi + 14], ax WORD $0x940f; BYTE $0xd3 // sete bl - LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x10463966 // cmp word [rsi + 16], ax + QUAD $0x000000c02494940f // sete byte [rsp + 192] + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x12463966 // cmp word [rsi + 18], ax WORD $0x940f; BYTE $0xd1 // sete cl - LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x14463966 // cmp word [rsi + 20], ax LONG $0xd6940f40 // sete sil - LONG $0x6b394566; BYTE $0x16 // cmp word [r11 + 22], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x16 // cmp word [r10 + 22], ax LONG $0xd1940f41 // sete r9b - LONG $0x6b394566; BYTE $0x18 // cmp word [r11 + 24], r13w - LONG $0xd2940f41 // sete r10b - LONG $0x6b394566; BYTE $0x1a // cmp word [r11 + 26], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x18 // cmp word [r10 + 24], ax + LONG $0xd3940f41 // sete r11b + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x1a // cmp word [r10 + 26], ax LONG $0xd4940f41 // sete r12b - LONG $0x6b394566; BYTE $0x1c // cmp word [r11 + 28], r13w - QUAD $0x000000b02494940f // sete byte [rsp + 176] - LONG $0x6b394566; BYTE $0x1e // cmp word [r11 + 30], r13w + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x1c // cmp word [r10 + 28], ax + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x0424448b // mov eax, dword [rsp + 4] + LONG $0x42394166; BYTE $0x1e // cmp word [r10 + 30], ax LONG $0xd7940f40 // sete dil - LONG $0x6b394566; BYTE $0x20 // cmp word [r11 + 32], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x20 // cmp word [r10 + 32], dx LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x6b394566; BYTE $0x22 // cmp word [r11 + 34], r13w - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x6b394566; BYTE $0x24 // cmp word [r11 + 36], r13w - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x6b394566; BYTE $0x26 // cmp word [r11 + 38], r13w - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x6b394566; BYTE $0x28 // cmp word [r11 + 40], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x22 // cmp word [r10 + 34], dx + QUAD $0x000000b02494940f // sete byte [rsp + 176] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x24 // cmp word [r10 + 36], dx LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x6b394566; BYTE $0x2a // cmp word [r11 + 42], 
r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x26 // cmp word [r10 + 38], dx + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x28 // cmp word [r10 + 40], dx LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x6b394566; BYTE $0x2c // cmp word [r11 + 44], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x2a // cmp word [r10 + 42], dx + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x2c // cmp word [r10 + 44], dx LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - LONG $0x6b394566; BYTE $0x2e // cmp word [r11 + 46], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x2e // cmp word [r10 + 46], dx LONG $0xd7940f41 // sete r15b - LONG $0x6b394566; BYTE $0x30 // cmp word [r11 + 48], r13w - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] - LONG $0x6b394566; BYTE $0x32 // cmp word [r11 + 50], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x30 // cmp word [r10 + 48], dx + LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x32 // cmp word [r10 + 50], dx LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x6b394566; BYTE $0x34 // cmp word [r11 + 52], r13w - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x6b394566; BYTE $0x36 // cmp word [r11 + 54], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x34 // cmp word [r10 + 52], dx + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x36 // cmp word [r10 + 54], dx LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x6b394566; BYTE $0x38 // cmp word [r11 + 56], r13w - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0x6b394566; BYTE $0x3a // cmp word [r11 + 58], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x38 // cmp word [r10 + 56], dx + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x3a // cmp word [r10 + 58], dx LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x6b394566; BYTE $0x3c // cmp word [r11 + 60], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x3c // cmp word [r10 + 60], dx QUAD $0x000000882494940f // sete byte [rsp + 136] - LONG $0x6b394566; BYTE $0x3e // cmp word [r11 + 62], r13w + LONG $0x0424548b // mov edx, dword [rsp + 4] + LONG $0x52394166; BYTE $0x3e // cmp word [r10 + 62], dx WORD $0x940f; BYTE $0xd2 // sete dl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000e024840244 // add r8b, byte [rsp + 224] + QUAD $0x000000a024840244 // add r8b, byte [rsp + 160] + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xc6 // or r14b, r8b WORD $0xc900 // add cl, cl - LONG $0xa0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 160] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + LONG $0xc0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 192] + LONG $0x03e5c041 // shl r13b, 3 + WORD $0x0845; BYTE $0xf5 // 
or r13b, r14b + LONG $0x0424448b // mov eax, dword [rsp + 4] LONG $0x02e6c040 // shl sil, 2 WORD $0x0840; BYTE $0xce // or sil, cl - LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al + WORD $0x0844; BYTE $0xe9 // or cl, r13b WORD $0x8941; BYTE $0xc8 // mov r8d, ecx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0841; BYTE $0xf1 // or r9b, sil - LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] + LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] + WORD $0x0845; BYTE $0xdc // or r12b, r11b + WORD $0x8941; BYTE $0xc3 // mov r11d, eax + LONG $0x2474b60f; BYTE $0x70 // movzx esi, byte [rsp + 112] LONG $0x06e6c040 // shl sil, 6 LONG $0x07e7c040 // shl dil, 7 WORD $0x0840; BYTE $0xf7 // or dil, sil WORD $0xcb08 // or bl, cl WORD $0x0844; BYTE $0xe7 // or dil, r12b - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] - WORD $0xc900 // add cl, cl - LONG $0x68244c02 // add cl, byte [rsp + 104] - WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xc000 // add al, al + LONG $0x68244402 // add al, byte [rsp + 104] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0x0840; BYTE $0xf1 // or cl, sil + WORD $0xc108 // or cl, al WORD $0xce89 // mov esi, ecx LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx @@ -4649,9 +5004,9 @@ LBB1_137: WORD $0x0841; BYTE $0xf7 // or r15b, sil LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] + LONG $0x18244402 // add al, byte [rsp + 24] WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax @@ -4659,7 +5014,7 @@ LBB1_137: WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax @@ -4673,17 +5028,16 @@ LBB1_137: WORD $0xc208 // or dl, al LONG $0x02798844 // mov byte [rcx + 2], r15b WORD $0x5188; BYTE $0x03 // mov byte [rcx + 3], dl - LONG $0x40738d49 // lea rsi, [r11 + 64] + LONG $0x40728d49 // lea rsi, [r10 + 64] LONG $0x04c18348 // 
add rcx, 4 LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 - JNE LBB1_137 + QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + JNE LBB1_138 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] - LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] - JMP LBB1_139 + QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] + JMP LBB1_140 -LBB1_162: +LBB1_160: WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -4693,10 +5047,10 @@ LBB1_162: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB1_166 + JE LBB1_164 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB1_164: +LBB1_162: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] WORD $0x940f; BYTE $0xd2 // sete dl @@ -4705,6 +5059,7 @@ LBB1_164: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 + WORD $0x894d; BYTE $0xf1 // mov r9, r14 LONG $0x04b60f45; BYTE $0x1e // movzx r8d, byte [r14 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] @@ -4717,29 +5072,29 @@ LBB1_164: LONG $0x1e3c8841 // mov byte [r14 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB1_164 + JNE LBB1_162 LONG $0x01c68349 // add r14, 1 -LBB1_166: +LBB1_164: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB1_170 + JL LBB1_168 QUAD $0x000000902494894c // mov qword [rsp + 144], r10 QUAD $0x00000098249c894c // mov qword [rsp + 152], r11 - QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 + QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 -LBB1_168: +LBB1_166: QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - QUAD $0x000000e02494940f // sete byte [rsp + 224] + QUAD $0x000000d02494940f // sete byte [rsp + 208] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6940f41 // sete r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 - QUAD $0x000000d02494940f // sete byte [rsp + 208] + QUAD $0x000000b02494940f // sete byte [rsp + 176] LONG $0x206e394c // cmp qword [rsi + 32], r13 - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] LONG $0x286e394c // cmp qword [rsi + 40], r13 LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x306e394c // cmp qword [rsi + 48], r13 @@ -4747,161 +5102,162 @@ LBB1_168: LONG $0x386e394c // cmp qword [rsi + 56], r13 WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x406e394c // cmp qword [rsi + 64], r13 - QUAD $0x000000a02494940f // sete byte [rsp + 160] + QUAD $0x000000c02494940f // sete byte [rsp + 192] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x940f; BYTE $0xd2 // sete dl + LONG $0xd7940f40 // sete dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1940f41 // sete r9b + LONG $0xd0940f41 // sete r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2940f41 // sete r10b + LONG $0xd1940f41 // sete r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3940f41 // sete r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 LONG $0xd4940f41 // sete r12b LONG $0x706e394c // cmp qword [rsi + 
112], r13 - QUAD $0x000000b02494940f // sete byte [rsp + 176] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] LONG $0x786e394c // cmp qword [rsi + 120], r13 WORD $0x940f; BYTE $0xd1 // sete cl LONG $0x80ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 128], r13 - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] LONG $0x88ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 136], r13 - QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] LONG $0x90ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 144], r13 - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + QUAD $0x000000802494940f // sete byte [rsp + 128] LONG $0x98ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 152], r13 - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] LONG $0xa0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 160], r13 - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0xa8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 168], r13 - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0xb0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 176], r13 LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + LONG $0xb0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 176], r13 + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0xb8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 184], r13 LONG $0xd7940f41 // sete r15b LONG $0xc0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 192], r13 - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0xc8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 200], r13 - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0xd0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 208], r13 - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0xe0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 224], r13 + LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0xe8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 232], r13 + LONG $0xe0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 224], r13 LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 + LONG $0xe8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 232], r13 LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 + LONG $0x2454940f; BYTE $0x04 // sete byte [rsp + 4] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0940f41 // sete r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000e024bc0240 // add dil, byte [rsp + 224] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000d024940244 // add r10b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE 
$0x00 // add dl, byte [rsp + 160] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x000000b024bcb60f // movzx edi, byte [rsp + 176] + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xd200 // add dl, dl - LONG $0x68245402 // add dl, byte [rsp + 104] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xc000 // add al, al + LONG $0x50244402 // add al, byte [rsp + 80] + WORD $0xc789 // mov edi, eax + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; 
BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xc000 // add al, al + LONG $0x10244402 // add al, byte [rsp + 16] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c68349 // add r14, 4 - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 - JNE LBB1_168 + QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 + JNE LBB1_166 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] -LBB1_170: +LBB1_168: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB1_172 + JNE LBB1_170 -LBB1_39: +LBB1_40: WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB1_40 + JMP LBB1_41 -LBB1_174: +LBB1_172: LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xda490f4d // cmovns r11, r10 @@ -4911,18 +5267,21 @@ LBB1_174: WORD $0xe083; BYTE $0xf8 // and eax, -8 
LONG $0x02100ff3 // movss xmm0, dword [rdx] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB1_178 + JE LBB1_176 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB1_176: +LBB1_174: WORD $0x2e0f; BYTE $0x06 // ucomiss xmm0, dword [rsi] LONG $0x04768d48 // lea rsi, [rsi + 4] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xca20 // and dl, cl WORD $0xdaf6 // neg dl LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 + WORD $0x894d; BYTE $0xf7 // mov r15, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] @@ -4935,282 +5294,410 @@ LBB1_176: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB1_176 + JNE LBB1_174 LONG $0x01c68349 // add r14, 1 -LBB1_178: +LBB1_176: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB1_179 + JL LBB1_177 LONG $0x04fb8349 // cmp r11, 4 - JB LBB1_181 + JB LBB1_179 WORD $0x894c; BYTE $0xd8 // mov rax, r11 LONG $0x07e0c148 // shl rax, 7 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB1_184 + JAE LBB1_182 LONG $0x9e048d4b // lea rax, [r14 + 4*r11] WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB1_184 + JBE LBB1_182 -LBB1_181: +LBB1_179: WORD $0x3145; BYTE $0xc0 // xor r8d, r8d WORD $0x8948; BYTE $0xf3 // mov rbx, rsi WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB1_187: - LONG $0x247c894c; BYTE $0x08 // mov qword [rsp + 8], r15 - QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x000000c0249c894c // mov qword [rsp + 192], r11 - WORD $0x294d; BYTE $0xc3 // sub r11, r8 - QUAD $0x000000e0249c894c // mov qword [rsp + 224], r11 +LBB1_185: + QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 + QUAD $0x000000902494894c // mov qword [rsp + 144], r10 + QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 + WORD $0x294d; BYTE $0xc3 // sub r11, r8 + QUAD $0x00000098249c894c // mov qword [rsp + 152], r11 -LBB1_188: +LBB1_186: WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - QUAD $0x000000d02494940f // sete byte [rsp + 208] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + WORD $0x8941; BYTE $0xcd // mov r13d, ecx LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] - LONG $0xd0940f41 // sete r8b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al LONG $0x08432e0f // ucomiss xmm0, dword [rbx + 8] - LONG $0xd6940f41 // sete r14b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x0c432e0f // ucomiss xmm0, dword [rbx + 12] - LONG $0xd5940f41 // sete r13b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x10432e0f // ucomiss xmm0, dword [rbx + 16] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x14432e0f // ucomiss xmm0, dword [rbx + 20] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x30244c88 // mov byte 
[rsp + 48], cl LONG $0x18432e0f // ucomiss xmm0, dword [rbx + 24] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x1c432e0f // ucomiss xmm0, dword [rbx + 28] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl LONG $0x20432e0f // ucomiss xmm0, dword [rbx + 32] - QUAD $0x000000b02494940f // sete byte [rsp + 176] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x18244c88 // mov byte [rsp + 24], cl LONG $0x24432e0f // ucomiss xmm0, dword [rbx + 36] - WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x28432e0f // ucomiss xmm0, dword [rbx + 40] - LONG $0xd6940f40 // sete sil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x2c432e0f // ucomiss xmm0, dword [rbx + 44] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x30432e0f // ucomiss xmm0, dword [rbx + 48] - LONG $0xd2940f41 // sete r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x34432e0f // ucomiss xmm0, dword [rbx + 52] - LONG $0xd4940f41 // sete r12b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x38432e0f // ucomiss xmm0, dword [rbx + 56] - QUAD $0x000000802494940f // sete byte [rsp + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl LONG $0x3c432e0f // ucomiss xmm0, dword [rbx + 60] - LONG $0xd1940f41 // sete r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x40432e0f // ucomiss xmm0, dword [rbx + 64] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x44432e0f // ucomiss xmm0, dword [rbx + 68] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd4940f41 // sete r12b + WORD $0x2041; BYTE $0xc4 // and r12b, al LONG $0x48432e0f // ucomiss xmm0, dword [rbx + 72] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl LONG $0x4c432e0f // ucomiss xmm0, dword [rbx + 76] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl LONG $0x50432e0f // ucomiss xmm0, dword [rbx + 80] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + WORD 
$0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xc0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 192], cl LONG $0x54432e0f // ucomiss xmm0, dword [rbx + 84] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xd0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 208], cl LONG $0x58432e0f // ucomiss xmm0, dword [rbx + 88] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al LONG $0x5c432e0f // ucomiss xmm0, dword [rbx + 92] - LONG $0xd7940f41 // sete r15b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al LONG $0x60432e0f // ucomiss xmm0, dword [rbx + 96] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl LONG $0x64432e0f // ucomiss xmm0, dword [rbx + 100] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al LONG $0x68432e0f // ucomiss xmm0, dword [rbx + 104] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al LONG $0x6c432e0f // ucomiss xmm0, dword [rbx + 108] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al LONG $0x70432e0f // ucomiss xmm0, dword [rbx + 112] - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al LONG $0x74432e0f // ucomiss xmm0, dword [rbx + 116] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl LONG $0x78432e0f // ucomiss xmm0, dword [rbx + 120] - QUAD $0x000000882494940f // sete byte [rsp + 136] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x7c432e0f // ucomiss xmm0, dword [rbx + 124] - WORD $0x940f; BYTE $0xd1 // sete cl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000d024840244 // add r8b, byte [rsp + 208] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xc6 // or r14b, r8b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f40 // sete sil + WORD $0x2040; BYTE $0xc6 // and sil, al WORD $0xd200 // add dl, dl - LONG $0xb0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 176] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e6c040 // shl sil, 2 - WORD $0x0840; BYTE $0xd6 // or sil, dl - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd0 // mov r8d, edx - LONG $0x03e7c040 // shl dil, 3 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG 
$0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] + WORD $0x8941; BYTE $0xd5 // mov r13d, edx + LONG $0x2454b60f; BYTE $0x30 // movzx edx, byte [rsp + 48] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xc2 // or dl, r8b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xf1 // or r9b, sil - WORD $0x0841; BYTE $0xd3 // or r11b, dl - WORD $0x0845; BYTE $0xe1 // or r9b, r12b - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xc000 // add al, al - LONG $0x68244402 // add al, byte [rsp + 104] - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xf2 // or dl, sil - LONG $0x24748b48; BYTE $0x08 // mov rsi, qword [rsp + 8] - WORD $0x8844; BYTE $0x1e // mov byte [rsi], r11b - LONG $0x247cb60f; BYTE $0x40 // movzx edi, byte [rsp + 64] - LONG $0x06e7c040 // shl dil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x014e8844 // mov byte [rsi + 1], r9b - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] + WORD $0xc900 // add cl, cl + LONG $0x18244c02 // add cl, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x04244488 // mov byte [rsp + 4], al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0x0844; BYTE $0xe8 // or al, 
r13b + LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] + WORD $0xe2c0; BYTE $0x04 // shl dl, 4 + WORD $0xca08 // or dl, cl + LONG $0x6cb60f44; WORD $0x6824 // movzx r13d, byte [rsp + 104] + LONG $0x05e5c041 // shl r13b, 5 + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + LONG $0x6cb60f44; WORD $0x6024 // movzx r13d, byte [rsp + 96] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xcd // or r13b, cl + WORD $0x0045; BYTE $0xe4 // add r12b, r12b + LONG $0x24640244; BYTE $0x50 // add r12b, byte [rsp + 80] + WORD $0x8944; BYTE $0xe1 // mov ecx, r12d + LONG $0x64b60f44; WORD $0x7824 // movzx r12d, byte [rsp + 120] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xcc // or r12b, cl + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0x0844; BYTE $0xe1 // or cl, r12b + WORD $0x8941; BYTE $0xcc // mov r12d, ecx + QUAD $0x000000c0248cb60f // movzx ecx, byte [rsp + 192] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xe1 // or cl, r12b + LONG $0x64b60f44; WORD $0x0424 // movzx r12d, byte [rsp + 4] + WORD $0x0841; BYTE $0xc4 // or r12b, al + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xd008 // or al, dl - QUAD $0x000000882494b60f // movzx edx, byte [rsp + 136] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0xd108 // or cl, dl - WORD $0xc108 // or cl, al - LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl + LONG $0x06e6c041 // shl r14b, 6 + WORD $0x0841; BYTE $0xc6 // or r14b, al + WORD $0x0841; BYTE $0xd5 // or r13b, dl + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xf0 // or r8b, r14b + WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x000000b0249c0244 // add r11b, byte [rsp + 176] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + LONG $0x04e7c041 // shl r15b, 4 + WORD $0x0845; BYTE $0xcf // or r15b, r9b + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + WORD $0x8844; BYTE $0x20 // mov byte [rax], r12b + QUAD $0x000000a0248cb60f // movzx ecx, byte [rsp + 160] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x01688844 // mov byte [rax + 1], r13b + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0844; BYTE $0xfe // or sil, r15b + LONG $0x02408844 // mov byte [rax + 2], r8b + LONG $0x03708840 // mov byte [rax + 3], sil LONG $0x80c38148; WORD $0x0000; BYTE $0x00 // add rbx, 128 - LONG $0x04c68348 // add rsi, 4 - LONG $0x24748948; BYTE $0x08 // mov qword [rsp + 8], rsi - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 - JNE LBB1_188 - LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000008824848948 // mov qword [rsp + 136], rax + QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + JNE LBB1_186 + QUAD $0x0000008824bc8b4c // mov r15, qword [rsp + 136] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] - JMP LBB1_190 + QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] + JMP LBB1_188 LBB1_9: - LONG $0x2474894c; BYTE $0x78 // mov qword [rsp + 120], r14 + 
LONG $0x2474894c; BYTE $0x58 // mov qword [rsp + 88], r14 -LBB1_92: +LBB1_93: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB1_95 + JNE LBB1_96 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - JMP LBB1_98 + JMP LBB1_99 -LBB1_61: - LONG $0x2474894c; BYTE $0x50 // mov qword [rsp + 80], r14 +LBB1_62: + LONG $0x2474894c; BYTE $0x38 // mov qword [rsp + 56], r14 -LBB1_72: +LBB1_73: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB1_75 + JNE LBB1_76 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - JMP LBB1_78 + JMP LBB1_79 -LBB1_105: +LBB1_106: LONG $0x2474894c; BYTE $0x08 // mov qword [rsp + 8], r14 -LBB1_116: - LONG $0x05e3c149 // shl r11, 5 - WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB1_202 - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xd8 // sub r8, r11 - WORD $0xf749; BYTE $0xd3 // not r11 - WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB1_121 - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d - JMP LBB1_119 +LBB1_117: + LONG $0x05e7c149 // shl r15, 5 + WORD $0x394d; BYTE $0xd7 // cmp r15, r10 + JGE LBB1_199 + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0x294d; BYTE $0xf8 // sub r8, r15 + WORD $0xf749; BYTE $0xd7 // not r15 + WORD $0x014d; BYTE $0xd7 // add r15, r10 + JE LBB1_119 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0x3145; BYTE $0xff // xor r15d, r15d + LONG $0x24748b4c; BYTE $0x08 // mov r14, qword [rsp + 8] -LBB1_128: - WORD $0x894d; BYTE $0xf4 // mov r12, r14 +LBB1_123: + WORD $0x8948; BYTE $0xf0 // mov rax, rsi + LONG $0x1e394466 // cmp word [rsi], r11w + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xdaf6 // neg dl + WORD $0x894c; BYTE $0xff // mov rdi, r15 + LONG $0x03efc148 // shr rdi, 3 + LONG $0x14b60f45; BYTE $0x3e // movzx r10d, byte [r14 + rdi] + WORD $0x8944; BYTE $0xf9 // mov ecx, r15d + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd2 // xor dl, r10b + WORD $0xd320 // and bl, dl + WORD $0x3044; BYTE $0xd3 // xor bl, r10b + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x02c78349 // add r15, 2 + LONG $0x5e394466; BYTE $0x02 // cmp word [rsi + 2], r11w + LONG $0x04768d48 // lea rsi, [rsi + 4] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xdaf6 // neg dl + WORD $0xda30 // xor dl, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0xd020 // and al, dl + WORD $0xd830 // xor al, bl + LONG $0x3e048841 // mov byte [r14 + rdi], al + WORD $0x394d; BYTE $0xf9 // cmp r9, r15 + JNE LBB1_123 + JMP LBB1_120 + +LBB1_129: + LONG $0x2474894c; BYTE $0x08 // mov qword [rsp + 8], r14 -LBB1_139: +LBB1_140: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB1_144 - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d - JMP LBB1_142 + JNE LBB1_142 -LBB1_179: +LBB1_119: + WORD $0x3145; BYTE $0xff // xor r15d, r15d + JMP LBB1_120 + +LBB1_177: WORD $0x894d; BYTE $0xf7 // mov 
r15, r14 WORD $0x8948; BYTE $0xf3 // mov rbx, rsi -LBB1_190: +LBB1_188: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB1_202 + JGE LBB1_199 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB1_195 + JNE LBB1_193 WORD $0xf631 // xor esi, esi - JMP LBB1_193 + JMP LBB1_191 -LBB1_158: +LBB1_156: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB1_159: +LBB1_157: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 + WORD $0x894d; BYTE $0xf7 // mov r15, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 @@ -5233,21 +5720,21 @@ LBB1_159: WORD $0xda30 // xor dl, bl LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB1_159 + JNE LBB1_157 LBB1_24: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_202 + JE LBB1_199 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - JMP LBB1_201 + JMP LBB1_26 -LBB1_95: +LBB1_96: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - LONG $0x24748b4c; BYTE $0x78 // mov r14, qword [rsp + 120] + LONG $0x24748b4c; BYTE $0x58 // mov r14, qword [rsp + 88] -LBB1_96: +LBB1_97: WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x0e1c3846 // cmp byte [rsi + r9], r11b WORD $0x940f; BYTE $0xd3 // sete bl @@ -5275,27 +5762,27 @@ LBB1_96: WORD $0xd030 // xor al, dl LONG $0x3e048841 // mov byte [r14 + rdi], al WORD $0x394d; BYTE $0xca // cmp r10, r9 - JNE LBB1_96 + JNE LBB1_97 WORD $0x014c; BYTE $0xce // add rsi, r9 -LBB1_98: +LBB1_99: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_202 + JE LBB1_199 WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xca // mov rdx, r9 LONG $0x03eac148 // shr rdx, 3 - LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] - JMP LBB1_80 + LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] + JMP LBB1_81 -LBB1_75: +LBB1_76: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - LONG $0x24748b4c; BYTE $0x50 // mov r14, qword [rsp + 80] + LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] -LBB1_76: +LBB1_77: WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x0e1c3846 // cmp byte [rsi + r9], r11b WORD $0x940f; BYTE $0xd3 // sete bl @@ -5323,50 +5810,55 @@ LBB1_76: WORD $0xd030 // xor al, dl LONG $0x3e048841 // mov byte [r14 + rdi], al WORD $0x394d; BYTE $0xca // cmp r10, r9 - JNE LBB1_76 + JNE LBB1_77 WORD $0x014c; BYTE $0xce // add rsi, r9 -LBB1_78: +LBB1_79: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_202 + JE LBB1_199 WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xca // mov rdx, r9 LONG $0x03eac148 // shr rdx, 3 - LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] -LBB1_80: +LBB1_81: LONG $0x103c8a41 // mov dil, byte [r8 + rdx] LONG $0x07e18041 // and r9b, 7 WORD $0x01b3 // mov bl, 1 WORD $0x8944; BYTE $0xc9 // mov ecx, r9d - JMP LBB1_81 + JMP LBB1_82 -LBB1_197: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 
+LBB1_195: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB1_198: +LBB1_196: LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xc820 // and al, cl WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0x894d; BYTE $0xf7 // mov r15, r14 + LONG $0x14b60f45; BYTE $0x3e // movzx r10d, byte [r14 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b + WORD $0x3044; BYTE $0xd3 // xor bl, r10b LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x462e0f66; BYTE $0x08 // ucomisd xmm0, qword [rsi + 8] LONG $0x10768d48 // lea rsi, [rsi + 16] + LONG $0xd29b0f41 // setnp r10b WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xd0 // and al, r10b WORD $0xd8f6 // neg al WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 @@ -5375,26 +5867,42 @@ LBB1_198: WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl LONG $0x3e148841 // mov byte [r14 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB1_198 + WORD $0x394d; BYTE $0xd9 // cmp r9, r11 + JNE LBB1_196 -LBB1_199: - LONG $0x01c0f641 // test r8b, 1 - JE LBB1_202 - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - JMP LBB1_201 +LBB1_197: + LONG $0x01c0f641 // test r8b, 1 + JE LBB1_199 + LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al + WORD $0xdaf6 // neg dl + WORD $0x894c; BYTE $0xd8 // mov rax, r11 + LONG $0x03e8c148 // shr rax, 3 + LONG $0x06348a41 // mov sil, byte [r14 + rax] + LONG $0x07e38041 // and r11b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf2 // xor dl, sil + WORD $0xd320 // and bl, dl + WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x061c8841 // mov byte [r14 + rax], bl + JMP LBB1_199 -LBB1_172: +LBB1_170: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB1_173: +LBB1_171: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 + WORD $0x894d; BYTE $0xf7 // mov r15, r14 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 @@ -5417,14 +5925,14 @@ LBB1_173: WORD $0xda30 // xor dl, bl LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB1_173 + JNE LBB1_171 -LBB1_40: +LBB1_41: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_202 + JE LBB1_199 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 -LBB1_201: +LBB1_26: WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xda // mov rdx, r11 @@ -5438,32 +5946,32 @@ LBB1_201: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xf3 // xor bl, sil LONG $0x161c8841 // mov byte [r14 + rdx], bl - JMP LBB1_202 + JMP LBB1_199 -LBB1_121: +LBB1_142: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 - WORD $0x3145; BYTE 
$0xf6 // xor r14d, r14d - LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] + WORD $0x3145; BYTE $0xff // xor r15d, r15d + LONG $0x24748b4c; BYTE $0x08 // mov r14, qword [rsp + 8] -LBB1_122: +LBB1_143: WORD $0x8948; BYTE $0xf0 // mov rax, rsi - LONG $0x2e394466 // cmp word [rsi], r13w + LONG $0x1e394466 // cmp word [rsi], r11w WORD $0x940f; BYTE $0xd2 // sete dl WORD $0xdaf6 // neg dl - WORD $0x894c; BYTE $0xf7 // mov rdi, r14 + WORD $0x894c; BYTE $0xff // mov rdi, r15 LONG $0x03efc148 // shr rdi, 3 - LONG $0x14b60f45; BYTE $0x3b // movzx r10d, byte [r11 + rdi] - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d + LONG $0x14b60f45; BYTE $0x3e // movzx r10d, byte [r14 + rdi] + WORD $0x8944; BYTE $0xf9 // mov ecx, r15d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl WORD $0x3044; BYTE $0xd2 // xor dl, r10b WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xd3 // xor bl, r10b - LONG $0x3b1c8841 // mov byte [r11 + rdi], bl - LONG $0x02c68349 // add r14, 2 - LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x02c78349 // add r15, 2 + LONG $0x5e394466; BYTE $0x02 // cmp word [rsi + 2], r11w LONG $0x04768d48 // lea rsi, [rsi + 4] WORD $0x940f; BYTE $0xd2 // sete dl WORD $0xdaf6 // neg dl @@ -5473,178 +5981,128 @@ LBB1_122: WORD $0xe0d2 // shl al, cl WORD $0xd020 // and al, dl WORD $0xd830 // xor al, bl - LONG $0x3b048841 // mov byte [r11 + rdi], al - WORD $0x394d; BYTE $0xf1 // cmp r9, r14 - JNE LBB1_122 + LONG $0x3e048841 // mov byte [r14 + rdi], al + WORD $0x394d; BYTE $0xf9 // cmp r9, r15 + JNE LBB1_143 -LBB1_119: +LBB1_120: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_202 - LONG $0x2e394466 // cmp word [rsi], r13w + JE LBB1_199 + LONG $0x1e394466 // cmp word [rsi], r11w WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xf2 // mov rdx, r14 + WORD $0x894c; BYTE $0xfa // mov rdx, r15 LONG $0x03eac148 // shr rdx, 3 LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] LONG $0x103c8a41 // mov dil, byte [r8 + rdx] - LONG $0x07e68041 // and r14b, 7 + LONG $0x07e78041 // and r15b, 7 WORD $0x01b3 // mov bl, 1 - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d + WORD $0x8944; BYTE $0xf9 // mov ecx, r15d -LBB1_81: +LBB1_82: WORD $0xe3d2 // shl bl, cl WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB1_202 -LBB1_144: +LBB1_199: + MOVQ 304(SP), SP + RET + +LBB1_193: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d - -LBB1_145: - WORD $0x8948; BYTE $0xf0 // mov rax, rsi - LONG $0x2e394466 // cmp word [rsi], r13w - WORD $0x940f; BYTE $0xd2 // sete dl - WORD $0xdaf6 // neg dl - WORD $0x894c; BYTE $0xf7 // mov rdi, r14 - LONG $0x03efc148 // shr rdi, 3 - LONG $0x14b60f45; BYTE $0x3c // movzx r10d, byte [r12 + rdi] - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xd2 // xor dl, r10b - WORD $0xd320 // and bl, dl - WORD $0x3044; BYTE $0xd3 // xor bl, r10b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl - LONG $0x02c68349 // add r14, 2 - LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0x04768d48 // lea rsi, [rsi + 4] - WORD $0x940f; BYTE $0xd2 // sete dl - WORD $0xdaf6 // neg dl - WORD $0xda30 // xor dl, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 
// shl al, cl - WORD $0xd020 // and al, dl - WORD $0xd830 // xor al, bl - LONG $0x3c048841 // mov byte [r12 + rdi], al - WORD $0x394d; BYTE $0xf1 // cmp r9, r14 - JNE LBB1_145 - -LBB1_142: - LONG $0x01c0f641 // test r8b, 1 - JE LBB1_202 - LONG $0x2e394466 // cmp word [rsi], r13w - WORD $0x940f; BYTE $0xd0 // sete al - WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xf2 // mov rdx, r14 - LONG $0x03eac148 // shr rdx, 3 - LONG $0x143c8a41 // mov dil, byte [r12 + rdx] - LONG $0x07e68041 // and r14b, 7 - WORD $0x01b3 // mov bl, 1 - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d - WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf8 // xor al, dil - WORD $0xc320 // and bl, al - WORD $0x3040; BYTE $0xfb // xor bl, dil - LONG $0x141c8841 // mov byte [r12 + rdx], bl - JMP LBB1_202 - -LBB1_195: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi - WORD $0x894d; BYTE $0xfb // mov r11, r15 -LBB1_196: +LBB1_194: WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xca20 // and dl, cl WORD $0xdaf6 // neg dl WORD $0x8948; BYTE $0xf7 // mov rdi, rsi LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3b // movzx r9d, byte [r11 + rdi] - WORD $0x3044; BYTE $0xca // xor dl, r9b + LONG $0x14b60f45; BYTE $0x3f // movzx r10d, byte [r15 + rdi] WORD $0xf189 // mov ecx, esi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd020 // and al, dl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3b048841 // mov byte [r11 + rdi], al + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl + WORD $0x3044; BYTE $0xd2 // xor dl, r10b + WORD $0x2041; BYTE $0xd3 // and r11b, dl + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + LONG $0x3f1c8845 // mov byte [r15 + rdi], r11b LONG $0x02c68348 // add rsi, 2 LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] LONG $0x085b8d48 // lea rbx, [rbx + 8] - LONG $0xd1940f41 // sete r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xc1 // xor r9b, al + LONG $0xd29b0f41 // setnp r10b + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x2044; BYTE $0xd2 // and dl, r10b + WORD $0x894d; BYTE $0xfa // mov r10, r15 + WORD $0xdaf6 // neg dl + WORD $0x3044; BYTE $0xda // xor dl, r11b WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b2 // mov dl, 1 - WORD $0xe2d2 // shl dl, cl - WORD $0x2044; BYTE $0xca // and dl, r9b - WORD $0xc230 // xor dl, al - LONG $0x3b148841 // mov byte [r11 + rdi], dl - WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB1_196 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0xd020 // and al, dl + WORD $0x3044; BYTE $0xd8 // xor al, r11b + LONG $0x3f048841 // mov byte [r15 + rdi], al + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + JNE LBB1_194 -LBB1_193: +LBB1_191: LONG $0x01c0f641 // test r8b, 1 - JE LBB1_202 + JE LBB1_199 WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - WORD $0x940f; BYTE $0xd0 // sete al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xf2 // mov rdx, rsi - LONG $0x03eac148 // shr rdx, 3 - WORD $0x894d; BYTE $0xfe // mov r14, r15 - LONG $0x173c8a41 // mov dil, byte [r15 + rdx] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al + WORD $0xdaf6 // neg dl + WORD $0x8948; BYTE $0xf0 // mov rax, rsi + LONG $0x03e8c148 // shr rax, 3 + LONG $0x073c8a41 // mov dil, byte [r15 + rax] LONG $0x07e68040 // and sil, 7 WORD $0x01b3 // mov bl, 1 WORD $0xf189 // mov ecx, esi 
WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf8 // xor al, dil - WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xfa // xor dl, dil + WORD $0xd320 // and bl, dl WORD $0x3040; BYTE $0xfb // xor bl, dil - LONG $0x171c8841 // mov byte [r15 + rdx], bl - -LBB1_202: - MOVQ 320(SP), SP - RET + LONG $0x071c8841 // mov byte [r15 + rax], bl + JMP LBB1_199 -LBB1_86: +LBB1_87: LONG $0xf0e78349 // and r15, -16 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi QUAD $0x0000010824848948 // mov qword [rsp + 264], rax - QUAD $0x000000f824bc894c // mov qword [rsp + 248], r15 + QUAD $0x000000e824bc894c // mov qword [rsp + 232], r15 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] - LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax + LONG $0x24448948; BYTE $0x58 // mov qword [rsp + 88], rax LONG $0xc3b60f41 // movzx eax, r11b LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 - QUAD $0x0000d0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm1 + QUAD $0x0000c0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm1 WORD $0xc031 // xor eax, eax QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 -LBB1_87: +LBB1_88: WORD $0x8948; BYTE $0xc7 // mov rdi, rax QUAD $0x0000009824848948 // mov qword [rsp + 152], rax LONG $0x05e7c148 // shl rdi, 5 WORD $0x8949; BYTE $0xfb // mov r11, rdi - WORD $0x8949; BYTE $0xfe // mov r14, rdi - WORD $0x8948; BYTE $0xfb // mov rbx, rdi WORD $0x8949; BYTE $0xff // mov r15, rdi + WORD $0x8948; BYTE $0xfb // mov rbx, rdi + WORD $0x8949; BYTE $0xfc // mov r12, rdi WORD $0x8949; BYTE $0xfa // mov r10, rdi + WORD $0x8949; BYTE $0xfe // mov r14, rdi WORD $0x8949; BYTE $0xf8 // mov r8, rdi - WORD $0x8949; BYTE $0xfc // mov r12, rdi WORD $0x8949; BYTE $0xf9 // mov r9, rdi + WORD $0x8948; BYTE $0xf8 // mov rax, rdi WORD $0x8948; BYTE $0xfa // mov rdx, rdi - LONG $0x247c8948; BYTE $0x58 // mov qword [rsp + 88], rdi - LONG $0x247c8948; BYTE $0x38 // mov qword [rsp + 56], rdi + LONG $0x247c8948; BYTE $0x68 // mov qword [rsp + 104], rdi LONG $0x3e0cb60f // movzx ecx, byte [rsi + rdi] LONG $0x6e0f4466; BYTE $0xf9 // movd xmm15, ecx LONG $0x3e4cb60f; BYTE $0x01 // movzx ecx, byte [rsi + rdi + 1] @@ -5661,7 +6119,7 @@ LBB1_87: LONG $0xd96e0f66 // movd xmm3, ecx LONG $0x3e4cb60f; BYTE $0x07 // movzx ecx, byte [rsi + rdi + 7] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x0000e024847f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm0 + QUAD $0x0000b024847f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm0 LONG $0x3e4cb60f; BYTE $0x08 // movzx ecx, byte [rsi + rdi + 8] LONG $0xc16e0f66 // movd xmm0, ecx QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 @@ -5669,7 +6127,7 @@ LBB1_87: LONG $0x6e0f4466; BYTE $0xd1 // movd xmm10, ecx LONG $0x3e4cb60f; BYTE $0x0a // movzx ecx, byte [rsi + rdi + 10] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x0000c024847f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm0 + QUAD $0x0000a024847f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm0 LONG $0x3e4cb60f; BYTE $0x0b // movzx ecx, byte [rsi + rdi + 11] LONG $0x6e0f4466; BYTE $0xd9 // movd xmm11, ecx LONG $0x3e4cb60f; BYTE $0x0c // movzx ecx, byte [rsi + rdi + 12] @@ -5678,172 +6136,170 @@ LBB1_87: LONG $0x6e0f4466; BYTE $0xe1 // movd xmm12, ecx LONG $0x3e4cb60f; BYTE $0x0e // movzx ecx, byte [rsi + rdi + 14] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00012024847f0f66; BYTE $0x00 // movdqa oword [rsp + 288], xmm0 - LONG $0x247c8948; BYTE $0x20 // mov 
qword [rsp + 32], rdi + QUAD $0x0000f024847f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm0 + LONG $0x247c8948; BYTE $0x28 // mov qword [rsp + 40], rdi WORD $0x8949; BYTE $0xfd // mov r13, rdi LONG $0x20cd8349 // or r13, 32 - LONG $0x246c894c; BYTE $0x28 // mov qword [rsp + 40], r13 + LONG $0x246c894c; BYTE $0x08 // mov qword [rsp + 8], r13 WORD $0x8948; BYTE $0xf9 // mov rcx, rdi LONG $0x40c98348 // or rcx, 64 - LONG $0x244c8948; BYTE $0x40 // mov qword [rsp + 64], rcx + LONG $0x244c8948; BYTE $0x50 // mov qword [rsp + 80], rcx LONG $0x60cb8349 // or r11, 96 LONG $0x80cb8148; WORD $0x0000; BYTE $0x00 // or rbx, 128 - LONG $0xa0ce8149; WORD $0x0000; BYTE $0x00 // or r14, 160 - LONG $0xc0cf8149; WORD $0x0000; BYTE $0x00 // or r15, 192 + LONG $0xa0cf8149; WORD $0x0000; BYTE $0x00 // or r15, 160 + LONG $0xc0cc8149; WORD $0x0000; BYTE $0x00 // or r12, 192 LONG $0xe0ca8149; WORD $0x0000; BYTE $0x00 // or r10, 224 - LONG $0x00cc8149; WORD $0x0001; BYTE $0x00 // or r12, 256 + LONG $0x00ce8149; WORD $0x0001; BYTE $0x00 // or r14, 256 LONG $0x20c98149; WORD $0x0001; BYTE $0x00 // or r9, 288 - QUAD $0x00000080248c894c // mov qword [rsp + 128], r9 - LONG $0x40ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 320 - LONG $0x24548948; BYTE $0x30 // mov qword [rsp + 48], rdx - LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] + LONG $0x244c894c; BYTE $0x78 // mov qword [rsp + 120], r9 + LONG $0x01400d48; WORD $0x0000 // or rax, 320 + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax LONG $0x60ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 352 - LONG $0x24548948; BYTE $0x58 // mov qword [rsp + 88], rdx - LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] + QUAD $0x000000d024948948 // mov qword [rsp + 208], rdx + LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] LONG $0x80c88149; WORD $0x0001; BYTE $0x00 // or r8, 384 WORD $0x8948; BYTE $0xf8 // mov rax, rdi LONG $0x01a00d48; WORD $0x0000 // or rax, 416 - LONG $0x24448948; BYTE $0x70 // mov qword [rsp + 112], rax + LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax WORD $0x8948; BYTE $0xf8 // mov rax, rdi LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax + LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax WORD $0x8948; BYTE $0xf8 // mov rax, rdi LONG $0x01e00d48; WORD $0x0000 // or rax, 480 - LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax QUAD $0x012e3c203a0f4666 // pinsrb xmm15, byte [rsi + r13], 1 QUAD $0x020e3c203a0f4466 // pinsrb xmm15, byte [rsi + rcx], 2 - LONG $0x245c894c; BYTE $0x68 // mov qword [rsp + 104], r11 QUAD $0x031e3c203a0f4666 // pinsrb xmm15, byte [rsi + r11], 3 - LONG $0x245c8948; BYTE $0x50 // mov qword [rsp + 80], rbx + LONG $0x245c8948; BYTE $0x30 // mov qword [rsp + 48], rbx QUAD $0x041e3c203a0f4466 // pinsrb xmm15, byte [rsi + rbx], 4 - LONG $0x2474894c; BYTE $0x60 // mov qword [rsp + 96], r14 - QUAD $0x05363c203a0f4666 // pinsrb xmm15, byte [rsi + r14], 5 - QUAD $0x063e3c203a0f4666 // pinsrb xmm15, byte [rsi + r15], 6 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + QUAD $0x053e3c203a0f4666 // pinsrb xmm15, byte [rsi + r15], 5 + QUAD $0x06263c203a0f4666 // pinsrb xmm15, byte [rsi + r12], 6 WORD $0x894c; BYTE $0xd7 // mov rdi, r10 + QUAD $0x000000802494894c // mov qword [rsp + 128], r10 QUAD $0x07163c203a0f4666 // pinsrb xmm15, byte [rsi + r10], 7 - QUAD $0x08263c203a0f4666 // pinsrb xmm15, byte [rsi + r12], 8 + WORD $0x894d; BYTE $0xf2 // mov r10, r14 + QUAD $0x08363c203a0f4666 // pinsrb xmm15, byte [rsi + r14], 8 
QUAD $0x090e3c203a0f4666 // pinsrb xmm15, byte [rsi + r9], 9 - LONG $0x246c8b4c; BYTE $0x30 // mov r13, qword [rsp + 48] + LONG $0x246c8b4c; BYTE $0x10 // mov r13, qword [rsp + 16] QUAD $0x0a2e3c203a0f4666 // pinsrb xmm15, byte [rsi + r13], 10 QUAD $0x0b163c203a0f4466 // pinsrb xmm15, byte [rsi + rdx], 11 QUAD $0x0c063c203a0f4666 // pinsrb xmm15, byte [rsi + r8], 12 - LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] - QUAD $0x0d0e3c203a0f4666 // pinsrb xmm15, byte [rsi + r9], 13 - LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] - QUAD $0x0e0e3c203a0f4466 // pinsrb xmm15, byte [rsi + rcx], 14 + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + QUAD $0x0d363c203a0f4666 // pinsrb xmm15, byte [rsi + r14], 13 + LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] + QUAD $0x0e0e3c203a0f4666 // pinsrb xmm15, byte [rsi + r9], 14 QUAD $0x0f063c203a0f4466 // pinsrb xmm15, byte [rsi + rax], 15 - LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] - QUAD $0x01166c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rsi + r10 + 1], 1 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] + LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] + QUAD $0x01010e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 1], 1 + LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] QUAD $0x02010e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 1], 2 QUAD $0x011e6c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r11 + 1], 3 + WORD $0x894c; BYTE $0xd9 // mov rcx, r11 QUAD $0x04011e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 1], 4 - QUAD $0x01366c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r14 + 1], 5 - QUAD $0x013e6c203a0f4266; BYTE $0x06 // pinsrb xmm5, byte [rsi + r15 + 1], 6 - QUAD $0x000000b024bc894c // mov qword [rsp + 176], r15 + QUAD $0x013e6c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r15 + 1], 5 + QUAD $0x01266c203a0f4266; BYTE $0x06 // pinsrb xmm5, byte [rsi + r12 + 1], 6 + LONG $0x2464894c; BYTE $0x70 // mov qword [rsp + 112], r12 QUAD $0x07013e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 1], 7 - WORD $0x8949; BYTE $0xfe // mov r14, rdi - QUAD $0x000000a024bc8948 // mov qword [rsp + 160], rdi - QUAD $0x01266c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r12 + 1], 8 - WORD $0x894c; BYTE $0xe3 // mov rbx, r12 - LONG $0x2464894c; BYTE $0x48 // mov qword [rsp + 72], r12 - QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] - QUAD $0x09010e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 1], 9 + QUAD $0x01166c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r10 + 1], 8 + WORD $0x894c; BYTE $0xd3 // mov rbx, r10 + LONG $0x2454894c; BYTE $0x38 // mov qword [rsp + 56], r10 + LONG $0x247c8b4c; BYTE $0x78 // mov r15, qword [rsp + 120] + QUAD $0x013e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r15 + 1], 9 QUAD $0x012e6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r13 + 1], 10 QUAD $0x0b01166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 1], 11 QUAD $0x01066c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r8 + 1], 12 WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0x2444894c; BYTE $0x38 // mov qword [rsp + 56], r8 - QUAD $0x010e6c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r9 + 1], 13 - LONG $0x24648b4c; BYTE $0x18 // mov r12, qword [rsp + 24] - QUAD $0x01266c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rsi + r12 + 1], 14 + LONG $0x2444894c; BYTE $0x68 // mov qword [rsp + 104], r8 + QUAD $0x01366c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r14 + 1], 13 + QUAD $0x010e6c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rsi + r9 + 1], 14 + 
WORD $0x894d; BYTE $0xce // mov r14, r9 QUAD $0x0f01066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 1], 15 - QUAD $0x00d0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 208] + WORD $0x8949; BYTE $0xc1 // mov r9, rax + QUAD $0x00c0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 192] LONG $0x740f4166; BYTE $0xe9 // pcmpeqb xmm5, xmm9 LONG $0xfd6f0f66 // movdqa xmm7, xmm5 QUAD $0x000000a0a56f0f66 // movdqa xmm4, oword 160[rbp] /* [rip + .LCPI1_10] */ LONG $0xfcdb0f66 // pand xmm7, xmm4 LONG $0xfdf80f66 // psubb xmm7, xmm5 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - LONG $0x0654b60f; BYTE $0x0f // movzx edx, byte [rsi + rax + 15] + LONG $0x24448b4c; BYTE $0x28 // mov r8, qword [rsp + 40] + LONG $0x54b60f42; WORD $0x0f06 // movzx edx, byte [rsi + r8 + 15] LONG $0x6e0f4466; BYTE $0xf2 // movd xmm14, edx LONG $0x740f4566; BYTE $0xf9 // pcmpeqb xmm15, xmm9 - LONG $0x24448b4c; BYTE $0x28 // mov r8, qword [rsp + 40] - QUAD $0x020674203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r8 + 2], 1 - LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] - QUAD $0x021e74203a0f4266; BYTE $0x02 // pinsrb xmm6, byte [rsi + r11 + 2], 2 - LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] - QUAD $0x022e74203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r13 + 2], 3 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] + LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] + QUAD $0x021e74203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r11 + 2], 1 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x02020674203a0f66 // pinsrb xmm6, byte [rsi + rax + 2], 2 + WORD $0x8949; BYTE $0xcd // mov r13, rcx + QUAD $0x03020e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 2], 3 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x04020e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 2], 4 - LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x05023e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 2], 5 - QUAD $0x023e74203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r15 + 2], 6 - QUAD $0x023674203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r14 + 2], 7 + QUAD $0x022674203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r12 + 2], 6 + QUAD $0x0000008024948b48 // mov rdx, qword [rsp + 128] + QUAD $0x07021674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 2], 7 QUAD $0x08021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 8 - QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] - QUAD $0x09021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 9 - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - QUAD $0x023674203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r14 + 2], 10 - LONG $0x247c8b4c; BYTE $0x58 // mov r15, qword [rsp + 88] - QUAD $0x023e74203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r15 + 2], 11 + WORD $0x894c; BYTE $0xfb // mov rbx, r15 + QUAD $0x023e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r15 + 2], 9 + LONG $0x247c8b4c; BYTE $0x10 // mov r15, qword [rsp + 16] + QUAD $0x023e74203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r15 + 2], 10 + QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] + QUAD $0x022674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r12 + 2], 11 QUAD $0x021674203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r10 + 2], 12 - WORD $0x894d; BYTE $0xca // mov r10, r9 - QUAD $0x020e74203a0f4266; BYTE $0x0d // pinsrb xmm6, byte [rsi + r9 + 2], 13 - QUAD $0x022674203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r12 
+ 2], 14 - LONG $0x244c8b4c; BYTE $0x10 // mov r9, qword [rsp + 16] + LONG $0x24548b4c; BYTE $0x18 // mov r10, qword [rsp + 24] + QUAD $0x021674203a0f4266; BYTE $0x0d // pinsrb xmm6, byte [rsi + r10 + 2], 13 + QUAD $0x023674203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r14 + 2], 14 + LONG $0x244c894c; BYTE $0x40 // mov qword [rsp + 64], r9 QUAD $0x020e74203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r9 + 2], 15 LONG $0xdb0f4466; BYTE $0xfc // pand xmm15, xmm4 LONG $0x740f4166; BYTE $0xf1 // pcmpeqb xmm6, xmm9 QUAD $0x000000b0856f0f66 // movdqa xmm0, oword 176[rbp] /* [rip + .LCPI1_11] */ LONG $0xf0db0f66 // pand xmm6, xmm0 LONG $0xeb0f4166; BYTE $0xf7 // por xmm6, xmm15 - LONG $0x0654b60f; BYTE $0x10 // movzx edx, byte [rsi + rax + 16] + LONG $0x54b60f42; WORD $0x1006 // movzx edx, byte [rsi + r8 + 16] LONG $0x6e0f4466; BYTE $0xfa // movd xmm15, edx - WORD $0x894c; BYTE $0xc2 // mov rdx, r8 - QUAD $0x030654203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r8 + 3], 1 - WORD $0x894c; BYTE $0xd8 // mov rax, r11 - QUAD $0x031e54203a0f4266; BYTE $0x02 // pinsrb xmm2, byte [rsi + r11 + 3], 2 + WORD $0x894c; BYTE $0xda // mov rdx, r11 + QUAD $0x031e54203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r11 + 3], 1 + QUAD $0x02030654203a0f66 // pinsrb xmm2, byte [rsi + rax + 3], 2 QUAD $0x032e54203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r13 + 3], 3 QUAD $0x04030e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 3], 4 WORD $0x8949; BYTE $0xcb // mov r11, rcx QUAD $0x05033e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 3], 5 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] QUAD $0x06030e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 3], 6 - QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] + QUAD $0x0000008024bc8b48 // mov rdi, qword [rsp + 128] QUAD $0x07033e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 3], 7 - LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] QUAD $0x030654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r8 + 3], 8 QUAD $0x09031e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 3], 9 - QUAD $0x033654203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r14 + 3], 10 - WORD $0x894d; BYTE $0xfe // mov r14, r15 - QUAD $0x033e54203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r15 + 3], 11 - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] - QUAD $0x033e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r15 + 3], 12 + QUAD $0x033e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r15 + 3], 10 + WORD $0x894d; BYTE $0xe7 // mov r15, r12 + QUAD $0x032654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r12 + 3], 11 + LONG $0x24648b4c; BYTE $0x68 // mov r12, qword [rsp + 104] + QUAD $0x032654203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r12 + 3], 12 QUAD $0x031654203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r10 + 3], 13 - QUAD $0x032654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r12 + 3], 14 + QUAD $0x033654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r14 + 3], 14 QUAD $0x030e54203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r9 + 3], 15 QUAD $0x0104164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 4], 1 QUAD $0x0204064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 4], 2 QUAD $0x042e4c203a0f4266; BYTE $0x03 // pinsrb xmm1, byte [rsi + r13 + 4], 3 QUAD $0x041e4c203a0f4266; BYTE $0x04 // pinsrb xmm1, byte [rsi + r11 + 4], 4 - LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] + LONG $0x245c8b4c; BYTE $0x20 // mov 
r11, qword [rsp + 32] QUAD $0x041e4c203a0f4266; BYTE $0x05 // pinsrb xmm1, byte [rsi + r11 + 4], 5 QUAD $0x06040e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 4], 6 QUAD $0x07043e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 4], 7 QUAD $0x04064c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r8 + 4], 8 QUAD $0x09041e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 4], 9 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] QUAD $0x0a040e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 4], 10 - QUAD $0x04364c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r14 + 4], 11 - QUAD $0x043e4c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r15 + 4], 12 + QUAD $0x043e4c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r15 + 4], 11 + QUAD $0x04264c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r12 + 4], 12 QUAD $0x04164c203a0f4266; BYTE $0x0d // pinsrb xmm1, byte [rsi + r10 + 4], 13 - WORD $0x894d; BYTE $0xd7 // mov r15, r10 - QUAD $0x04264c203a0f4266; BYTE $0x0e // pinsrb xmm1, byte [rsi + r12 + 4], 14 - WORD $0x894d; BYTE $0xe2 // mov r10, r12 + QUAD $0x04364c203a0f4266; BYTE $0x0e // pinsrb xmm1, byte [rsi + r14 + 4], 14 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 QUAD $0x040e4c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r9 + 4], 15 LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] LONG $0x3e54b60f; BYTE $0x11 // movzx edx, byte [rsi + rdi + 17] LONG $0xc26e0f66 // movd xmm0, edx LONG $0x740f4166; BYTE $0xd1 // pcmpeqb xmm2, xmm9 @@ -5855,34 +6311,35 @@ LBB1_87: LONG $0xcaeb0f66 // por xmm1, xmm2 LONG $0x3e54b60f; BYTE $0x12 // movzx edx, byte [rsi + rdi + 18] LONG $0xea6e0f66 // movd xmm5, edx - LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] + LONG $0x244c8b4c; BYTE $0x08 // mov r9, qword [rsp + 8] QUAD $0x050e44203a0f4666; BYTE $0x01 // pinsrb xmm8, byte [rsi + r9 + 5], 1 QUAD $0x050644203a0f4466; BYTE $0x02 // pinsrb xmm8, byte [rsi + rax + 5], 2 QUAD $0x052e44203a0f4666; BYTE $0x03 // pinsrb xmm8, byte [rsi + r13 + 5], 3 - LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] + LONG $0x246c894c; BYTE $0x60 // mov qword [rsp + 96], r13 + LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] QUAD $0x051644203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rsi + rdx + 5], 4 QUAD $0x051e44203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r11 + 5], 5 - QUAD $0x000000b024bc8b48 // mov rdi, qword [rsp + 176] + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x053e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rdi + 5], 6 - QUAD $0x000000a024848b4c // mov r8, qword [rsp + 160] - QUAD $0x050644203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r8 + 5], 7 - LONG $0x24548b48; BYTE $0x48 // mov rdx, qword [rsp + 72] + QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] + QUAD $0x052644203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r12 + 5], 7 + LONG $0x24548b48; BYTE $0x38 // mov rdx, qword [rsp + 56] QUAD $0x051644203a0f4466; BYTE $0x08 // pinsrb xmm8, byte [rsi + rdx + 5], 8 QUAD $0x051e44203a0f4466; BYTE $0x09 // pinsrb xmm8, byte [rsi + rbx + 5], 9 QUAD $0x050e44203a0f4466; BYTE $0x0a // pinsrb xmm8, byte [rsi + rcx + 5], 10 - QUAD $0x053644203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r14 + 5], 11 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + QUAD $0x053e44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r15 + 5], 11 + LONG $0x244c8b48; BYTE $0x68 // mov rcx, 
qword [rsp + 104] QUAD $0x050e44203a0f4466; BYTE $0x0c // pinsrb xmm8, byte [rsi + rcx + 5], 12 - QUAD $0x053e44203a0f4666; BYTE $0x0d // pinsrb xmm8, byte [rsi + r15 + 5], 13 - WORD $0x894d; BYTE $0xfc // mov r12, r15 - QUAD $0x051644203a0f4666; BYTE $0x0e // pinsrb xmm8, byte [rsi + r10 + 5], 14 - LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] + QUAD $0x051644203a0f4666; BYTE $0x0d // pinsrb xmm8, byte [rsi + r10 + 5], 13 + WORD $0x894d; BYTE $0xd6 // mov r14, r10 + QUAD $0x050644203a0f4666; BYTE $0x0e // pinsrb xmm8, byte [rsi + r8 + 5], 14 + LONG $0x24548b4c; BYTE $0x40 // mov r10, qword [rsp + 64] QUAD $0x051644203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r10 + 5], 15 LONG $0x740f4566; BYTE $0xc1 // pcmpeqb xmm8, xmm9 QUAD $0x000000e0956f0f66 // movdqa xmm2, oword 224[rbp] /* [rip + .LCPI1_14] */ LONG $0xdb0f4466; BYTE $0xc2 // pand xmm8, xmm2 LONG $0xeb0f4466; BYTE $0xc1 // por xmm8, xmm1 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] LONG $0x0e54b60f; BYTE $0x13 // movzx edx, byte [rsi + rcx + 19] LONG $0xfa6e0f66 // movd xmm7, edx LONG $0xeb0f4466; BYTE $0xc6 // por xmm8, xmm6 @@ -5891,47 +6348,48 @@ LBB1_87: QUAD $0x060e5c203a0f4266; BYTE $0x01 // pinsrb xmm3, byte [rsi + r9 + 6], 1 QUAD $0x0206065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 6], 2 QUAD $0x062e5c203a0f4266; BYTE $0x03 // pinsrb xmm3, byte [rsi + r13 + 6], 3 - LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] + LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] QUAD $0x061e5c203a0f4266; BYTE $0x04 // pinsrb xmm3, byte [rsi + r11 + 6], 4 - LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] - QUAD $0x063e5c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r15 + 6], 5 + LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] + QUAD $0x06065c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r8 + 6], 5 WORD $0x8948; BYTE $0xf9 // mov rcx, rdi QUAD $0x06063e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 6], 6 - WORD $0x894c; BYTE $0xc7 // mov rdi, r8 - QUAD $0x06065c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r8 + 6], 7 - LONG $0x24548b48; BYTE $0x48 // mov rdx, qword [rsp + 72] + WORD $0x894c; BYTE $0xe7 // mov rdi, r12 + QUAD $0x06265c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r12 + 6], 7 + LONG $0x24548b48; BYTE $0x38 // mov rdx, qword [rsp + 56] QUAD $0x0806165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 6], 8 QUAD $0x09061e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 6], 9 - LONG $0x24448b4c; BYTE $0x30 // mov r8, qword [rsp + 48] - QUAD $0x06065c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r8 + 6], 10 - QUAD $0x06365c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r14 + 6], 11 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] - QUAD $0x0c06065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 6], 12 - QUAD $0x06265c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r12 + 6], 13 - WORD $0x894d; BYTE $0xe5 // mov r13, r12 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] - QUAD $0x0e06165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 6], 14 + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + QUAD $0x0a06165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 6], 10 + QUAD $0x063e5c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r15 + 6], 11 + LONG $0x24648b4c; BYTE $0x68 // mov r12, qword [rsp + 104] + QUAD $0x06265c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r12 + 6], 12 + QUAD $0x06365c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r14 + 6], 13 + WORD 
$0x894d; BYTE $0xf5 // mov r13, r14 + LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] + QUAD $0x06365c203a0f4266; BYTE $0x0e // pinsrb xmm3, byte [rsi + r14 + 6], 14 QUAD $0x06165c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r10 + 6], 15 - QUAD $0x0000e024946f0f66; BYTE $0x00 // movdqa xmm2, oword [rsp + 224] + QUAD $0x0000b024946f0f66; BYTE $0x00 // movdqa xmm2, oword [rsp + 176] QUAD $0x070e54203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r9 + 7], 1 - LONG $0x24648b4c; BYTE $0x40 // mov r12, qword [rsp + 64] - QUAD $0x072654203a0f4266; BYTE $0x02 // pinsrb xmm2, byte [rsi + r12 + 7], 2 - LONG $0x24548b48; BYTE $0x68 // mov rdx, qword [rsp + 104] - QUAD $0x03071654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 7], 3 + WORD $0x8949; BYTE $0xc2 // mov r10, rax + QUAD $0x02070654203a0f66 // pinsrb xmm2, byte [rsi + rax + 7], 2 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x03070654203a0f66 // pinsrb xmm2, byte [rsi + rax + 7], 3 QUAD $0x071e54203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r11 + 7], 4 - QUAD $0x073e54203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r15 + 7], 5 + QUAD $0x070654203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r8 + 7], 5 QUAD $0x06070e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 7], 6 QUAD $0x07073e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 7], 7 - LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] - QUAD $0x071654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r10 + 7], 8 + WORD $0x8948; BYTE $0xf8 // mov rax, rdi + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] + QUAD $0x070654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r8 + 7], 8 QUAD $0x09071e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 7], 9 - QUAD $0x070654203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r8 + 7], 10 - QUAD $0x073654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r14 + 7], 11 - QUAD $0x0c070654203a0f66 // pinsrb xmm2, byte [rsi + rax + 7], 12 + QUAD $0x0a071654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 7], 10 + QUAD $0x073e54203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r15 + 7], 11 + QUAD $0x072654203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r12 + 7], 12 QUAD $0x072e54203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r13 + 7], 13 - LONG $0x247c8b48; BYTE $0x18 // mov rdi, qword [rsp + 24] - QUAD $0x0e073e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 7], 14 - LONG $0x244c8b4c; BYTE $0x10 // mov r9, qword [rsp + 16] + WORD $0x894c; BYTE $0xf7 // mov rdi, r14 + QUAD $0x073654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r14 + 7], 14 + LONG $0x244c8b4c; BYTE $0x40 // mov r9, qword [rsp + 64] QUAD $0x070e54203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r9 + 7], 15 LONG $0x740f4166; BYTE $0xd9 // pcmpeqb xmm3, xmm9 QUAD $0x000000f08d6f0f66 // movdqa xmm1, oword 240[rbp] /* [rip + .LCPI1_15] */ @@ -5942,38 +6400,36 @@ LBB1_87: LONG $0xd1db0f66 // pand xmm2, xmm1 LONG $0xd3eb0f66 // por xmm2, xmm3 LONG $0xca6f0f66 // movdqa xmm1, xmm2 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] + LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] LONG $0x1e54b60f; BYTE $0x15 // movzx edx, byte [rsi + rbx + 21] LONG $0xd26e0f66 // movd xmm2, edx - LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] + LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] QUAD $0x091e54203a0f4666; BYTE $0x01 // pinsrb xmm10, byte [rsi + r11 + 9], 1 - QUAD $0x092654203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rsi + r12 + 9], 2 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD 
$0x090654203a0f4466; BYTE $0x03 // pinsrb xmm10, byte [rsi + rax + 9], 3 - LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] + QUAD $0x091654203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rsi + r10 + 9], 2 + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + QUAD $0x090e54203a0f4466; BYTE $0x03 // pinsrb xmm10, byte [rsi + rcx + 9], 3 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x090e54203a0f4466; BYTE $0x04 // pinsrb xmm10, byte [rsi + rcx + 9], 4 - LONG $0x247c8b4c; BYTE $0x60 // mov r15, qword [rsp + 96] - QUAD $0x093e54203a0f4666; BYTE $0x05 // pinsrb xmm10, byte [rsi + r15 + 9], 5 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] - QUAD $0x092654203a0f4666; BYTE $0x06 // pinsrb xmm10, byte [rsi + r12 + 9], 6 - QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] + LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + QUAD $0x091654203a0f4466; BYTE $0x05 // pinsrb xmm10, byte [rsi + rdx + 9], 5 + LONG $0x24748b4c; BYTE $0x70 // mov r14, qword [rsp + 112] + QUAD $0x093654203a0f4666; BYTE $0x06 // pinsrb xmm10, byte [rsi + r14 + 9], 6 QUAD $0x090654203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rax + 9], 7 - QUAD $0x091654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r10 + 9], 8 - WORD $0x894d; BYTE $0xd6 // mov r14, r10 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + QUAD $0x090654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r8 + 9], 8 + WORD $0x894d; BYTE $0xc7 // mov r15, r8 + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x090654203a0f4466; BYTE $0x09 // pinsrb xmm10, byte [rsi + rax + 9], 9 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x090654203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rax + 9], 10 - LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] + LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] + QUAD $0x091654203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r10 + 9], 10 + QUAD $0x000000d024948b48 // mov rdx, qword [rsp + 208] QUAD $0x091654203a0f4466; BYTE $0x0b // pinsrb xmm10, byte [rsi + rdx + 9], 11 - LONG $0x24548b48; BYTE $0x38 // mov rdx, qword [rsp + 56] - QUAD $0x091654203a0f4466; BYTE $0x0c // pinsrb xmm10, byte [rsi + rdx + 9], 12 - WORD $0x894d; BYTE $0xea // mov r10, r13 + QUAD $0x092654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r12 + 9], 12 + WORD $0x894c; BYTE $0xe8 // mov rax, r13 QUAD $0x092e54203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r13 + 9], 13 QUAD $0x093e54203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rdi + 9], 14 QUAD $0x090e54203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r9 + 9], 15 LONG $0xeb0f4166; BYTE $0xc8 // por xmm1, xmm8 - QUAD $0x0000e0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm1 + QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 LONG $0x740f4566; BYTE $0xd1 // pcmpeqb xmm10, xmm9 LONG $0x6f0f4166; BYTE $0xca // movdqa xmm1, xmm10 LONG $0x6f0f4466; BYTE $0xc4 // movdqa xmm8, xmm4 @@ -5983,130 +6439,128 @@ LBB1_87: LONG $0xda6e0f66 // movd xmm3, edx QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] QUAD $0x081e64203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rsi + r11 + 8], 1 - LONG $0x246c8b4c; BYTE $0x40 // mov r13, qword [rsp + 64] + LONG $0x246c8b4c; BYTE $0x50 // mov r13, qword [rsp + 80] QUAD $0x082e64203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r13 + 8], 2 - LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] + LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] QUAD $0x080664203a0f4266; 
BYTE $0x03 // pinsrb xmm4, byte [rsi + r8 + 8], 3 QUAD $0x04080e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 8], 4 - WORD $0x894d; BYTE $0xf9 // mov r9, r15 - QUAD $0x083e64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r15 + 8], 5 - QUAD $0x082664203a0f4266; BYTE $0x06 // pinsrb xmm4, byte [rsi + r12 + 8], 6 - QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x083e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r15 + 8], 7 - QUAD $0x083664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r14 + 8], 8 - WORD $0x894c; BYTE $0xf3 // mov rbx, r14 - QUAD $0x0000008024948b48 // mov rdx, qword [rsp + 128] + LONG $0x244c8b4c; BYTE $0x20 // mov r9, qword [rsp + 32] + QUAD $0x080e64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r9 + 8], 5 + QUAD $0x083664203a0f4266; BYTE $0x06 // pinsrb xmm4, byte [rsi + r14 + 8], 6 + QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] + QUAD $0x083664203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r14 + 8], 7 + QUAD $0x083e64203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r15 + 8], 8 + WORD $0x894c; BYTE $0xfb // mov rbx, r15 + LONG $0x24548b48; BYTE $0x78 // mov rdx, qword [rsp + 120] QUAD $0x09081664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 8], 9 - QUAD $0x0a080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 10 - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 11 - LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] - QUAD $0x083664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r14 + 8], 12 - QUAD $0x081664203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r10 + 8], 13 + QUAD $0x081664203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r10 + 8], 10 + QUAD $0x000000d024bc8b4c // mov r15, qword [rsp + 208] + QUAD $0x083e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 8], 11 + QUAD $0x082664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 8], 12 + QUAD $0x0d080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 13 QUAD $0x0e083e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 8], 14 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0f080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 15 LONG $0x740f4166; BYTE $0xe1 // pcmpeqb xmm4, xmm9 LONG $0xdb0f4166; BYTE $0xe0 // pand xmm4, xmm8 - QUAD $0x00c024946f0f4466; WORD $0x0000 // movdqa xmm10, oword [rsp + 192] + QUAD $0x00a024946f0f4466; WORD $0x0000 // movdqa xmm10, oword [rsp + 160] QUAD $0x0a1e54203a0f4666; BYTE $0x01 // pinsrb xmm10, byte [rsi + r11 + 10], 1 QUAD $0x0a2e54203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rsi + r13 + 10], 2 QUAD $0x0a0654203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r8 + 10], 3 - WORD $0x894d; BYTE $0xc4 // mov r12, r8 + WORD $0x894d; BYTE $0xc2 // mov r10, r8 QUAD $0x0a0e54203a0f4466; BYTE $0x04 // pinsrb xmm10, byte [rsi + rcx + 10], 4 QUAD $0x0a0e54203a0f4666; BYTE $0x05 // pinsrb xmm10, byte [rsi + r9 + 10], 5 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] QUAD $0x0a0e54203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rcx + 10], 6 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 - QUAD $0x0a3e54203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r15 + 10], 7 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 + QUAD $0x0a3654203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r14 + 10], 7 QUAD $0x0a1e54203a0f4466; BYTE $0x08 // pinsrb xmm10, byte [rsi + rbx + 10], 8 QUAD $0x0a1654203a0f4466; BYTE $0x09 // 
pinsrb xmm10, byte [rsi + rdx + 10], 9 - LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] QUAD $0x0a1654203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rdx + 10], 10 WORD $0x8948; BYTE $0xd3 // mov rbx, rdx - LONG $0x247c8b4c; BYTE $0x58 // mov r15, qword [rsp + 88] QUAD $0x0a3e54203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r15 + 10], 11 - QUAD $0x0a3654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r14 + 10], 12 - QUAD $0x0a1654203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r10 + 10], 13 + QUAD $0x0a2654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r12 + 10], 12 + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + QUAD $0x0a3654203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r14 + 10], 13 QUAD $0x0a3e54203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rdi + 10], 14 QUAD $0x0a0654203a0f4466; BYTE $0x0f // pinsrb xmm10, byte [rsi + rax + 10], 15 LONG $0x740f4566; BYTE $0xd1 // pcmpeqb xmm10, xmm9 QUAD $0x0000b095db0f4466; BYTE $0x00 // pand xmm10, oword 176[rbp] /* [rip + .LCPI1_11] */ LONG $0xeb0f4466; BYTE $0xd4 // por xmm10, xmm4 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] LONG $0x0654b60f; BYTE $0x17 // movzx edx, byte [rsi + rax + 23] LONG $0x6e0f4466; BYTE $0xc2 // movd xmm8, edx LONG $0xeb0f4466; BYTE $0xd1 // por xmm10, xmm1 - QUAD $0x00c024947f0f4466; WORD $0x0000 // movdqa oword [rsp + 192], xmm10 + QUAD $0x00a024947f0f4466; WORD $0x0000 // movdqa oword [rsp + 160], xmm10 LONG $0x0654b60f; BYTE $0x18 // movzx edx, byte [rsi + rax + 24] LONG $0x6e0f4466; BYTE $0xd2 // movd xmm10, edx QUAD $0x0b1e5c203a0f4666; BYTE $0x01 // pinsrb xmm11, byte [rsi + r11 + 11], 1 QUAD $0x0b2e5c203a0f4666; BYTE $0x02 // pinsrb xmm11, byte [rsi + r13 + 11], 2 - QUAD $0x0b265c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r12 + 11], 3 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0b165c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r10 + 11], 3 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0b065c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rsi + rax + 11], 4 QUAD $0x0b0e5c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r9 + 11], 5 QUAD $0x0b0e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rcx + 11], 6 WORD $0x894c; BYTE $0xc7 // mov rdi, r8 QUAD $0x0b065c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r8 + 11], 7 - LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] QUAD $0x0b065c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r8 + 11], 8 - QUAD $0x00000080248c8b4c // mov r9, qword [rsp + 128] + LONG $0x244c8b4c; BYTE $0x78 // mov r9, qword [rsp + 120] QUAD $0x0b0e5c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r9 + 11], 9 QUAD $0x0b1e5c203a0f4466; BYTE $0x0a // pinsrb xmm11, byte [rsi + rbx + 11], 10 - WORD $0x894d; BYTE $0xfe // mov r14, r15 QUAD $0x0b3e5c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r15 + 11], 11 - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] - QUAD $0x0b3e5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r15 + 11], 12 - QUAD $0x0b165c203a0f4666; BYTE $0x0d // pinsrb xmm11, byte [rsi + r10 + 11], 13 - LONG $0x24648b4c; BYTE $0x18 // mov r12, qword [rsp + 24] - QUAD $0x0b265c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r12 + 11], 14 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + QUAD $0x0b265c203a0f4666; BYTE 
$0x0c // pinsrb xmm11, byte [rsi + r12 + 11], 12 + WORD $0x894d; BYTE $0xf2 // mov r10, r14 + QUAD $0x0b365c203a0f4666; BYTE $0x0d // pinsrb xmm11, byte [rsi + r14 + 11], 13 + LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] + QUAD $0x0b365c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r14 + 11], 14 + LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] QUAD $0x0b165c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rsi + rdx + 11], 15 QUAD $0x0c1e6c203a0f4666; BYTE $0x01 // pinsrb xmm13, byte [rsi + r11 + 12], 1 QUAD $0x0c2e6c203a0f4666; BYTE $0x02 // pinsrb xmm13, byte [rsi + r13 + 12], 2 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] QUAD $0x0c1e6c203a0f4466; BYTE $0x03 // pinsrb xmm13, byte [rsi + rbx + 12], 3 QUAD $0x0c066c203a0f4466; BYTE $0x04 // pinsrb xmm13, byte [rsi + rax + 12], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c066c203a0f4466; BYTE $0x05 // pinsrb xmm13, byte [rsi + rax + 12], 5 QUAD $0x0c0e6c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rcx + 12], 6 QUAD $0x0c3e6c203a0f4466; BYTE $0x07 // pinsrb xmm13, byte [rsi + rdi + 12], 7 QUAD $0x0c066c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r8 + 12], 8 QUAD $0x0c0e6c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r9 + 12], 9 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] QUAD $0x0c1e6c203a0f4466; BYTE $0x0a // pinsrb xmm13, byte [rsi + rbx + 12], 10 - QUAD $0x0c366c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r14 + 12], 11 - QUAD $0x0c3e6c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r15 + 12], 12 + QUAD $0x0c3e6c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r15 + 12], 11 + QUAD $0x0c266c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r12 + 12], 12 QUAD $0x0c166c203a0f4666; BYTE $0x0d // pinsrb xmm13, byte [rsi + r10 + 12], 13 WORD $0x894d; BYTE $0xd3 // mov r11, r10 - QUAD $0x0c266c203a0f4666; BYTE $0x0e // pinsrb xmm13, byte [rsi + r12 + 12], 14 + QUAD $0x0c366c203a0f4666; BYTE $0x0e // pinsrb xmm13, byte [rsi + r14 + 12], 14 QUAD $0x0c166c203a0f4466; BYTE $0x0f // pinsrb xmm13, byte [rsi + rdx + 12], 15 - LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] + LONG $0x24548b4c; BYTE $0x08 // mov r10, qword [rsp + 8] QUAD $0x0d1664203a0f4666; BYTE $0x01 // pinsrb xmm12, byte [rsi + r10 + 13], 1 QUAD $0x0d2e64203a0f4666; BYTE $0x02 // pinsrb xmm12, byte [rsi + r13 + 13], 2 - LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] + LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] QUAD $0x0d2e64203a0f4666; BYTE $0x03 // pinsrb xmm12, byte [rsi + r13 + 13], 3 - LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] QUAD $0x0d1e64203a0f4466; BYTE $0x04 // pinsrb xmm12, byte [rsi + rbx + 13], 4 QUAD $0x0d0664203a0f4466; BYTE $0x05 // pinsrb xmm12, byte [rsi + rax + 13], 5 QUAD $0x0d0e64203a0f4466; BYTE $0x06 // pinsrb xmm12, byte [rsi + rcx + 13], 6 QUAD $0x0d3e64203a0f4466; BYTE $0x07 // pinsrb xmm12, byte [rsi + rdi + 13], 7 QUAD $0x0d0664203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r8 + 13], 8 QUAD $0x0d0e64203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r9 + 13], 9 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] QUAD $0x0d1e64203a0f4466; BYTE $0x0a // pinsrb xmm12, byte [rsi + 
rbx + 13], 10 - QUAD $0x0d3664203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r14 + 13], 11 - QUAD $0x0d3e64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r15 + 13], 12 + QUAD $0x0d3e64203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r15 + 13], 11 + QUAD $0x0d2664203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r12 + 13], 12 QUAD $0x0d1e64203a0f4666; BYTE $0x0d // pinsrb xmm12, byte [rsi + r11 + 13], 13 - QUAD $0x0d2664203a0f4666; BYTE $0x0e // pinsrb xmm12, byte [rsi + r12 + 13], 14 + QUAD $0x0d3664203a0f4666; BYTE $0x0e // pinsrb xmm12, byte [rsi + r14 + 13], 14 QUAD $0x0d1664203a0f4466; BYTE $0x0f // pinsrb xmm12, byte [rsi + rdx + 13], 15 LONG $0x740f4566; BYTE $0xd9 // pcmpeqb xmm11, xmm9 QUAD $0x0000c09ddb0f4466; BYTE $0x00 // pand xmm11, oword 192[rbp] /* [rip + .LCPI1_12] */ LONG $0x740f4566; BYTE $0xe9 // pcmpeqb xmm13, xmm9 QUAD $0x0000d0addb0f4466; BYTE $0x00 // pand xmm13, oword 208[rbp] /* [rip + .LCPI1_13] */ LONG $0xeb0f4566; BYTE $0xeb // por xmm13, xmm11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] LONG $0x0654b60f; BYTE $0x19 // movzx edx, byte [rsi + rax + 25] LONG $0xca6e0f66 // movd xmm1, edx LONG $0x740f4566; BYTE $0xe1 // pcmpeqb xmm12, xmm9 @@ -6114,32 +6568,32 @@ LBB1_87: LONG $0xeb0f4566; BYTE $0xe5 // por xmm12, xmm13 LONG $0x0654b60f; BYTE $0x1a // movzx edx, byte [rsi + rax + 26] LONG $0x6e0f4466; BYTE $0xda // movd xmm11, edx - QUAD $0x00012024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 288] + QUAD $0x0000f024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 240] QUAD $0x0e1664203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rsi + r10 + 14], 1 - LONG $0x24648b4c; BYTE $0x40 // mov r12, qword [rsp + 64] - QUAD $0x0e2664203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r12 + 14], 2 + LONG $0x24748b4c; BYTE $0x50 // mov r14, qword [rsp + 80] + QUAD $0x0e3664203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r14 + 14], 2 WORD $0x894d; BYTE $0xea // mov r10, r13 QUAD $0x0e2e64203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r13 + 14], 3 - LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] + LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] QUAD $0x0e1e64203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rsi + r11 + 14], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x050e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 14], 5 QUAD $0x060e0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 14], 6 QUAD $0x070e3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 14], 7 QUAD $0x0e0664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r8 + 14], 8 QUAD $0x0e0e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r9 + 14], 9 QUAD $0x0a0e1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 14], 10 - QUAD $0x0e3664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r14 + 14], 11 - QUAD $0x0e3e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r15 + 14], 12 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x0e3e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 14], 11 + QUAD $0x0e2664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 14], 12 + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x0d0e1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 14], 13 - LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] + LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] QUAD $0x0e2e64203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r13 + 14], 14 - LONG 
$0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] QUAD $0x0f0e1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 14], 15 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] QUAD $0x0f1674203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rsi + rdx + 15], 1 - QUAD $0x0f2674203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rsi + r12 + 15], 2 + QUAD $0x0f3674203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rsi + r14 + 15], 2 QUAD $0x0f1674203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rsi + r10 + 15], 3 QUAD $0x0f1e74203a0f4666; BYTE $0x04 // pinsrb xmm14, byte [rsi + r11 + 15], 4 QUAD $0x0f0674203a0f4466; BYTE $0x05 // pinsrb xmm14, byte [rsi + rax + 15], 5 @@ -6148,16 +6602,16 @@ LBB1_87: QUAD $0x0f0674203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r8 + 15], 8 QUAD $0x0f0e74203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r9 + 15], 9 QUAD $0x0f1e74203a0f4466; BYTE $0x0a // pinsrb xmm14, byte [rsi + rbx + 15], 10 - QUAD $0x0f3674203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r14 + 15], 11 - QUAD $0x0f3e74203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rsi + r15 + 15], 12 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x0f3e74203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r15 + 15], 11 + QUAD $0x0f2674203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rsi + r12 + 15], 12 + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x0f1674203a0f4466; BYTE $0x0d // pinsrb xmm14, byte [rsi + rdx + 15], 13 QUAD $0x0f2e74203a0f4666; BYTE $0x0e // pinsrb xmm14, byte [rsi + r13 + 15], 14 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] QUAD $0x0f1674203a0f4466; BYTE $0x0f // pinsrb xmm14, byte [rsi + rdx + 15], 15 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] QUAD $0x10167c203a0f4466; BYTE $0x01 // pinsrb xmm15, byte [rsi + rdx + 16], 1 - QUAD $0x10267c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rsi + r12 + 16], 2 + QUAD $0x10367c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rsi + r14 + 16], 2 QUAD $0x10167c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rsi + r10 + 16], 3 QUAD $0x101e7c203a0f4666; BYTE $0x04 // pinsrb xmm15, byte [rsi + r11 + 16], 4 QUAD $0x10067c203a0f4466; BYTE $0x05 // pinsrb xmm15, byte [rsi + rax + 16], 5 @@ -6166,14 +6620,14 @@ LBB1_87: QUAD $0x10067c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r8 + 16], 8 QUAD $0x100e7c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r9 + 16], 9 QUAD $0x101e7c203a0f4466; BYTE $0x0a // pinsrb xmm15, byte [rsi + rbx + 16], 10 - QUAD $0x10367c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r14 + 16], 11 - QUAD $0x103e7c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r15 + 16], 12 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x103e7c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r15 + 16], 11 + QUAD $0x10267c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r12 + 16], 12 + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x10167c203a0f4466; BYTE $0x0d // pinsrb xmm15, byte [rsi + rdx + 16], 13 QUAD $0x102e7c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r13 + 16], 14 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] QUAD $0x01111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 1 - QUAD $0x112644203a0f4266; BYTE $0x02 
// pinsrb xmm0, byte [rsi + r12 + 17], 2 + QUAD $0x113644203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r14 + 17], 2 QUAD $0x111644203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r10 + 17], 3 QUAD $0x111e44203a0f4266; BYTE $0x04 // pinsrb xmm0, byte [rsi + r11 + 17], 4 QUAD $0x05110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 5 @@ -6183,26 +6637,26 @@ LBB1_87: QUAD $0x110644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r8 + 17], 8 QUAD $0x110e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r9 + 17], 9 QUAD $0x0a111e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 17], 10 - QUAD $0x113644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r14 + 17], 11 - QUAD $0x113e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r15 + 17], 12 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x113e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r15 + 17], 11 + QUAD $0x112644203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r12 + 17], 12 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0d110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 13 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x48 // mov rdx, qword [rsp + 72] QUAD $0x0e111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 14 - QUAD $0x00c024a4eb0f4466; WORD $0x0000 // por xmm12, oword [rsp + 192] - LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] - LONG $0x54b60f42; WORD $0x1b26 // movzx edx, byte [rsi + r12 + 27] + QUAD $0x00a024a4eb0f4466; WORD $0x0000 // por xmm12, oword [rsp + 160] + LONG $0x24748b4c; BYTE $0x28 // mov r14, qword [rsp + 40] + LONG $0x54b60f42; WORD $0x1b36 // movzx edx, byte [rsi + r14 + 27] LONG $0x6e0f4466; BYTE $0xca // movd xmm9, edx - QUAD $0x00d024ac6f0f4466; WORD $0x0000 // movdqa xmm13, oword [rsp + 208] + QUAD $0x00c024ac6f0f4466; WORD $0x0000 // movdqa xmm13, oword [rsp + 192] LONG $0x740f4166; BYTE $0xe5 // pcmpeqb xmm4, xmm13 QUAD $0x000000f0a5db0f66 // pand xmm4, oword 240[rbp] /* [rip + .LCPI1_15] */ LONG $0x740f4566; BYTE $0xf5 // pcmpeqb xmm14, xmm13 LONG $0x710f4166; WORD $0x07f6 // psllw xmm14, 7 LONG $0xdb0f4466; WORD $0x6075 // pand xmm14, oword 96[rbp] /* [rip + .LCPI1_6] */ LONG $0xeb0f4466; BYTE $0xf4 // por xmm14, xmm4 - LONG $0x54b60f42; WORD $0x1c26 // movzx edx, byte [rsi + r12 + 28] + LONG $0x54b60f42; WORD $0x1c36 // movzx edx, byte [rsi + r14 + 28] LONG $0xe26e0f66 // movd xmm4, edx - LONG $0x24448b4c; BYTE $0x10 // mov r8, qword [rsp + 16] + LONG $0x24448b4c; BYTE $0x40 // mov r8, qword [rsp + 64] QUAD $0x110644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r8 + 17], 15 LONG $0xeb0f4566; BYTE $0xf4 // por xmm14, xmm12 LONG $0x740f4166; BYTE $0xc5 // pcmpeqb xmm0, xmm13 @@ -6210,54 +6664,54 @@ LBB1_87: QUAD $0x0000a0a56f0f4466; BYTE $0x00 // movdqa xmm12, oword 160[rbp] /* [rip + .LCPI1_10] */ LONG $0xdb0f4566; BYTE $0xec // pand xmm13, xmm12 LONG $0xf80f4466; BYTE $0xe8 // psubb xmm13, xmm0 - QUAD $0x00c024ac7f0f4466; WORD $0x0000 // movdqa oword [rsp + 192], xmm13 - LONG $0x54b60f42; WORD $0x1d26 // movzx edx, byte [rsi + r12 + 29] + QUAD $0x00a024ac7f0f4466; WORD $0x0000 // movdqa oword [rsp + 160], xmm13 + LONG $0x54b60f42; WORD $0x1d36 // movzx edx, byte [rsi + r14 + 29] LONG $0x6e0f4466; BYTE $0xea // movd xmm13, edx QUAD $0x10067c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r8 + 16], 15 - QUAD $0x0000d024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 208] + QUAD $0x0000c024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 192] LONG $0x740f4466; BYTE $0xf8 // 
pcmpeqb xmm15, xmm0 - LONG $0x24648b4c; BYTE $0x28 // mov r12, qword [rsp + 40] - QUAD $0x12266c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rsi + r12 + 18], 1 - LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x08 // mov r14, qword [rsp + 8] + QUAD $0x12366c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rsi + r14 + 18], 1 + LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] QUAD $0x0212166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 18], 2 QUAD $0x12166c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r10 + 18], 3 QUAD $0x121e6c203a0f4266; BYTE $0x04 // pinsrb xmm5, byte [rsi + r11 + 18], 4 QUAD $0x122e6c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r13 + 18], 5 QUAD $0x06120e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 18], 6 QUAD $0x07123e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 18], 7 - LONG $0x24548b48; BYTE $0x48 // mov rdx, qword [rsp + 72] + LONG $0x24548b48; BYTE $0x38 // mov rdx, qword [rsp + 56] QUAD $0x0812166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 18], 8 QUAD $0x120e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r9 + 18], 9 QUAD $0x0a121e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 18], 10 - QUAD $0x12366c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r14 + 18], 11 - QUAD $0x123e6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r15 + 18], 12 + QUAD $0x123e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r15 + 18], 11 + QUAD $0x12266c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r12 + 18], 12 QUAD $0x0d12066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 18], 13 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0e12066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 18], 14 LONG $0xdb0f4566; BYTE $0xfc // pand xmm15, xmm12 QUAD $0x12066c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r8 + 18], 15 LONG $0xe8740f66 // pcmpeqb xmm5, xmm0 QUAD $0x000000b0addb0f66 // pand xmm5, oword 176[rbp] /* [rip + .LCPI1_11] */ LONG $0xeb0f4166; BYTE $0xef // por xmm5, xmm15 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] LONG $0x0654b60f; BYTE $0x1e // movzx edx, byte [rsi + rax + 30] LONG $0x6e0f4466; BYTE $0xe2 // movd xmm12, edx - QUAD $0x13267c203a0f4266; BYTE $0x01 // pinsrb xmm7, byte [rsi + r12 + 19], 1 - QUAD $0x142674203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r12 + 20], 1 - QUAD $0x152654203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r12 + 21], 1 - QUAD $0x16265c203a0f4266; BYTE $0x01 // pinsrb xmm3, byte [rsi + r12 + 22], 1 - QUAD $0x172644203a0f4666; BYTE $0x01 // pinsrb xmm8, byte [rsi + r12 + 23], 1 - QUAD $0x182654203a0f4666; BYTE $0x01 // pinsrb xmm10, byte [rsi + r12 + 24], 1 - QUAD $0x19264c203a0f4266; BYTE $0x01 // pinsrb xmm1, byte [rsi + r12 + 25], 1 - QUAD $0x1a265c203a0f4666; BYTE $0x01 // pinsrb xmm11, byte [rsi + r12 + 26], 1 - QUAD $0x1b264c203a0f4666; BYTE $0x01 // pinsrb xmm9, byte [rsi + r12 + 27], 1 - QUAD $0x1c2664203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rsi + r12 + 28], 1 - QUAD $0x1d266c203a0f4666; BYTE $0x01 // pinsrb xmm13, byte [rsi + r12 + 29], 1 - QUAD $0x1e2664203a0f4666; BYTE $0x01 // pinsrb xmm12, byte [rsi + r12 + 30], 1 + QUAD $0x13367c203a0f4266; BYTE $0x01 // pinsrb xmm7, byte [rsi + r14 + 19], 1 + QUAD $0x143674203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r14 + 20], 1 + QUAD $0x153654203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r14 + 21], 1 + QUAD $0x16365c203a0f4266; BYTE $0x01 // pinsrb xmm3, byte [rsi 
+ r14 + 22], 1 + QUAD $0x173644203a0f4666; BYTE $0x01 // pinsrb xmm8, byte [rsi + r14 + 23], 1 + QUAD $0x183654203a0f4666; BYTE $0x01 // pinsrb xmm10, byte [rsi + r14 + 24], 1 + QUAD $0x19364c203a0f4266; BYTE $0x01 // pinsrb xmm1, byte [rsi + r14 + 25], 1 + QUAD $0x1a365c203a0f4666; BYTE $0x01 // pinsrb xmm11, byte [rsi + r14 + 26], 1 + QUAD $0x1b364c203a0f4666; BYTE $0x01 // pinsrb xmm9, byte [rsi + r14 + 27], 1 + QUAD $0x1c3664203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rsi + r14 + 28], 1 + QUAD $0x1d366c203a0f4666; BYTE $0x01 // pinsrb xmm13, byte [rsi + r14 + 29], 1 + QUAD $0x1e3664203a0f4666; BYTE $0x01 // pinsrb xmm12, byte [rsi + r14 + 30], 1 LONG $0x0654b60f; BYTE $0x1f // movzx edx, byte [rsi + rax + 31] LONG $0xc26e0f66 // movd xmm0, edx - QUAD $0x1f2644203a0f4266; BYTE $0x01 // pinsrb xmm0, byte [rsi + r12 + 31], 1 - LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] + QUAD $0x1f3644203a0f4266; BYTE $0x01 // pinsrb xmm0, byte [rsi + r14 + 31], 1 + LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] QUAD $0x0213167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 19], 2 QUAD $0x02141674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 20], 2 QUAD $0x02151654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 21], 2 @@ -6276,15 +6730,15 @@ LBB1_87: QUAD $0x132e7c203a0f4266; BYTE $0x05 // pinsrb xmm7, byte [rsi + r13 + 19], 5 QUAD $0x06130e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 19], 6 QUAD $0x07133e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 19], 7 - LONG $0x24648b4c; BYTE $0x48 // mov r12, qword [rsp + 72] - QUAD $0x13267c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r12 + 19], 8 + LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] + QUAD $0x13367c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r14 + 19], 8 QUAD $0x130e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r9 + 19], 9 QUAD $0x0a131e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 19], 10 - QUAD $0x13367c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r14 + 19], 11 - QUAD $0x133e7c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r15 + 19], 12 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x133e7c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r15 + 19], 11 + QUAD $0x13267c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r12 + 19], 12 + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x0d13167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 19], 13 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0e13067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 19], 14 QUAD $0x13067c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r8 + 19], 15 QUAD $0x141674203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r10 + 20], 3 @@ -6292,16 +6746,16 @@ LBB1_87: QUAD $0x142e74203a0f4266; BYTE $0x05 // pinsrb xmm6, byte [rsi + r13 + 20], 5 QUAD $0x06140e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 20], 6 QUAD $0x07143e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 20], 7 - QUAD $0x142674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r12 + 20], 8 + QUAD $0x143674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r14 + 20], 8 QUAD $0x140e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r9 + 20], 9 QUAD $0x0a141e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 20], 10 - QUAD $0x143674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r14 + 20], 11 - QUAD $0x143e74203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r15 + 20], 12 + QUAD $0x143e74203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r15 + 20], 11 + 
QUAD $0x142674203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r12 + 20], 12 QUAD $0x0d141674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 20], 13 QUAD $0x0e140674203a0f66 // pinsrb xmm6, byte [rsi + rax + 20], 14 - QUAD $0x0000c024aceb0f66; BYTE $0x00 // por xmm5, oword [rsp + 192] + QUAD $0x0000a024aceb0f66; BYTE $0x00 // por xmm5, oword [rsp + 160] QUAD $0x140674203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r8 + 20], 15 - QUAD $0x00d024bc6f0f4466; WORD $0x0000 // movdqa xmm15, oword [rsp + 208] + QUAD $0x00c024bc6f0f4466; WORD $0x0000 // movdqa xmm15, oword [rsp + 192] LONG $0x740f4166; BYTE $0xff // pcmpeqb xmm7, xmm15 QUAD $0x000000c0bddb0f66 // pand xmm7, oword 192[rbp] /* [rip + .LCPI1_12] */ LONG $0x740f4166; BYTE $0xf7 // pcmpeqb xmm6, xmm15 @@ -6312,11 +6766,11 @@ LBB1_87: QUAD $0x152e54203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r13 + 21], 5 QUAD $0x06150e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 21], 6 QUAD $0x07153e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 21], 7 - QUAD $0x152654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r12 + 21], 8 + QUAD $0x153654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r14 + 21], 8 QUAD $0x150e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r9 + 21], 9 QUAD $0x0a151e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 21], 10 - QUAD $0x153654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r14 + 21], 11 - QUAD $0x153e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r15 + 21], 12 + QUAD $0x153e54203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r15 + 21], 11 + QUAD $0x152654203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r12 + 21], 12 QUAD $0x0d151654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 21], 13 QUAD $0x0e150654203a0f66 // pinsrb xmm2, byte [rsi + rax + 21], 14 QUAD $0x150654203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r8 + 21], 15 @@ -6330,11 +6784,11 @@ LBB1_87: QUAD $0x162e5c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r13 + 22], 5 QUAD $0x06160e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 22], 6 QUAD $0x07163e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 22], 7 - QUAD $0x16265c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r12 + 22], 8 + QUAD $0x16365c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r14 + 22], 8 QUAD $0x160e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r9 + 22], 9 QUAD $0x0a161e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 22], 10 - QUAD $0x16365c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r14 + 22], 11 - QUAD $0x163e5c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r15 + 22], 12 + QUAD $0x163e5c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r15 + 22], 11 + QUAD $0x16265c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r12 + 22], 12 QUAD $0x0d16165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 22], 13 QUAD $0x0e16065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 22], 14 QUAD $0x16065c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r8 + 22], 15 @@ -6343,11 +6797,11 @@ LBB1_87: QUAD $0x172e44203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r13 + 23], 5 QUAD $0x170e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rcx + 23], 6 QUAD $0x173e44203a0f4466; BYTE $0x07 // pinsrb xmm8, byte [rsi + rdi + 23], 7 - QUAD $0x172644203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r12 + 23], 8 + QUAD $0x173644203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r14 + 23], 8 QUAD $0x170e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r9 + 23], 9 QUAD $0x171e44203a0f4466; BYTE $0x0a // pinsrb xmm8, byte [rsi + rbx + 23], 10 - QUAD $0x173644203a0f4666; BYTE $0x0b 
// pinsrb xmm8, byte [rsi + r14 + 23], 11 - QUAD $0x173e44203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r15 + 23], 12 + QUAD $0x173e44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r15 + 23], 11 + QUAD $0x172644203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r12 + 23], 12 QUAD $0x171644203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rsi + rdx + 23], 13 QUAD $0x170644203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rax + 23], 14 QUAD $0x170644203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r8 + 23], 15 @@ -6364,11 +6818,11 @@ LBB1_87: QUAD $0x192e4c203a0f4266; BYTE $0x05 // pinsrb xmm1, byte [rsi + r13 + 25], 5 QUAD $0x06190e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 25], 6 QUAD $0x07193e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 25], 7 - QUAD $0x19264c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r12 + 25], 8 + QUAD $0x19364c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r14 + 25], 8 QUAD $0x190e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r9 + 25], 9 QUAD $0x0a191e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 25], 10 - QUAD $0x19364c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r14 + 25], 11 - QUAD $0x193e4c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r15 + 25], 12 + QUAD $0x193e4c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r15 + 25], 11 + QUAD $0x19264c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r12 + 25], 12 QUAD $0x0d19164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 25], 13 QUAD $0x0e19064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 25], 14 QUAD $0x19064c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r8 + 25], 15 @@ -6383,11 +6837,11 @@ LBB1_87: QUAD $0x182e54203a0f4666; BYTE $0x05 // pinsrb xmm10, byte [rsi + r13 + 24], 5 QUAD $0x180e54203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rcx + 24], 6 QUAD $0x183e54203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rdi + 24], 7 - QUAD $0x182654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r12 + 24], 8 + QUAD $0x183654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r14 + 24], 8 QUAD $0x180e54203a0f4666; BYTE $0x09 // pinsrb xmm10, byte [rsi + r9 + 24], 9 QUAD $0x181e54203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rbx + 24], 10 - QUAD $0x183654203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r14 + 24], 11 - QUAD $0x183e54203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r15 + 24], 12 + QUAD $0x183e54203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r15 + 24], 11 + QUAD $0x182654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r12 + 24], 12 QUAD $0x181654203a0f4466; BYTE $0x0d // pinsrb xmm10, byte [rsi + rdx + 24], 13 QUAD $0x180654203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rax + 24], 14 QUAD $0x180654203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r8 + 24], 15 @@ -6398,11 +6852,11 @@ LBB1_87: QUAD $0x1a2e5c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r13 + 26], 5 QUAD $0x1a0e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rcx + 26], 6 QUAD $0x1a3e5c203a0f4466; BYTE $0x07 // pinsrb xmm11, byte [rsi + rdi + 26], 7 - QUAD $0x1a265c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r12 + 26], 8 + QUAD $0x1a365c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r14 + 26], 8 QUAD $0x1a0e5c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r9 + 26], 9 QUAD $0x1a1e5c203a0f4466; BYTE $0x0a // pinsrb xmm11, byte [rsi + rbx + 26], 10 - QUAD $0x1a365c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r14 + 26], 11 - QUAD $0x1a3e5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r15 + 26], 12 + QUAD $0x1a3e5c203a0f4666; BYTE $0x0b 
// pinsrb xmm11, byte [rsi + r15 + 26], 11 + QUAD $0x1a265c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r12 + 26], 12 QUAD $0x1a165c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rdx + 26], 13 QUAD $0x1a065c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rax + 26], 14 QUAD $0x1a065c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r8 + 26], 15 @@ -6415,11 +6869,11 @@ LBB1_87: QUAD $0x1b2e4c203a0f4666; BYTE $0x05 // pinsrb xmm9, byte [rsi + r13 + 27], 5 QUAD $0x1b0e4c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rcx + 27], 6 QUAD $0x1b3e4c203a0f4466; BYTE $0x07 // pinsrb xmm9, byte [rsi + rdi + 27], 7 - QUAD $0x1b264c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r12 + 27], 8 + QUAD $0x1b364c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r14 + 27], 8 QUAD $0x1b0e4c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r9 + 27], 9 QUAD $0x1b1e4c203a0f4466; BYTE $0x0a // pinsrb xmm9, byte [rsi + rbx + 27], 10 - QUAD $0x1b364c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r14 + 27], 11 - QUAD $0x1b3e4c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r15 + 27], 12 + QUAD $0x1b3e4c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r15 + 27], 11 + QUAD $0x1b264c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r12 + 27], 12 QUAD $0x1b164c203a0f4466; BYTE $0x0d // pinsrb xmm9, byte [rsi + rdx + 27], 13 QUAD $0x1b064c203a0f4466; BYTE $0x0e // pinsrb xmm9, byte [rsi + rax + 27], 14 QUAD $0x1b064c203a0f4666; BYTE $0x0f // pinsrb xmm9, byte [rsi + r8 + 27], 15 @@ -6428,11 +6882,11 @@ LBB1_87: QUAD $0x1c2e64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r13 + 28], 5 QUAD $0x061c0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 28], 6 QUAD $0x071c3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 28], 7 - QUAD $0x1c2664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r12 + 28], 8 + QUAD $0x1c3664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r14 + 28], 8 QUAD $0x1c0e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r9 + 28], 9 QUAD $0x0a1c1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 28], 10 - QUAD $0x1c3664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r14 + 28], 11 - QUAD $0x1c3e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r15 + 28], 12 + QUAD $0x1c3e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 28], 11 + QUAD $0x1c2664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 28], 12 QUAD $0x0d1c1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 28], 13 QUAD $0x0e1c0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 28], 14 QUAD $0x1c0664203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r8 + 28], 15 @@ -6441,11 +6895,11 @@ LBB1_87: QUAD $0x1d2e6c203a0f4666; BYTE $0x05 // pinsrb xmm13, byte [rsi + r13 + 29], 5 QUAD $0x1d0e6c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rcx + 29], 6 QUAD $0x1d3e6c203a0f4466; BYTE $0x07 // pinsrb xmm13, byte [rsi + rdi + 29], 7 - QUAD $0x1d266c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r12 + 29], 8 + QUAD $0x1d366c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r14 + 29], 8 QUAD $0x1d0e6c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r9 + 29], 9 QUAD $0x1d1e6c203a0f4466; BYTE $0x0a // pinsrb xmm13, byte [rsi + rbx + 29], 10 - QUAD $0x1d366c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r14 + 29], 11 - QUAD $0x1d3e6c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r15 + 29], 12 + QUAD $0x1d3e6c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r15 + 29], 11 + QUAD $0x1d266c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r12 + 29], 12 QUAD $0x1d166c203a0f4466; BYTE $0x0d // pinsrb 
xmm13, byte [rsi + rdx + 29], 13 QUAD $0x1d066c203a0f4466; BYTE $0x0e // pinsrb xmm13, byte [rsi + rax + 29], 14 LONG $0x6f0f4166; BYTE $0xcf // movdqa xmm1, xmm15 @@ -6468,19 +6922,19 @@ LBB1_87: QUAD $0x061f0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 31], 6 QUAD $0x1e3e64203a0f4466; BYTE $0x07 // pinsrb xmm12, byte [rsi + rdi + 30], 7 QUAD $0x071f3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 31], 7 - QUAD $0x1e2664203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r12 + 30], 8 - QUAD $0x1f2644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r12 + 31], 8 + QUAD $0x1e3664203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r14 + 30], 8 + QUAD $0x1f3644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r14 + 31], 8 QUAD $0x1e0e64203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r9 + 30], 9 QUAD $0x1f0e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r9 + 31], 9 QUAD $0x1e1e64203a0f4466; BYTE $0x0a // pinsrb xmm12, byte [rsi + rbx + 30], 10 QUAD $0x0a1f1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 31], 10 - QUAD $0x1e3664203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r14 + 30], 11 - QUAD $0x1f3644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r14 + 31], 11 - QUAD $0x1e3e64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r15 + 30], 12 - QUAD $0x1f3e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r15 + 31], 12 + QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] + QUAD $0x1e3e64203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r15 + 30], 11 + QUAD $0x1f3e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r15 + 31], 11 + QUAD $0x1e2664203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r12 + 30], 12 + QUAD $0x1f2644203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r12 + 31], 12 QUAD $0x1e1664203a0f4466; BYTE $0x0d // pinsrb xmm12, byte [rsi + rdx + 30], 13 QUAD $0x0d1f1644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 31], 13 - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] QUAD $0x1e0664203a0f4466; BYTE $0x0e // pinsrb xmm12, byte [rsi + rax + 30], 14 QUAD $0x0e1f0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 31], 14 QUAD $0x1e0664203a0f4666; BYTE $0x0f // pinsrb xmm12, byte [rsi + r8 + 30], 15 @@ -6495,7 +6949,7 @@ LBB1_87: LONG $0xeb0f4166; BYTE $0xc5 // por xmm0, xmm13 LONG $0x6f0f4166; BYTE $0xc8 // movdqa xmm1, xmm8 LONG $0xc8600f66 // punpcklbw xmm1, xmm0 - QUAD $0x0000e024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 224] + QUAD $0x0000b024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 176] LONG $0xd46f0f66 // movdqa xmm2, xmm4 LONG $0x600f4166; BYTE $0xd6 // punpcklbw xmm2, xmm14 LONG $0xda6f0f66 // movdqa xmm3, xmm2 @@ -6507,40 +6961,40 @@ LBB1_87: LONG $0x610f4166; BYTE $0xc0 // punpcklwd xmm0, xmm8 LONG $0x690f4166; BYTE $0xe0 // punpckhwd xmm4, xmm8 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - LONG $0x7f0f41f3; WORD $0x8e64; BYTE $0x30 // movdqu oword [r14 + 4*rcx + 48], xmm4 - LONG $0x7f0f41f3; WORD $0x8e44; BYTE $0x20 // movdqu oword [r14 + 4*rcx + 32], xmm0 - LONG $0x7f0f41f3; WORD $0x8e54; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm2 - LONG $0x7f0f41f3; WORD $0x8e1c // movdqu oword [r14 + 4*rcx], xmm3 + LONG $0x647f0ff3; WORD $0x308f // movdqu oword [rdi + 4*rcx + 48], xmm4 + LONG $0x447f0ff3; WORD $0x208f // movdqu oword [rdi + 4*rcx + 32], xmm0 + LONG $0x547f0ff3; WORD $0x108f // movdqu oword [rdi + 4*rcx + 16], xmm2 + LONG $0x1c7f0ff3; BYTE $0x8f // movdqu oword [rdi + 4*rcx], xmm3 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x000000f8248c3b48 // cmp rcx, qword [rsp + 
248] - JNE LBB1_87 + QUAD $0x000000e8248c3b48 // cmp rcx, qword [rsp + 232] + JNE LBB1_88 QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] - QUAD $0x000000f824bc3b4c // cmp r15, qword [rsp + 248] - LONG $0x245c8a44; BYTE $0x08 // mov r11b, byte [rsp + 8] + QUAD $0x000000e824bc3b4c // cmp r15, qword [rsp + 232] + LONG $0x245c8a44; BYTE $0x04 // mov r11b, byte [rsp + 4] QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - JNE LBB1_89 - JMP LBB1_92 + JNE LBB1_90 + JMP LBB1_93 -LBB1_66: +LBB1_67: LONG $0xf0e78349 // and r15, -16 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi QUAD $0x0000010824848948 // mov qword [rsp + 264], rax - QUAD $0x000000f824bc894c // mov qword [rsp + 248], r15 + QUAD $0x000000e824bc894c // mov qword [rsp + 232], r15 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] - LONG $0x24448948; BYTE $0x50 // mov qword [rsp + 80], rax + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax LONG $0xc3b60f41 // movzx eax, r11b LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 - QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 + QUAD $0x0000c0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm1 WORD $0xc031 // xor eax, eax QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 -LBB1_67: +LBB1_68: WORD $0x8949; BYTE $0xc7 // mov r15, rax QUAD $0x0000009824848948 // mov qword [rsp + 152], rax LONG $0x05e7c149 // shl r15, 5 @@ -6554,7 +7008,7 @@ LBB1_67: WORD $0x894c; BYTE $0xfb // mov rbx, r15 WORD $0x894d; BYTE $0xfe // mov r14, r15 WORD $0x894c; BYTE $0xf8 // mov rax, r15 - LONG $0x247c894c; BYTE $0x70 // mov qword [rsp + 112], r15 + LONG $0x247c894c; BYTE $0x68 // mov qword [rsp + 104], r15 LONG $0x14b60f42; BYTE $0x3e // movzx edx, byte [rsi + r15] LONG $0x6e0f4466; BYTE $0xfa // movd xmm15, edx LONG $0x54b60f42; WORD $0x013e // movzx edx, byte [rsi + r15 + 1] @@ -6574,12 +7028,12 @@ LBB1_67: QUAD $0x0000d024847f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm0 LONG $0x54b60f42; WORD $0x083e // movzx edx, byte [rsi + r15 + 8] LONG $0xc26e0f66 // movd xmm0, edx - QUAD $0x00012024847f0f66; BYTE $0x00 // movdqa oword [rsp + 288], xmm0 + QUAD $0x0000f024847f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm0 LONG $0x54b60f42; WORD $0x093e // movzx edx, byte [rsi + r15 + 9] LONG $0x6e0f4466; BYTE $0xd2 // movd xmm10, edx LONG $0x54b60f42; WORD $0x0a3e // movzx edx, byte [rsi + r15 + 10] LONG $0xc26e0f66 // movd xmm0, edx - QUAD $0x0000a024847f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm0 + QUAD $0x0000b024847f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm0 LONG $0x54b60f42; WORD $0x0b3e // movzx edx, byte [rsi + r15 + 11] LONG $0x6e0f4466; BYTE $0xda // movd xmm11, edx LONG $0x54b60f42; WORD $0x0c3e // movzx edx, byte [rsi + r15 + 12] @@ -6589,13 +7043,13 @@ LBB1_67: LONG $0x54b60f42; WORD $0x0e3e // movzx edx, byte [rsi + r15 + 14] LONG $0xc26e0f66 // movd xmm0, edx QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x30 // mov qword [rsp + 48], r15 WORD $0x894d; BYTE $0xfd // mov r13, r15 LONG $0x20cd8349 // or r13, 32 - LONG $0x246c894c; BYTE $0x18 // mov qword [rsp + 24], r13 + LONG $0x246c894c; BYTE $0x28 // mov qword [rsp + 40], r13 LONG $0x40cf8348 // or rdi, 64 LONG $0x60c98348 // or rcx, 96 - QUAD $0x00000080248c8948 // mov qword [rsp + 128], 
rcx + LONG $0x244c8948; BYTE $0x78 // mov qword [rsp + 120], rcx LONG $0x80ca8149; WORD $0x0000; BYTE $0x00 // or r10, 128 LONG $0xa0c88149; WORD $0x0000; BYTE $0x00 // or r8, 160 LONG $0xc0cc8149; WORD $0x0000; BYTE $0x00 // or r12, 192 @@ -6604,52 +7058,52 @@ LBB1_67: LONG $0x20cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 288 LONG $0x40ce8149; WORD $0x0001; BYTE $0x00 // or r14, 320 LONG $0x01600d48; WORD $0x0000 // or rax, 352 - LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x0000008024848948 // mov qword [rsp + 128], rax + LONG $0x24548b48; BYTE $0x68 // mov rdx, qword [rsp + 104] LONG $0x80ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 384 - LONG $0x24548948; BYTE $0x70 // mov qword [rsp + 112], rdx + LONG $0x24548948; BYTE $0x68 // mov qword [rsp + 104], rdx WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x01a00d48; WORD $0x0000 // or rax, 416 - LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax + LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax + LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x01e00d48; WORD $0x0000 // or rax, 480 - LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax QUAD $0x012e3c203a0f4666 // pinsrb xmm15, byte [rsi + r13], 1 QUAD $0x023e3c203a0f4466 // pinsrb xmm15, byte [rsi + rdi], 2 QUAD $0x030e3c203a0f4466 // pinsrb xmm15, byte [rsi + rcx], 3 QUAD $0x04163c203a0f4666 // pinsrb xmm15, byte [rsi + r10], 4 WORD $0x894d; BYTE $0xc7 // mov r15, r8 - LONG $0x2444894c; BYTE $0x78 // mov qword [rsp + 120], r8 + LONG $0x2444894c; BYTE $0x48 // mov qword [rsp + 72], r8 QUAD $0x05063c203a0f4666 // pinsrb xmm15, byte [rsi + r8], 5 - LONG $0x2464894c; BYTE $0x68 // mov qword [rsp + 104], r12 + LONG $0x2464894c; BYTE $0x60 // mov qword [rsp + 96], r12 QUAD $0x06263c203a0f4666 // pinsrb xmm15, byte [rsi + r12], 6 WORD $0x894d; BYTE $0xc8 // mov r8, r9 QUAD $0x070e3c203a0f4666 // pinsrb xmm15, byte [rsi + r9], 7 WORD $0x894d; BYTE $0xd9 // mov r9, r11 - LONG $0x245c894c; BYTE $0x10 // mov qword [rsp + 16], r11 + LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 QUAD $0x081e3c203a0f4666 // pinsrb xmm15, byte [rsi + r11], 8 LONG $0x245c8948; BYTE $0x40 // mov qword [rsp + 64], rbx QUAD $0x091e3c203a0f4466 // pinsrb xmm15, byte [rsi + rbx], 9 - LONG $0x2474894c; BYTE $0x60 // mov qword [rsp + 96], r14 + LONG $0x2474894c; BYTE $0x58 // mov qword [rsp + 88], r14 QUAD $0x0a363c203a0f4666 // pinsrb xmm15, byte [rsi + r14], 10 - LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] + QUAD $0x0000008024ac8b4c // mov r13, qword [rsp + 128] QUAD $0x0b2e3c203a0f4666 // pinsrb xmm15, byte [rsi + r13], 11 QUAD $0x0c163c203a0f4466 // pinsrb xmm15, byte [rsi + rdx], 12 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] QUAD $0x0d0e3c203a0f4466 // pinsrb xmm15, byte [rsi + rcx], 13 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x0e0e3c203a0f4466 // pinsrb xmm15, byte [rsi + rcx], 14 QUAD $0x0f063c203a0f4466 // pinsrb xmm15, byte [rsi + rax], 15 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] + LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] QUAD 
$0x011e6c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rsi + r11 + 1], 1 QUAD $0x02013e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 1], 2 - QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] + LONG $0x245c8b4c; BYTE $0x78 // mov r11, qword [rsp + 120] QUAD $0x011e6c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r11 + 1], 3 QUAD $0x01166c203a0f4266; BYTE $0x04 // pinsrb xmm5, byte [rsi + r10 + 1], 4 - LONG $0x2454894c; BYTE $0x58 // mov qword [rsp + 88], r10 + LONG $0x2454894c; BYTE $0x50 // mov qword [rsp + 80], r10 QUAD $0x013e6c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r15 + 1], 5 QUAD $0x01266c203a0f4266; BYTE $0x06 // pinsrb xmm5, byte [rsi + r12 + 1], 6 QUAD $0x01066c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r8 + 1], 7 @@ -6660,46 +7114,46 @@ LBB1_67: QUAD $0x012e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r13 + 1], 11 WORD $0x894d; BYTE $0xe8 // mov r8, r13 QUAD $0x0c01166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 1], 12 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] QUAD $0x0d01166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 1], 13 QUAD $0x0e010e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 1], 14 QUAD $0x0f01066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 1], 15 - QUAD $0x00b0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 176] + QUAD $0x00c0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 192] LONG $0x740f4166; BYTE $0xe9 // pcmpeqb xmm5, xmm9 LONG $0xfd6f0f66 // movdqa xmm7, xmm5 QUAD $0x000000a0a56f0f66 // movdqa xmm4, oword 160[rbp] /* [rip + .LCPI1_10] */ LONG $0xfcdb0f66 // pand xmm7, xmm4 LONG $0xfdf80f66 // psubb xmm7, xmm5 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] LONG $0x0654b60f; BYTE $0x0f // movzx edx, byte [rsi + rax + 15] LONG $0x6e0f4466; BYTE $0xf2 // movd xmm14, edx LONG $0x740f4566; BYTE $0xf9 // pcmpeqb xmm15, xmm9 - LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] + LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] QUAD $0x01021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 1 QUAD $0x02023e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 2], 2 WORD $0x894d; BYTE $0xdc // mov r12, r11 QUAD $0x021e74203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r11 + 2], 3 QUAD $0x021674203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r10 + 2], 4 - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] QUAD $0x05020e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 2], 5 - LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] + LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] QUAD $0x021e74203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r11 + 2], 6 - QUAD $0x000000c024bc894c // mov qword [rsp + 192], r15 + LONG $0x247c894c; BYTE $0x70 // mov qword [rsp + 112], r15 QUAD $0x023e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r15 + 2], 7 - LONG $0x246c8b4c; BYTE $0x10 // mov r13, qword [rsp + 16] + LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] QUAD $0x022e74203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r13 + 2], 8 LONG $0x24748b4c; BYTE $0x40 // mov r14, qword [rsp + 64] QUAD $0x023674203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r14 + 2], 9 - LONG $0x244c8b4c; BYTE $0x60 // mov r9, qword [rsp + 96] + LONG $0x244c8b4c; BYTE $0x58 // mov r9, qword [rsp + 88] QUAD $0x020e74203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r9 + 2], 10 QUAD $0x020674203a0f4266; BYTE 
$0x0b // pinsrb xmm6, byte [rsi + r8 + 2], 11 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + LONG $0x24548b48; BYTE $0x68 // mov rdx, qword [rsp + 104] QUAD $0x0c021674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 2], 12 - LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] + LONG $0x24548b4c; BYTE $0x08 // mov r10, qword [rsp + 8] QUAD $0x021674203a0f4266; BYTE $0x0d // pinsrb xmm6, byte [rsi + r10 + 2], 13 - LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] - QUAD $0x0e021674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 2], 14 LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + QUAD $0x0e021674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 2], 14 + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] QUAD $0x0f021674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 2], 15 LONG $0xdb0f4466; BYTE $0xfc // pand xmm15, xmm4 LONG $0x740f4166; BYTE $0xf1 // pcmpeqb xmm6, xmm9 @@ -6712,7 +7166,7 @@ LBB1_67: QUAD $0x02033e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 3], 2 WORD $0x894c; BYTE $0xe0 // mov rax, r12 QUAD $0x032654203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r12 + 3], 3 - LONG $0x24648b4c; BYTE $0x58 // mov r12, qword [rsp + 88] + LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] QUAD $0x032654203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r12 + 3], 4 QUAD $0x05030e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 3], 5 QUAD $0x031e54203a0f4266; BYTE $0x06 // pinsrb xmm2, byte [rsi + r11 + 3], 6 @@ -6722,12 +7176,12 @@ LBB1_67: QUAD $0x030e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r9 + 3], 10 WORD $0x894d; BYTE $0xce // mov r14, r9 QUAD $0x030654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r8 + 3], 11 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] + LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] QUAD $0x033e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r15 + 3], 12 QUAD $0x031654203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r10 + 3], 13 - LONG $0x246c8b4c; BYTE $0x30 // mov r13, qword [rsp + 48] + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] QUAD $0x032e54203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r13 + 3], 14 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] QUAD $0x0f031654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 3], 15 QUAD $0x01041e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 4], 1 QUAD $0x02043e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 4], 2 @@ -6736,9 +7190,9 @@ LBB1_67: QUAD $0x05040e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 4], 5 WORD $0x894c; BYTE $0xd9 // mov rcx, r11 QUAD $0x041e4c203a0f4266; BYTE $0x06 // pinsrb xmm1, byte [rsi + r11 + 4], 6 - QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] + LONG $0x245c8b4c; BYTE $0x70 // mov r11, qword [rsp + 112] QUAD $0x041e4c203a0f4266; BYTE $0x07 // pinsrb xmm1, byte [rsi + r11 + 4], 7 - LONG $0x244c8b4c; BYTE $0x10 // mov r9, qword [rsp + 16] + LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] QUAD $0x040e4c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r9 + 4], 8 LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] QUAD $0x09041e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 4], 9 @@ -6752,7 +7206,7 @@ LBB1_67: QUAD $0x0f04164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 4], 15 WORD $0x8949; BYTE $0xd2 // mov r10, rdx LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] LONG $0x1e54b60f; BYTE $0x11 // movzx edx, byte [rsi 
+ rbx + 17] LONG $0xc26e0f66 // movd xmm0, edx LONG $0x740f4166; BYTE $0xd1 // pcmpeqb xmm2, xmm9 @@ -6764,13 +7218,13 @@ LBB1_67: LONG $0xcaeb0f66 // por xmm1, xmm2 LONG $0x1e54b60f; BYTE $0x12 // movzx edx, byte [rsi + rbx + 18] LONG $0xea6e0f66 // movd xmm5, edx - LONG $0x24448b4c; BYTE $0x18 // mov r8, qword [rsp + 24] + LONG $0x24448b4c; BYTE $0x28 // mov r8, qword [rsp + 40] QUAD $0x050644203a0f4666; BYTE $0x01 // pinsrb xmm8, byte [rsi + r8 + 5], 1 QUAD $0x053e44203a0f4466; BYTE $0x02 // pinsrb xmm8, byte [rsi + rdi + 5], 2 QUAD $0x050644203a0f4466; BYTE $0x03 // pinsrb xmm8, byte [rsi + rax + 5], 3 - LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] + LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] QUAD $0x051644203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rsi + rdx + 5], 4 - LONG $0x24548b48; BYTE $0x78 // mov rdx, qword [rsp + 120] + LONG $0x24548b48; BYTE $0x48 // mov rdx, qword [rsp + 72] QUAD $0x051644203a0f4466; BYTE $0x05 // pinsrb xmm8, byte [rsi + rdx + 5], 5 QUAD $0x050e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rcx + 5], 6 QUAD $0x051e44203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r11 + 5], 7 @@ -6780,7 +7234,7 @@ LBB1_67: QUAD $0x052644203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r12 + 5], 10 QUAD $0x053644203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r14 + 5], 11 QUAD $0x053e44203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r15 + 5], 12 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] QUAD $0x050e44203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rsi + rcx + 5], 13 QUAD $0x052e44203a0f4666; BYTE $0x0e // pinsrb xmm8, byte [rsi + r13 + 5], 14 QUAD $0x051644203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r10 + 5], 15 @@ -6797,33 +7251,33 @@ LBB1_67: QUAD $0x06065c203a0f4266; BYTE $0x01 // pinsrb xmm3, byte [rsi + r8 + 6], 1 QUAD $0x02063e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 6], 2 QUAD $0x0306065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 6], 3 - LONG $0x245c8b4c; BYTE $0x58 // mov r11, qword [rsp + 88] + LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] QUAD $0x061e5c203a0f4266; BYTE $0x04 // pinsrb xmm3, byte [rsi + r11 + 6], 4 - LONG $0x244c8b4c; BYTE $0x78 // mov r9, qword [rsp + 120] + LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] QUAD $0x060e5c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r9 + 6], 5 - LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] + LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] QUAD $0x06065c203a0f4266; BYTE $0x06 // pinsrb xmm3, byte [rsi + r8 + 6], 6 - QUAD $0x000000c024a48b4c // mov r12, qword [rsp + 192] + LONG $0x24648b4c; BYTE $0x70 // mov r12, qword [rsp + 112] QUAD $0x06265c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r12 + 6], 7 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0806065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 6], 8 LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] QUAD $0x09061e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 6], 9 - LONG $0x24748b4c; BYTE $0x60 // mov r14, qword [rsp + 96] + LONG $0x24748b4c; BYTE $0x58 // mov r14, qword [rsp + 88] QUAD $0x06365c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r14 + 6], 10 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] QUAD $0x0b060e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 6], 11 QUAD $0x063e5c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi 
+ r15 + 6], 12 - LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] + LONG $0x24548b4c; BYTE $0x08 // mov r10, qword [rsp + 8] QUAD $0x06165c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r10 + 6], 13 QUAD $0x062e5c203a0f4266; BYTE $0x0e // pinsrb xmm3, byte [rsi + r13 + 6], 14 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] QUAD $0x0f060e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 6], 15 QUAD $0x0000d024946f0f66; BYTE $0x00 // movdqa xmm2, oword [rsp + 208] QUAD $0x01071654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 7], 1 QUAD $0x02073e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 7], 2 - QUAD $0x000000e024bc8948 // mov qword [rsp + 224], rdi - QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] + QUAD $0x000000a024bc8948 // mov qword [rsp + 160], rdi + LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] QUAD $0x03070e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 7], 3 QUAD $0x071e54203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r11 + 7], 4 QUAD $0x070e54203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r9 + 7], 5 @@ -6834,12 +7288,12 @@ LBB1_67: QUAD $0x09071e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 7], 9 QUAD $0x073654203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r14 + 7], 10 WORD $0x894d; BYTE $0xf4 // mov r12, r14 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b070654203a0f66 // pinsrb xmm2, byte [rsi + rax + 7], 11 QUAD $0x073e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r15 + 7], 12 QUAD $0x071654203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r10 + 7], 13 QUAD $0x072e54203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r13 + 7], 14 - LONG $0x24748b4c; BYTE $0x20 // mov r14, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x10 // mov r14, qword [rsp + 16] QUAD $0x073654203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r14 + 7], 15 LONG $0x740f4166; BYTE $0xd9 // pcmpeqb xmm3, xmm9 QUAD $0x000000f08d6f0f66 // movdqa xmm1, oword 240[rbp] /* [rip + .LCPI1_15] */ @@ -6850,19 +7304,19 @@ LBB1_67: LONG $0xd1db0f66 // pand xmm2, xmm1 LONG $0xd3eb0f66 // por xmm2, xmm3 LONG $0xca6f0f66 // movdqa xmm1, xmm2 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] LONG $0x1e54b60f; BYTE $0x15 // movzx edx, byte [rsi + rbx + 21] LONG $0xd26e0f66 // movd xmm2, edx - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x091654203a0f4466; BYTE $0x01 // pinsrb xmm10, byte [rsi + rdx + 9], 1 QUAD $0x093e54203a0f4466; BYTE $0x02 // pinsrb xmm10, byte [rsi + rdi + 9], 2 QUAD $0x090e54203a0f4466; BYTE $0x03 // pinsrb xmm10, byte [rsi + rcx + 9], 3 - LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] QUAD $0x093e54203a0f4466; BYTE $0x04 // pinsrb xmm10, byte [rsi + rdi + 9], 4 QUAD $0x090e54203a0f4666; BYTE $0x05 // pinsrb xmm10, byte [rsi + r9 + 9], 5 QUAD $0x090654203a0f4666; BYTE $0x06 // pinsrb xmm10, byte [rsi + r8 + 9], 6 QUAD $0x091e54203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r11 + 9], 7 - LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] + LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] QUAD $0x090e54203a0f4466; BYTE $0x08 // pinsrb xmm10, byte [rsi + rcx + 9], 8 LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] QUAD $0x090e54203a0f4466; BYTE $0x09 // pinsrb xmm10, byte 
[rsi + rcx + 9], 9 @@ -6881,132 +7335,130 @@ LBB1_67: LONG $0xf80f4166; BYTE $0xca // psubb xmm1, xmm10 LONG $0x1e54b60f; BYTE $0x16 // movzx edx, byte [rsi + rbx + 22] LONG $0xda6e0f66 // movd xmm3, edx - QUAD $0x00012024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 288] - LONG $0x24548b4c; BYTE $0x18 // mov r10, qword [rsp + 24] + QUAD $0x0000f024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 240] + LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] QUAD $0x081664203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rsi + r10 + 8], 1 - QUAD $0x000000e024a48b4c // mov r12, qword [rsp + 224] + QUAD $0x000000a024a48b4c // mov r12, qword [rsp + 160] QUAD $0x082664203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r12 + 8], 2 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x03080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 3 QUAD $0x04083e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 8], 4 QUAD $0x080e64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r9 + 8], 5 QUAD $0x080664203a0f4266; BYTE $0x06 // pinsrb xmm4, byte [rsi + r8 + 8], 6 WORD $0x894c; BYTE $0xdb // mov rbx, r11 QUAD $0x081e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r11 + 8], 7 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x08081664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 8], 8 QUAD $0x09080e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 8], 9 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] QUAD $0x0a080e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 8], 10 - LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] + QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] QUAD $0x083664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r14 + 8], 11 QUAD $0x083e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r15 + 8], 12 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x247c8b48; BYTE $0x08 // mov rdi, qword [rsp + 8] QUAD $0x0d083e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 8], 13 QUAD $0x082e64203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r13 + 8], 14 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] QUAD $0x0f083e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 8], 15 LONG $0x740f4166; BYTE $0xe1 // pcmpeqb xmm4, xmm9 LONG $0xdb0f4166; BYTE $0xe0 // pand xmm4, xmm8 - QUAD $0x00a024946f0f4466; WORD $0x0000 // movdqa xmm10, oword [rsp + 160] + QUAD $0x00b024946f0f4466; WORD $0x0000 // movdqa xmm10, oword [rsp + 176] QUAD $0x0a1654203a0f4666; BYTE $0x01 // pinsrb xmm10, byte [rsi + r10 + 10], 1 QUAD $0x0a2654203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rsi + r12 + 10], 2 QUAD $0x0a0654203a0f4466; BYTE $0x03 // pinsrb xmm10, byte [rsi + rax + 10], 3 - LONG $0x245c8b4c; BYTE $0x58 // mov r11, qword [rsp + 88] + LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] QUAD $0x0a1e54203a0f4666; BYTE $0x04 // pinsrb xmm10, byte [rsi + r11 + 10], 4 QUAD $0x0a0e54203a0f4666; BYTE $0x05 // pinsrb xmm10, byte [rsi + r9 + 10], 5 QUAD $0x0a0654203a0f4666; BYTE $0x06 // pinsrb xmm10, byte [rsi + r8 + 10], 6 + WORD $0x894d; BYTE $0xc2 // mov r10, r8 QUAD $0x0a1e54203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rbx + 10], 7 - WORD $0x8949; BYTE $0xda // mov r10, rbx + WORD $0x8949; BYTE $0xd8 // mov r8, rbx QUAD $0x0a1654203a0f4466; BYTE $0x08 // pinsrb xmm10, byte [rsi + rdx + 10], 8 - LONG 
$0x24448b4c; BYTE $0x40 // mov r8, qword [rsp + 64] - QUAD $0x0a0654203a0f4666; BYTE $0x09 // pinsrb xmm10, byte [rsi + r8 + 10], 9 + LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] + QUAD $0x0a1e54203a0f4466; BYTE $0x09 // pinsrb xmm10, byte [rsi + rbx + 10], 9 QUAD $0x0a0e54203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rcx + 10], 10 QUAD $0x0a3654203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r14 + 10], 11 - WORD $0x894d; BYTE $0xf5 // mov r13, r14 QUAD $0x0a3e54203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r15 + 10], 12 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] QUAD $0x0a0e54203a0f4466; BYTE $0x0d // pinsrb xmm10, byte [rsi + rcx + 10], 13 - LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] - QUAD $0x0a1654203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rdx + 10], 14 + QUAD $0x0a2e54203a0f4666; BYTE $0x0e // pinsrb xmm10, byte [rsi + r13 + 10], 14 QUAD $0x0a3e54203a0f4466; BYTE $0x0f // pinsrb xmm10, byte [rsi + rdi + 10], 15 LONG $0x740f4566; BYTE $0xd1 // pcmpeqb xmm10, xmm9 QUAD $0x0000b095db0f4466; BYTE $0x00 // pand xmm10, oword 176[rbp] /* [rip + .LCPI1_11] */ LONG $0xeb0f4466; BYTE $0xd4 // por xmm10, xmm4 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] LONG $0x3e54b60f; BYTE $0x17 // movzx edx, byte [rsi + rdi + 23] LONG $0x6e0f4466; BYTE $0xc2 // movd xmm8, edx LONG $0xeb0f4466; BYTE $0xd1 // por xmm10, xmm1 - QUAD $0x00a024947f0f4466; WORD $0x0000 // movdqa oword [rsp + 160], xmm10 + QUAD $0x00b024947f0f4466; WORD $0x0000 // movdqa oword [rsp + 176], xmm10 LONG $0x3e54b60f; BYTE $0x18 // movzx edx, byte [rsi + rdi + 24] LONG $0x6e0f4466; BYTE $0xd2 // movd xmm10, edx - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x0b165c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rsi + rdx + 11], 1 QUAD $0x0b265c203a0f4666; BYTE $0x02 // pinsrb xmm11, byte [rsi + r12 + 11], 2 QUAD $0x0b065c203a0f4466; BYTE $0x03 // pinsrb xmm11, byte [rsi + rax + 11], 3 QUAD $0x0b1e5c203a0f4666; BYTE $0x04 // pinsrb xmm11, byte [rsi + r11 + 11], 4 QUAD $0x0b0e5c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r9 + 11], 5 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] - QUAD $0x0b1e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rbx + 11], 6 - WORD $0x894d; BYTE $0xd6 // mov r14, r10 - QUAD $0x0b165c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r10 + 11], 7 - LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] - QUAD $0x0b165c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r10 + 11], 8 - QUAD $0x0b065c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r8 + 11], 9 - LONG $0x244c8b4c; BYTE $0x60 // mov r9, qword [rsp + 96] - QUAD $0x0b0e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r9 + 11], 10 - QUAD $0x0b2e5c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r13 + 11], 11 + QUAD $0x0b165c203a0f4666; BYTE $0x06 // pinsrb xmm11, byte [rsi + r10 + 11], 6 + QUAD $0x0b065c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r8 + 11], 7 + LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] + QUAD $0x0b0e5c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r9 + 11], 8 + WORD $0x8949; BYTE $0xd8 // mov r8, rbx + QUAD $0x0b1e5c203a0f4466; BYTE $0x09 // pinsrb xmm11, byte [rsi + rbx + 11], 9 + LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + QUAD $0x0b3e5c203a0f4466; BYTE $0x0a // pinsrb 
xmm11, byte [rsi + rdi + 11], 10 + QUAD $0x0b365c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r14 + 11], 11 QUAD $0x0b3e5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r15 + 11], 12 QUAD $0x0b0e5c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rcx + 11], 13 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] - QUAD $0x0b3e5c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rdi + 11], 14 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] - QUAD $0x0b3e5c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rsi + rdi + 11], 15 + QUAD $0x0b2e5c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r13 + 11], 14 + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] + QUAD $0x0b1e5c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rsi + rbx + 11], 15 QUAD $0x0c166c203a0f4466; BYTE $0x01 // pinsrb xmm13, byte [rsi + rdx + 12], 1 QUAD $0x0c266c203a0f4666; BYTE $0x02 // pinsrb xmm13, byte [rsi + r12 + 12], 2 QUAD $0x0c066c203a0f4466; BYTE $0x03 // pinsrb xmm13, byte [rsi + rax + 12], 3 QUAD $0x0c1e6c203a0f4666; BYTE $0x04 // pinsrb xmm13, byte [rsi + r11 + 12], 4 - LONG $0x246c8b4c; BYTE $0x78 // mov r13, qword [rsp + 120] + LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] QUAD $0x0c2e6c203a0f4666; BYTE $0x05 // pinsrb xmm13, byte [rsi + r13 + 12], 5 - QUAD $0x0c1e6c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rbx + 12], 6 - QUAD $0x0c366c203a0f4666; BYTE $0x07 // pinsrb xmm13, byte [rsi + r14 + 12], 7 - QUAD $0x0c166c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r10 + 12], 8 + QUAD $0x0c166c203a0f4666; BYTE $0x06 // pinsrb xmm13, byte [rsi + r10 + 12], 6 + LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] + QUAD $0x0c1e6c203a0f4466; BYTE $0x07 // pinsrb xmm13, byte [rsi + rbx + 12], 7 + QUAD $0x0c0e6c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r9 + 12], 8 + WORD $0x894d; BYTE $0xca // mov r10, r9 QUAD $0x0c066c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r8 + 12], 9 - WORD $0x894c; BYTE $0xc3 // mov rbx, r8 - QUAD $0x0c0e6c203a0f4666; BYTE $0x0a // pinsrb xmm13, byte [rsi + r9 + 12], 10 - WORD $0x894d; BYTE $0xc8 // mov r8, r9 - LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] - QUAD $0x0c2e6c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r13 + 12], 11 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + QUAD $0x0c3e6c203a0f4466; BYTE $0x0a // pinsrb xmm13, byte [rsi + rdi + 12], 10 + WORD $0x8949; BYTE $0xf8 // mov r8, rdi + QUAD $0x0c366c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r14 + 12], 11 QUAD $0x0c3e6c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r15 + 12], 12 QUAD $0x0c0e6c203a0f4466; BYTE $0x0d // pinsrb xmm13, byte [rsi + rcx + 12], 13 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] - QUAD $0x0c0e6c203a0f4666; BYTE $0x0e // pinsrb xmm13, byte [rsi + r9 + 12], 14 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x0c2e6c203a0f4666; BYTE $0x0e // pinsrb xmm13, byte [rsi + r13 + 12], 14 + LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] QUAD $0x0c3e6c203a0f4466; BYTE $0x0f // pinsrb xmm13, byte [rsi + rdi + 12], 15 QUAD $0x0d1664203a0f4466; BYTE $0x01 // pinsrb xmm12, byte [rsi + rdx + 13], 1 QUAD $0x0d2664203a0f4666; BYTE $0x02 // pinsrb xmm12, byte [rsi + r12 + 13], 2 QUAD $0x0d0664203a0f4466; BYTE $0x03 // pinsrb xmm12, byte [rsi + rax + 13], 3 QUAD $0x0d1e64203a0f4666; BYTE $0x04 // pinsrb xmm12, byte [rsi + r11 + 13], 4 - LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] + LONG 
$0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x0d0664203a0f4466; BYTE $0x05 // pinsrb xmm12, byte [rsi + rax + 13], 5 - LONG $0x24548b48; BYTE $0x68 // mov rdx, qword [rsp + 104] + LONG $0x24548b48; BYTE $0x60 // mov rdx, qword [rsp + 96] QUAD $0x0d1664203a0f4466; BYTE $0x06 // pinsrb xmm12, byte [rsi + rdx + 13], 6 - QUAD $0x0d3664203a0f4666; BYTE $0x07 // pinsrb xmm12, byte [rsi + r14 + 13], 7 + LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] + QUAD $0x0d1e64203a0f4466; BYTE $0x07 // pinsrb xmm12, byte [rsi + rbx + 13], 7 QUAD $0x0d1664203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r10 + 13], 8 - QUAD $0x0d1e64203a0f4466; BYTE $0x09 // pinsrb xmm12, byte [rsi + rbx + 13], 9 + QUAD $0x0d0e64203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r9 + 13], 9 QUAD $0x0d0664203a0f4666; BYTE $0x0a // pinsrb xmm12, byte [rsi + r8 + 13], 10 - QUAD $0x0d2e64203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r13 + 13], 11 + QUAD $0x0d3664203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r14 + 13], 11 QUAD $0x0d3e64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r15 + 13], 12 QUAD $0x0d0e64203a0f4466; BYTE $0x0d // pinsrb xmm12, byte [rsi + rcx + 13], 13 - WORD $0x894d; BYTE $0xcd // mov r13, r9 - QUAD $0x0d0e64203a0f4666; BYTE $0x0e // pinsrb xmm12, byte [rsi + r9 + 13], 14 + QUAD $0x0d2e64203a0f4666; BYTE $0x0e // pinsrb xmm12, byte [rsi + r13 + 13], 14 QUAD $0x0d3e64203a0f4466; BYTE $0x0f // pinsrb xmm12, byte [rsi + rdi + 13], 15 LONG $0x740f4566; BYTE $0xd9 // pcmpeqb xmm11, xmm9 QUAD $0x0000c09ddb0f4466; BYTE $0x00 // pand xmm11, oword 192[rbp] /* [rip + .LCPI1_12] */ LONG $0x740f4566; BYTE $0xe9 // pcmpeqb xmm13, xmm9 QUAD $0x0000d0addb0f4466; BYTE $0x00 // pand xmm13, oword 208[rbp] /* [rip + .LCPI1_13] */ LONG $0xeb0f4566; BYTE $0xeb // por xmm13, xmm11 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] LONG $0x0e54b60f; BYTE $0x19 // movzx edx, byte [rsi + rcx + 25] LONG $0xca6e0f66 // movd xmm1, edx LONG $0x740f4566; BYTE $0xe1 // pcmpeqb xmm12, xmm9 @@ -7015,32 +7467,30 @@ LBB1_67: LONG $0x0e54b60f; BYTE $0x1a // movzx edx, byte [rsi + rcx + 26] LONG $0x6e0f4466; BYTE $0xda // movd xmm11, edx QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] - LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x010e0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 14], 1 QUAD $0x0e2664203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r12 + 14], 2 - QUAD $0x0000008024948b4c // mov r10, qword [rsp + 128] + LONG $0x24548b4c; BYTE $0x78 // mov r10, qword [rsp + 120] QUAD $0x0e1664203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r10 + 14], 3 QUAD $0x0e1e64203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rsi + r11 + 14], 4 QUAD $0x050e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 14], 5 - LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] QUAD $0x060e0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 14], 6 - WORD $0x894c; BYTE $0xf7 // mov rdi, r14 - QUAD $0x0e3664203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r14 + 14], 7 - LONG $0x24448b4c; BYTE $0x10 // mov r8, qword [rsp + 16] + WORD $0x8948; BYTE $0xdf // mov rdi, rbx + QUAD $0x070e1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 14], 7 + LONG $0x24448b4c; BYTE $0x18 // mov r8, qword [rsp + 24] QUAD $0x0e0664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r8 + 14], 8 - LONG $0x244c8b4c; BYTE 
$0x40 // mov r9, qword [rsp + 64] QUAD $0x0e0e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r9 + 14], 9 - LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] + LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] QUAD $0x0a0e1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 14], 10 - LONG $0x24748b4c; BYTE $0x48 // mov r14, qword [rsp + 72] QUAD $0x0e3664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r14 + 14], 11 QUAD $0x0e3e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r15 + 14], 12 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] QUAD $0x0d0e1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 14], 13 QUAD $0x0e2e64203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r13 + 14], 14 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] QUAD $0x0f0e1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 14], 15 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x0f1674203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rsi + rdx + 15], 1 QUAD $0x0f2674203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rsi + r12 + 15], 2 QUAD $0x0f1674203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rsi + r10 + 15], 3 @@ -7053,12 +7503,12 @@ LBB1_67: QUAD $0x0f1e74203a0f4466; BYTE $0x0a // pinsrb xmm14, byte [rsi + rbx + 15], 10 QUAD $0x0f3674203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r14 + 15], 11 QUAD $0x0f3e74203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rsi + r15 + 15], 12 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] QUAD $0x0f1674203a0f4466; BYTE $0x0d // pinsrb xmm14, byte [rsi + rdx + 15], 13 QUAD $0x0f2e74203a0f4666; BYTE $0x0e // pinsrb xmm14, byte [rsi + r13 + 15], 14 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] QUAD $0x0f1674203a0f4466; BYTE $0x0f // pinsrb xmm14, byte [rsi + rdx + 15], 15 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x10167c203a0f4466; BYTE $0x01 // pinsrb xmm15, byte [rsi + rdx + 16], 1 QUAD $0x10267c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rsi + r12 + 16], 2 QUAD $0x10167c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rsi + r10 + 16], 3 @@ -7071,10 +7521,10 @@ LBB1_67: QUAD $0x101e7c203a0f4466; BYTE $0x0a // pinsrb xmm15, byte [rsi + rbx + 16], 10 QUAD $0x10367c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r14 + 16], 11 QUAD $0x103e7c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r15 + 16], 12 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] QUAD $0x10167c203a0f4466; BYTE $0x0d // pinsrb xmm15, byte [rsi + rdx + 16], 13 QUAD $0x102e7c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r13 + 16], 14 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x01111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 1 QUAD $0x112644203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r12 + 17], 2 QUAD $0x111644203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r10 + 17], 3 @@ -7084,19 +7534,20 @@ LBB1_67: QUAD $0x06110e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 17], 6 QUAD $0x07113e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 17], 7 QUAD $0x110644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte 
[rsi + r8 + 17], 8 + WORD $0x894c; BYTE $0xc0 // mov rax, r8 QUAD $0x110e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r9 + 17], 9 QUAD $0x0a111e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 17], 10 QUAD $0x113644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r14 + 17], 11 QUAD $0x113e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r15 + 17], 12 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x0d110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 13 - LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] + QUAD $0x0d111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 13 + LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] QUAD $0x0e111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 14 - QUAD $0x00a024a4eb0f4466; WORD $0x0000 // por xmm12, oword [rsp + 160] - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] + QUAD $0x00b024a4eb0f4466; WORD $0x0000 // por xmm12, oword [rsp + 176] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] LONG $0x54b60f42; WORD $0x1b26 // movzx edx, byte [rsi + r12 + 27] LONG $0x6e0f4466; BYTE $0xca // movd xmm9, edx - QUAD $0x00b024ac6f0f4466; WORD $0x0000 // movdqa xmm13, oword [rsp + 176] + QUAD $0x00c024ac6f0f4466; WORD $0x0000 // movdqa xmm13, oword [rsp + 192] LONG $0x740f4166; BYTE $0xe5 // pcmpeqb xmm4, xmm13 QUAD $0x000000f0a5db0f66 // pand xmm4, oword 240[rbp] /* [rip + .LCPI1_15] */ LONG $0x740f4566; BYTE $0xf5 // pcmpeqb xmm14, xmm13 @@ -7105,7 +7556,7 @@ LBB1_67: LONG $0xeb0f4466; BYTE $0xf4 // por xmm14, xmm4 LONG $0x54b60f42; WORD $0x1c26 // movzx edx, byte [rsi + r12 + 28] LONG $0xe26e0f66 // movd xmm4, edx - LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] + LONG $0x24448b4c; BYTE $0x10 // mov r8, qword [rsp + 16] QUAD $0x110644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r8 + 17], 15 LONG $0xeb0f4566; BYTE $0xf4 // por xmm14, xmm12 LONG $0x740f4166; BYTE $0xc5 // pcmpeqb xmm0, xmm13 @@ -7113,36 +7564,36 @@ LBB1_67: QUAD $0x0000a0a56f0f4466; BYTE $0x00 // movdqa xmm12, oword 160[rbp] /* [rip + .LCPI1_10] */ LONG $0xdb0f4566; BYTE $0xec // pand xmm13, xmm12 LONG $0xf80f4466; BYTE $0xe8 // psubb xmm13, xmm0 - QUAD $0x00a024ac7f0f4466; WORD $0x0000 // movdqa oword [rsp + 160], xmm13 + QUAD $0x00b024ac7f0f4466; WORD $0x0000 // movdqa oword [rsp + 176], xmm13 LONG $0x54b60f42; WORD $0x1d26 // movzx edx, byte [rsi + r12 + 29] LONG $0x6e0f4466; BYTE $0xea // movd xmm13, edx QUAD $0x10067c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r8 + 16], 15 - QUAD $0x0000b024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 176] + QUAD $0x0000c024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 192] LONG $0x740f4466; BYTE $0xf8 // pcmpeqb xmm15, xmm0 - LONG $0x24648b4c; BYTE $0x18 // mov r12, qword [rsp + 24] + LONG $0x24648b4c; BYTE $0x28 // mov r12, qword [rsp + 40] QUAD $0x12266c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rsi + r12 + 18], 1 - QUAD $0x000000e024948b48 // mov rdx, qword [rsp + 224] + QUAD $0x000000a024948b48 // mov rdx, qword [rsp + 160] QUAD $0x0212166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 18], 2 QUAD $0x12166c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r10 + 18], 3 QUAD $0x121e6c203a0f4266; BYTE $0x04 // pinsrb xmm5, byte [rsi + r11 + 18], 4 QUAD $0x122e6c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r13 + 18], 5 QUAD $0x06120e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 18], 6 QUAD $0x07123e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 18], 7 - LONG $0x24548b48; BYTE $0x10 // mov 
rdx, qword [rsp + 16] - QUAD $0x0812166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 18], 8 + QUAD $0x0812066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 18], 8 QUAD $0x120e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r9 + 18], 9 QUAD $0x0a121e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 18], 10 QUAD $0x12366c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r14 + 18], 11 QUAD $0x123e6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r15 + 18], 12 - QUAD $0x0d12066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 18], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0e12066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 18], 14 + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] + QUAD $0x0d12166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 18], 13 + LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + QUAD $0x0e12166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 18], 14 LONG $0xdb0f4566; BYTE $0xfc // pand xmm15, xmm12 QUAD $0x12066c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r8 + 18], 15 LONG $0xe8740f66 // pcmpeqb xmm5, xmm0 QUAD $0x000000b0addb0f66 // pand xmm5, oword 176[rbp] /* [rip + .LCPI1_11] */ LONG $0xeb0f4166; BYTE $0xef // por xmm5, xmm15 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] LONG $0x0654b60f; BYTE $0x1e // movzx edx, byte [rsi + rax + 30] LONG $0x6e0f4466; BYTE $0xe2 // movd xmm12, edx QUAD $0x13267c203a0f4266; BYTE $0x01 // pinsrb xmm7, byte [rsi + r12 + 19], 1 @@ -7160,7 +7611,7 @@ LBB1_67: LONG $0x0654b60f; BYTE $0x1f // movzx edx, byte [rsi + rax + 31] LONG $0xc26e0f66 // movd xmm0, edx QUAD $0x1f2644203a0f4266; BYTE $0x01 // pinsrb xmm0, byte [rsi + r12 + 31], 1 - QUAD $0x000000e024948b48 // mov rdx, qword [rsp + 224] + QUAD $0x000000a024948b48 // mov rdx, qword [rsp + 160] QUAD $0x0213167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 19], 2 QUAD $0x02141674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 20], 2 QUAD $0x02151654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 21], 2 @@ -7179,32 +7630,32 @@ LBB1_67: QUAD $0x132e7c203a0f4266; BYTE $0x05 // pinsrb xmm7, byte [rsi + r13 + 19], 5 QUAD $0x06130e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 19], 6 QUAD $0x07133e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 19], 7 - LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] - QUAD $0x13267c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r12 + 19], 8 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0813067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 19], 8 QUAD $0x130e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r9 + 19], 9 QUAD $0x0a131e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 19], 10 QUAD $0x13367c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r14 + 19], 11 QUAD $0x133e7c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r15 + 19], 12 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] QUAD $0x0d13167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 19], 13 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0e13067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 19], 14 + LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] + QUAD $0x13267c203a0f4266; BYTE $0x0e // pinsrb xmm7, byte [rsi + r12 + 19], 14 QUAD $0x13067c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r8 + 19], 15 QUAD $0x141674203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r10 + 20], 3 QUAD $0x141e74203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r11 + 20], 4 QUAD 
$0x142e74203a0f4266; BYTE $0x05 // pinsrb xmm6, byte [rsi + r13 + 20], 5 QUAD $0x06140e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 20], 6 QUAD $0x07143e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 20], 7 - QUAD $0x142674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r12 + 20], 8 + QUAD $0x08140674203a0f66 // pinsrb xmm6, byte [rsi + rax + 20], 8 QUAD $0x140e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r9 + 20], 9 QUAD $0x0a141e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 20], 10 QUAD $0x143674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r14 + 20], 11 QUAD $0x143e74203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r15 + 20], 12 QUAD $0x0d141674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 20], 13 - QUAD $0x0e140674203a0f66 // pinsrb xmm6, byte [rsi + rax + 20], 14 - QUAD $0x0000a024aceb0f66; BYTE $0x00 // por xmm5, oword [rsp + 160] + QUAD $0x142674203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r12 + 20], 14 + QUAD $0x0000b024aceb0f66; BYTE $0x00 // por xmm5, oword [rsp + 176] QUAD $0x140674203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r8 + 20], 15 - QUAD $0x00b024bc6f0f4466; WORD $0x0000 // movdqa xmm15, oword [rsp + 176] + QUAD $0x00c024bc6f0f4466; WORD $0x0000 // movdqa xmm15, oword [rsp + 192] LONG $0x740f4166; BYTE $0xff // pcmpeqb xmm7, xmm15 QUAD $0x000000c0bddb0f66 // pand xmm7, oword 192[rbp] /* [rip + .LCPI1_12] */ LONG $0x740f4166; BYTE $0xf7 // pcmpeqb xmm6, xmm15 @@ -7215,13 +7666,13 @@ LBB1_67: QUAD $0x152e54203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r13 + 21], 5 QUAD $0x06150e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 21], 6 QUAD $0x07153e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 21], 7 - QUAD $0x152654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r12 + 21], 8 + QUAD $0x08150654203a0f66 // pinsrb xmm2, byte [rsi + rax + 21], 8 QUAD $0x150e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r9 + 21], 9 QUAD $0x0a151e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 21], 10 QUAD $0x153654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r14 + 21], 11 QUAD $0x153e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r15 + 21], 12 QUAD $0x0d151654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 21], 13 - QUAD $0x0e150654203a0f66 // pinsrb xmm2, byte [rsi + rax + 21], 14 + QUAD $0x152654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r12 + 21], 14 QUAD $0x150654203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r8 + 21], 15 LONG $0x740f4166; BYTE $0xd7 // pcmpeqb xmm2, xmm15 QUAD $0x000000e0bd6f0f66 // movdqa xmm7, oword 224[rbp] /* [rip + .LCPI1_14] */ @@ -7233,26 +7684,26 @@ LBB1_67: QUAD $0x162e5c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r13 + 22], 5 QUAD $0x06160e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 22], 6 QUAD $0x07163e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 22], 7 - QUAD $0x16265c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r12 + 22], 8 + QUAD $0x0816065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 22], 8 QUAD $0x160e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r9 + 22], 9 QUAD $0x0a161e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 22], 10 QUAD $0x16365c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r14 + 22], 11 QUAD $0x163e5c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r15 + 22], 12 QUAD $0x0d16165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 22], 13 - QUAD $0x0e16065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 22], 14 + QUAD $0x16265c203a0f4266; BYTE $0x0e // pinsrb xmm3, byte [rsi + r12 + 22], 14 QUAD $0x16065c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r8 + 22], 15 QUAD 
$0x171644203a0f4666; BYTE $0x03 // pinsrb xmm8, byte [rsi + r10 + 23], 3 QUAD $0x171e44203a0f4666; BYTE $0x04 // pinsrb xmm8, byte [rsi + r11 + 23], 4 QUAD $0x172e44203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r13 + 23], 5 QUAD $0x170e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rcx + 23], 6 QUAD $0x173e44203a0f4466; BYTE $0x07 // pinsrb xmm8, byte [rsi + rdi + 23], 7 - QUAD $0x172644203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r12 + 23], 8 + QUAD $0x170644203a0f4466; BYTE $0x08 // pinsrb xmm8, byte [rsi + rax + 23], 8 QUAD $0x170e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r9 + 23], 9 QUAD $0x171e44203a0f4466; BYTE $0x0a // pinsrb xmm8, byte [rsi + rbx + 23], 10 QUAD $0x173644203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r14 + 23], 11 QUAD $0x173e44203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r15 + 23], 12 QUAD $0x171644203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rsi + rdx + 23], 13 - QUAD $0x170644203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rax + 23], 14 + QUAD $0x172644203a0f4666; BYTE $0x0e // pinsrb xmm8, byte [rsi + r12 + 23], 14 QUAD $0x170644203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r8 + 23], 15 LONG $0x740f4166; BYTE $0xdf // pcmpeqb xmm3, xmm15 QUAD $0x000000f0ad6f0f66 // movdqa xmm5, oword 240[rbp] /* [rip + .LCPI1_15] */ @@ -7267,13 +7718,13 @@ LBB1_67: QUAD $0x192e4c203a0f4266; BYTE $0x05 // pinsrb xmm1, byte [rsi + r13 + 25], 5 QUAD $0x06190e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 25], 6 QUAD $0x07193e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 25], 7 - QUAD $0x19264c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r12 + 25], 8 + QUAD $0x0819064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 25], 8 QUAD $0x190e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r9 + 25], 9 QUAD $0x0a191e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 25], 10 QUAD $0x19364c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r14 + 25], 11 QUAD $0x193e4c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r15 + 25], 12 QUAD $0x0d19164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 25], 13 - QUAD $0x0e19064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 25], 14 + QUAD $0x19264c203a0f4266; BYTE $0x0e // pinsrb xmm1, byte [rsi + r12 + 25], 14 QUAD $0x19064c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r8 + 25], 15 LONG $0xeb0f4466; BYTE $0xc2 // por xmm8, xmm2 LONG $0x740f4166; BYTE $0xcf // pcmpeqb xmm1, xmm15 @@ -7286,13 +7737,13 @@ LBB1_67: QUAD $0x182e54203a0f4666; BYTE $0x05 // pinsrb xmm10, byte [rsi + r13 + 24], 5 QUAD $0x180e54203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rcx + 24], 6 QUAD $0x183e54203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rdi + 24], 7 - QUAD $0x182654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r12 + 24], 8 + QUAD $0x180654203a0f4466; BYTE $0x08 // pinsrb xmm10, byte [rsi + rax + 24], 8 QUAD $0x180e54203a0f4666; BYTE $0x09 // pinsrb xmm10, byte [rsi + r9 + 24], 9 QUAD $0x181e54203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rbx + 24], 10 QUAD $0x183654203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r14 + 24], 11 QUAD $0x183e54203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r15 + 24], 12 QUAD $0x181654203a0f4466; BYTE $0x0d // pinsrb xmm10, byte [rsi + rdx + 24], 13 - QUAD $0x180654203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rax + 24], 14 + QUAD $0x182654203a0f4666; BYTE $0x0e // pinsrb xmm10, byte [rsi + r12 + 24], 14 QUAD $0x180654203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r8 + 24], 15 LONG $0x740f4566; BYTE $0xd7 // pcmpeqb xmm10, xmm15 LONG $0xdb0f4466; BYTE $0xd3 // 
pand xmm10, xmm3 @@ -7301,13 +7752,13 @@ LBB1_67: QUAD $0x1a2e5c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r13 + 26], 5 QUAD $0x1a0e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rcx + 26], 6 QUAD $0x1a3e5c203a0f4466; BYTE $0x07 // pinsrb xmm11, byte [rsi + rdi + 26], 7 - QUAD $0x1a265c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r12 + 26], 8 + QUAD $0x1a065c203a0f4466; BYTE $0x08 // pinsrb xmm11, byte [rsi + rax + 26], 8 QUAD $0x1a0e5c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r9 + 26], 9 QUAD $0x1a1e5c203a0f4466; BYTE $0x0a // pinsrb xmm11, byte [rsi + rbx + 26], 10 QUAD $0x1a365c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r14 + 26], 11 QUAD $0x1a3e5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r15 + 26], 12 QUAD $0x1a165c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rdx + 26], 13 - QUAD $0x1a065c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rax + 26], 14 + QUAD $0x1a265c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r12 + 26], 14 QUAD $0x1a065c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r8 + 26], 15 LONG $0x740f4566; BYTE $0xdf // pcmpeqb xmm11, xmm15 QUAD $0x0000b09ddb0f4466; BYTE $0x00 // pand xmm11, oword 176[rbp] /* [rip + .LCPI1_11] */ @@ -7318,39 +7769,39 @@ LBB1_67: QUAD $0x1b2e4c203a0f4666; BYTE $0x05 // pinsrb xmm9, byte [rsi + r13 + 27], 5 QUAD $0x1b0e4c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rcx + 27], 6 QUAD $0x1b3e4c203a0f4466; BYTE $0x07 // pinsrb xmm9, byte [rsi + rdi + 27], 7 - QUAD $0x1b264c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r12 + 27], 8 + QUAD $0x1b064c203a0f4466; BYTE $0x08 // pinsrb xmm9, byte [rsi + rax + 27], 8 QUAD $0x1b0e4c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r9 + 27], 9 QUAD $0x1b1e4c203a0f4466; BYTE $0x0a // pinsrb xmm9, byte [rsi + rbx + 27], 10 QUAD $0x1b364c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r14 + 27], 11 QUAD $0x1b3e4c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r15 + 27], 12 QUAD $0x1b164c203a0f4466; BYTE $0x0d // pinsrb xmm9, byte [rsi + rdx + 27], 13 - QUAD $0x1b064c203a0f4466; BYTE $0x0e // pinsrb xmm9, byte [rsi + rax + 27], 14 + QUAD $0x1b264c203a0f4666; BYTE $0x0e // pinsrb xmm9, byte [rsi + r12 + 27], 14 QUAD $0x1b064c203a0f4666; BYTE $0x0f // pinsrb xmm9, byte [rsi + r8 + 27], 15 QUAD $0x1c1664203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r10 + 28], 3 QUAD $0x1c1e64203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rsi + r11 + 28], 4 QUAD $0x1c2e64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r13 + 28], 5 QUAD $0x061c0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 28], 6 QUAD $0x071c3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 28], 7 - QUAD $0x1c2664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r12 + 28], 8 + QUAD $0x081c0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 28], 8 QUAD $0x1c0e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r9 + 28], 9 QUAD $0x0a1c1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 28], 10 QUAD $0x1c3664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r14 + 28], 11 QUAD $0x1c3e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r15 + 28], 12 QUAD $0x0d1c1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 28], 13 - QUAD $0x0e1c0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 28], 14 + QUAD $0x1c2664203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r12 + 28], 14 QUAD $0x1c0664203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r8 + 28], 15 QUAD $0x1d166c203a0f4666; BYTE $0x03 // pinsrb xmm13, byte [rsi + r10 + 29], 3 QUAD $0x1d1e6c203a0f4666; BYTE $0x04 // pinsrb xmm13, byte [rsi + r11 
+ 29], 4 QUAD $0x1d2e6c203a0f4666; BYTE $0x05 // pinsrb xmm13, byte [rsi + r13 + 29], 5 QUAD $0x1d0e6c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rcx + 29], 6 QUAD $0x1d3e6c203a0f4466; BYTE $0x07 // pinsrb xmm13, byte [rsi + rdi + 29], 7 - QUAD $0x1d266c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r12 + 29], 8 + QUAD $0x1d066c203a0f4466; BYTE $0x08 // pinsrb xmm13, byte [rsi + rax + 29], 8 QUAD $0x1d0e6c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r9 + 29], 9 QUAD $0x1d1e6c203a0f4466; BYTE $0x0a // pinsrb xmm13, byte [rsi + rbx + 29], 10 QUAD $0x1d366c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r14 + 29], 11 QUAD $0x1d3e6c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r15 + 29], 12 QUAD $0x1d166c203a0f4466; BYTE $0x0d // pinsrb xmm13, byte [rsi + rdx + 29], 13 - QUAD $0x1d066c203a0f4466; BYTE $0x0e // pinsrb xmm13, byte [rsi + rax + 29], 14 + QUAD $0x1d266c203a0f4666; BYTE $0x0e // pinsrb xmm13, byte [rsi + r12 + 29], 14 LONG $0x6f0f4166; BYTE $0xcf // movdqa xmm1, xmm15 LONG $0x740f4566; BYTE $0xcf // pcmpeqb xmm9, xmm15 QUAD $0x0000c08ddb0f4466; BYTE $0x00 // pand xmm9, oword 192[rbp] /* [rip + .LCPI1_12] */ @@ -7371,21 +7822,21 @@ LBB1_67: QUAD $0x061f0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 31], 6 QUAD $0x1e3e64203a0f4466; BYTE $0x07 // pinsrb xmm12, byte [rsi + rdi + 30], 7 QUAD $0x071f3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 31], 7 - QUAD $0x1e2664203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r12 + 30], 8 - QUAD $0x1f2644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r12 + 31], 8 + QUAD $0x1e0664203a0f4466; BYTE $0x08 // pinsrb xmm12, byte [rsi + rax + 30], 8 + QUAD $0x081f0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 31], 8 QUAD $0x1e0e64203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r9 + 30], 9 QUAD $0x1f0e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r9 + 31], 9 QUAD $0x1e1e64203a0f4466; BYTE $0x0a // pinsrb xmm12, byte [rsi + rbx + 30], 10 QUAD $0x0a1f1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 31], 10 QUAD $0x1e3664203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r14 + 30], 11 QUAD $0x1f3644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r14 + 31], 11 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x1e3e64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r15 + 30], 12 QUAD $0x1f3e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r15 + 31], 12 QUAD $0x1e1664203a0f4466; BYTE $0x0d // pinsrb xmm12, byte [rsi + rdx + 30], 13 QUAD $0x0d1f1644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 31], 13 - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] - QUAD $0x1e0664203a0f4466; BYTE $0x0e // pinsrb xmm12, byte [rsi + rax + 30], 14 - QUAD $0x0e1f0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 31], 14 + QUAD $0x1e2664203a0f4666; BYTE $0x0e // pinsrb xmm12, byte [rsi + r12 + 30], 14 + QUAD $0x1f2644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r12 + 31], 14 QUAD $0x1e0664203a0f4666; BYTE $0x0f // pinsrb xmm12, byte [rsi + r8 + 30], 15 QUAD $0x1f0644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r8 + 31], 15 LONG $0xeb0f4566; BYTE $0xeb // por xmm13, xmm11 @@ -7410,33 +7861,32 @@ LBB1_67: LONG $0x610f4166; BYTE $0xc0 // punpcklwd xmm0, xmm8 LONG $0x690f4166; BYTE $0xe0 // punpckhwd xmm4, xmm8 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - LONG $0x7f0f41f3; WORD $0x8e64; BYTE $0x30 // movdqu oword [r14 + 4*rcx + 48], xmm4 - LONG $0x7f0f41f3; WORD $0x8e44; BYTE $0x20 // movdqu oword [r14 + 4*rcx + 32], xmm0 - LONG $0x7f0f41f3; WORD $0x8e54; BYTE $0x10 // movdqu oword [r14 + 
4*rcx + 16], xmm2 - LONG $0x7f0f41f3; WORD $0x8e1c // movdqu oword [r14 + 4*rcx], xmm3 + LONG $0x647f0ff3; WORD $0x3088 // movdqu oword [rax + 4*rcx + 48], xmm4 + LONG $0x447f0ff3; WORD $0x2088 // movdqu oword [rax + 4*rcx + 32], xmm0 + LONG $0x547f0ff3; WORD $0x1088 // movdqu oword [rax + 4*rcx + 16], xmm2 + LONG $0x1c7f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm3 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x000000f8248c3b48 // cmp rcx, qword [rsp + 248] - JNE LBB1_67 + QUAD $0x000000e8248c3b48 // cmp rcx, qword [rsp + 232] + JNE LBB1_68 QUAD $0x0000010024bc8b4c // mov r15, qword [rsp + 256] - QUAD $0x000000f824bc3b4c // cmp r15, qword [rsp + 248] - LONG $0x245c8a44; BYTE $0x08 // mov r11b, byte [rsp + 8] + QUAD $0x000000e824bc3b4c // cmp r15, qword [rsp + 232] + LONG $0x245c8a44; BYTE $0x04 // mov r11b, byte [rsp + 4] QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - JNE LBB1_69 - JMP LBB1_72 + JNE LBB1_70 + JMP LBB1_73 -LBB1_110: - LONG $0xf8e38349 // and r11, -8 - WORD $0x894c; BYTE $0xd8 // mov rax, r11 +LBB1_111: + LONG $0xf8e78349 // and r15, -8 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x06e0c148 // shl rax, 6 WORD $0x0148; BYTE $0xf0 // add rax, rsi - LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax - LONG $0x245c894c; BYTE $0x10 // mov qword [rsp + 16], r11 - LONG $0x9e048d4b // lea rax, [r14 + 4*r11] + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0xbe048d4b // lea rax, [r14 + 4*r15] LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax - LONG $0x246c8944; BYTE $0x38 // mov dword [rsp + 56], r13d - LONG $0x6e0f4166; BYTE $0xc5 // movd xmm0, r13d + LONG $0x6e0f4166; BYTE $0xc3 // movd xmm0, r11d LONG $0xc0700ff2; BYTE $0xe0 // pshuflw xmm0, xmm0, 224 LONG $0xc0700f66; BYTE $0x00 // pshufd xmm0, xmm0, 0 WORD $0x3145; BYTE $0xff // xor r15d, r15d @@ -7449,27 +7899,27 @@ LBB1_110: LONG $0x6f0f4466; WORD $0x6075 // movdqa xmm14, oword 96[rbp] /* [rip + .LCPI1_6] */ QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 -LBB1_111: +LBB1_112: LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LONG $0x06e7c149 // shl r15, 6 WORD $0x894d; BYTE $0xf9 // mov r9, r15 WORD $0x894d; BYTE $0xfc // mov r12, r15 WORD $0x894d; BYTE $0xfd // mov r13, r15 WORD $0x894c; BYTE $0xf9 // mov rcx, r15 - WORD $0x894c; BYTE $0xff // mov rdi, r15 + WORD $0x894c; BYTE $0xfa // mov rdx, r15 WORD $0x894c; BYTE $0xfb // mov rbx, r15 LONG $0x34b70f46; BYTE $0x3e // movzx r14d, word [rsi + r15] LONG $0x44b70f42; WORD $0x023e // movzx eax, word [rsi + r15 + 2] - LONG $0x54b70f42; WORD $0x043e // movzx edx, word [rsi + r15 + 4] - LONG $0x5cb70f46; WORD $0x063e // movzx r11d, word [rsi + r15 + 6] - LONG $0x54b70f46; WORD $0x083e // movzx r10d, word [rsi + r15 + 8] + LONG $0x54b70f46; WORD $0x043e // movzx r10d, word [rsi + r15 + 4] + LONG $0x7cb70f42; WORD $0x063e // movzx edi, word [rsi + r15 + 6] + LONG $0x5cb70f46; WORD $0x083e // movzx r11d, word [rsi + r15 + 8] WORD $0x894d; BYTE $0xf8 // mov r8, r15 LONG $0x40c88349 // or r8, 64 LONG $0x80c98149; WORD $0x0000; BYTE $0x00 // or r9, 128 LONG $0xc0cc8149; WORD $0x0000; BYTE $0x00 // or r12, 192 LONG $0x00cd8149; WORD $0x0001; BYTE $0x00 // or r13, 256 LONG $0x40c98148; WORD $0x0001; BYTE $0x00 // or rcx, 320 - LONG $0x80cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 384 + LONG $0x80ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 384 LONG $0xc0cb8148; WORD 
$0x0001; BYTE $0x00 // or rbx, 448 LONG $0x6e0f4166; BYTE $0xe6 // movd xmm4, r14d LONG $0xc40f4266; WORD $0x0624; BYTE $0x01 // pinsrw xmm4, word [rsi + r8], 1 @@ -7477,7 +7927,7 @@ LBB1_111: LONG $0xc40f4266; WORD $0x2624; BYTE $0x03 // pinsrw xmm4, word [rsi + r12], 3 LONG $0xc40f4266; WORD $0x2e24; BYTE $0x04 // pinsrw xmm4, word [rsi + r13], 4 LONG $0x24c40f66; WORD $0x050e // pinsrw xmm4, word [rsi + rcx], 5 - LONG $0x24c40f66; WORD $0x063e // pinsrw xmm4, word [rsi + rdi], 6 + LONG $0x24c40f66; WORD $0x0616 // pinsrw xmm4, word [rsi + rdx], 6 LONG $0x24c40f66; WORD $0x071e // pinsrw xmm4, word [rsi + rbx], 7 LONG $0x74b70f46; WORD $0x0a3e // movzx r14d, word [rsi + r15 + 10] LONG $0xf06e0f66 // movd xmm6, eax @@ -7485,16 +7935,16 @@ LBB1_111: QUAD $0x02020e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 2], 2 QUAD $0x03022674c40f4266 // pinsrw xmm6, word [rsi + r12 + 2], 3 LONG $0x44b70f42; WORD $0x0c3e // movzx eax, word [rsi + r15 + 12] - LONG $0x20244489 // mov dword [rsp + 32], eax + LONG $0x18244489 // mov dword [rsp + 24], eax QUAD $0x04022e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 2], 4 - LONG $0xd26e0f66 // movd xmm2, edx - LONG $0x54b70f42; WORD $0x0e3e // movzx edx, word [rsi + r15 + 14] + LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d + LONG $0x44b70f42; WORD $0x0e3e // movzx eax, word [rsi + r15 + 14] + LONG $0x10244489 // mov dword [rsp + 16], eax LONG $0x74c40f66; WORD $0x020e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 2], 5 - LONG $0x6e0f4166; BYTE $0xeb // movd xmm5, r11d - LONG $0x44b70f42; WORD $0x103e // movzx eax, word [rsi + r15 + 16] - LONG $0x18244489 // mov dword [rsp + 24], eax - LONG $0x74c40f66; WORD $0x023e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 2], 6 - LONG $0x6e0f4166; BYTE $0xda // movd xmm3, r10d + LONG $0xef6e0f66 // movd xmm5, edi + LONG $0x7cb70f42; WORD $0x103e // movzx edi, word [rsi + r15 + 16] + LONG $0x74c40f66; WORD $0x0216; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 2], 6 + LONG $0x6e0f4166; BYTE $0xdb // movd xmm3, r11d LONG $0x44b70f42; WORD $0x123e // movzx eax, word [rsi + r15 + 18] LONG $0x30244489 // mov dword [rsp + 48], eax LONG $0x74c40f66; WORD $0x021e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 2], 7 @@ -7504,7 +7954,7 @@ LBB1_111: LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 LONG $0xcef80f66 // psubb xmm1, xmm6 LONG $0x6e0f4166; BYTE $0xf6 // movd xmm6, r14d - LONG $0x5cb70f46; WORD $0x143e // movzx r11d, word [rsi + r15 + 20] + LONG $0x54b70f46; WORD $0x143e // movzx r10d, word [rsi + r15 + 20] LONG $0xe0750f66 // pcmpeqw xmm4, xmm0 LONG $0xe4630f66 // packsswb xmm4, xmm4 LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 @@ -7513,24 +7963,24 @@ LBB1_111: QUAD $0x03042654c40f4266 // pinsrw xmm2, word [rsi + r12 + 4], 3 QUAD $0x04042e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 4], 4 LONG $0x54c40f66; WORD $0x040e; BYTE $0x05 // pinsrw xmm2, word [rsi + rcx + 4], 5 - LONG $0x54c40f66; WORD $0x043e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 4], 6 + LONG $0x54c40f66; WORD $0x0416; BYTE $0x06 // pinsrw xmm2, word [rsi + rdx + 4], 6 LONG $0x54c40f66; WORD $0x041e; BYTE $0x07 // pinsrw xmm2, word [rsi + rbx + 4], 7 QUAD $0x0106066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 6], 1 QUAD $0x02060e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 6], 2 QUAD $0x0306266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 6], 3 QUAD $0x04062e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 6], 4 LONG $0x6cc40f66; WORD $0x060e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 6], 5 - LONG $0x6cc40f66; WORD $0x063e; BYTE $0x06 // pinsrw xmm5, word 
[rsi + rdi + 6], 6 + LONG $0x6cc40f66; WORD $0x0616; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 6], 6 LONG $0x6cc40f66; WORD $0x061e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 6], 7 QUAD $0x0108065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 8], 1 QUAD $0x02080e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 8], 2 QUAD $0x0308265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 8], 3 QUAD $0x04082e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 8], 4 LONG $0x5cc40f66; WORD $0x080e; BYTE $0x05 // pinsrw xmm3, word [rsi + rcx + 8], 5 - LONG $0x5cc40f66; WORD $0x083e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 8], 6 + LONG $0x5cc40f66; WORD $0x0816; BYTE $0x06 // pinsrw xmm3, word [rsi + rdx + 8], 6 LONG $0x5cc40f66; WORD $0x081e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 8], 7 LONG $0xcceb0f66 // por xmm1, xmm4 - LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] + LONG $0x7c6e0f66; WORD $0x1824 // movd xmm7, dword [rsp + 24] LONG $0x44b70f42; WORD $0x163e // movzx eax, word [rsi + r15 + 22] LONG $0xd0750f66 // pcmpeqw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -7538,8 +7988,8 @@ LBB1_111: LONG $0xf2710f66; BYTE $0x02 // psllw xmm2, 2 LONG $0xdb0f4166; BYTE $0xd1 // pand xmm2, xmm9 LONG $0xd1eb0f66 // por xmm2, xmm1 - LONG $0xe26e0f66 // movd xmm4, edx - LONG $0x54b70f42; WORD $0x183e // movzx edx, word [rsi + r15 + 24] + LONG $0x646e0f66; WORD $0x1024 // movd xmm4, dword [rsp + 16] + LONG $0x5cb70f46; WORD $0x183e // movzx r11d, word [rsi + r15 + 24] LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 LONG $0xed630f66 // packsswb xmm5, xmm5 LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 @@ -7551,21 +8001,21 @@ LBB1_111: LONG $0xf3710f66; BYTE $0x04 // psllw xmm3, 4 LONG $0xdb0f4166; BYTE $0xdb // pand xmm3, xmm11 LONG $0xddeb0f66 // por xmm3, xmm5 - LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] - LONG $0x54b70f46; WORD $0x1a3e // movzx r10d, word [rsi + r15 + 26] + LONG $0xcf6e0f66 // movd xmm1, edi + LONG $0x7cb70f42; WORD $0x1a3e // movzx edi, word [rsi + r15 + 26] QUAD $0x010a0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 10], 1 QUAD $0x020a0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 10], 2 QUAD $0x030a2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 10], 3 QUAD $0x040a2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 10], 4 LONG $0x74c40f66; WORD $0x0a0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 10], 5 - LONG $0x74c40f66; WORD $0x0a3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 10], 6 + LONG $0x74c40f66; WORD $0x0a16; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 10], 6 LONG $0x74c40f66; WORD $0x0a1e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 10], 7 QUAD $0x010c067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 12], 1 QUAD $0x020c0e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 12], 2 QUAD $0x030c267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 12], 3 QUAD $0x040c2e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 12], 4 LONG $0x7cc40f66; WORD $0x0c0e; BYTE $0x05 // pinsrw xmm7, word [rsi + rcx + 12], 5 - LONG $0x7cc40f66; WORD $0x0c3e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 12], 6 + LONG $0x7cc40f66; WORD $0x0c16; BYTE $0x06 // pinsrw xmm7, word [rsi + rdx + 12], 6 LONG $0x7cc40f66; WORD $0x0c1e; BYTE $0x07 // pinsrw xmm7, word [rsi + rbx + 12], 7 LONG $0xdaeb0f66 // por xmm3, xmm2 LONG $0x6e0f4466; WORD $0x2444; BYTE $0x30 // movd xmm8, dword [rsp + 48] @@ -7581,21 +8031,21 @@ LBB1_111: LONG $0xf7710f66; BYTE $0x06 // psllw xmm7, 6 LONG $0xdb0f4166; BYTE $0xfd // pand xmm7, xmm13 LONG $0xfeeb0f66 // por xmm7, xmm6 - LONG $0x6e0f4166; BYTE $0xeb // movd xmm5, r11d - 
LONG $0x5cb70f46; WORD $0x1e3e // movzx r11d, word [rsi + r15 + 30] + LONG $0x6e0f4166; BYTE $0xea // movd xmm5, r10d + LONG $0x54b70f46; WORD $0x1e3e // movzx r10d, word [rsi + r15 + 30] QUAD $0x010e0664c40f4266 // pinsrw xmm4, word [rsi + r8 + 14], 1 QUAD $0x020e0e64c40f4266 // pinsrw xmm4, word [rsi + r9 + 14], 2 QUAD $0x030e2664c40f4266 // pinsrw xmm4, word [rsi + r12 + 14], 3 QUAD $0x040e2e64c40f4266 // pinsrw xmm4, word [rsi + r13 + 14], 4 LONG $0x64c40f66; WORD $0x0e0e; BYTE $0x05 // pinsrw xmm4, word [rsi + rcx + 14], 5 - LONG $0x64c40f66; WORD $0x0e3e; BYTE $0x06 // pinsrw xmm4, word [rsi + rdi + 14], 6 + LONG $0x64c40f66; WORD $0x0e16; BYTE $0x06 // pinsrw xmm4, word [rsi + rdx + 14], 6 LONG $0x64c40f66; WORD $0x0e1e; BYTE $0x07 // pinsrw xmm4, word [rsi + rbx + 14], 7 QUAD $0x01120644c40f4666 // pinsrw xmm8, word [rsi + r8 + 18], 1 QUAD $0x02120e44c40f4666 // pinsrw xmm8, word [rsi + r9 + 18], 2 QUAD $0x03122644c40f4666 // pinsrw xmm8, word [rsi + r12 + 18], 3 QUAD $0x04122e44c40f4666 // pinsrw xmm8, word [rsi + r13 + 18], 4 QUAD $0x05120e44c40f4466 // pinsrw xmm8, word [rsi + rcx + 18], 5 - QUAD $0x06123e44c40f4466 // pinsrw xmm8, word [rsi + rdi + 18], 6 + QUAD $0x06121644c40f4466 // pinsrw xmm8, word [rsi + rdx + 18], 6 QUAD $0x07121e44c40f4466 // pinsrw xmm8, word [rsi + rbx + 18], 7 LONG $0xe0750f66 // pcmpeqw xmm4, xmm0 LONG $0xe4630f66 // packsswb xmm4, xmm4 @@ -7610,28 +8060,28 @@ LBB1_111: LONG $0x6f0f4166; BYTE $0xf8 // movdqa xmm7, xmm8 LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 LONG $0xf80f4166; BYTE $0xf8 // psubb xmm7, xmm8 - LONG $0xda6e0f66 // movd xmm3, edx - LONG $0x54b70f42; WORD $0x223e // movzx edx, word [rsi + r15 + 34] - LONG $0x20245489 // mov dword [rsp + 32], edx + LONG $0x6e0f4166; BYTE $0xdb // movd xmm3, r11d + LONG $0x5cb70f46; WORD $0x223e // movzx r11d, word [rsi + r15 + 34] QUAD $0x0110064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 16], 1 QUAD $0x02100e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 16], 2 QUAD $0x0310264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 16], 3 QUAD $0x04102e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 16], 4 LONG $0x4cc40f66; WORD $0x100e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 16], 5 - LONG $0x4cc40f66; WORD $0x103e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 16], 6 + LONG $0x4cc40f66; WORD $0x1016; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 16], 6 LONG $0x4cc40f66; WORD $0x101e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 16], 7 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 LONG $0xc9630f66 // packsswb xmm1, xmm1 LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 LONG $0xf9eb0f66 // por xmm7, xmm1 - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d - LONG $0x54b70f46; WORD $0x243e // movzx r10d, word [rsi + r15 + 36] + LONG $0xf76e0f66 // movd xmm6, edi + LONG $0x7cb70f42; WORD $0x243e // movzx edi, word [rsi + r15 + 36] + LONG $0x30247c89 // mov dword [rsp + 48], edi QUAD $0x0114066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 20], 1 QUAD $0x02140e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 20], 2 QUAD $0x0314266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 20], 3 QUAD $0x04142e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 20], 4 LONG $0x6cc40f66; WORD $0x140e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 20], 5 - LONG $0x6cc40f66; WORD $0x143e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 20], 6 + LONG $0x6cc40f66; WORD $0x1416; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 20], 6 LONG $0x6cc40f66; WORD $0x141e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 20], 7 LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 LONG $0xed630f66 // 
packsswb xmm5, xmm5 @@ -7640,21 +8090,21 @@ LBB1_111: LONG $0xdb0f4166; BYTE $0xe9 // pand xmm5, xmm9 LONG $0xefeb0f66 // por xmm5, xmm7 LONG $0x6e0f4166; BYTE $0xfe // movd xmm7, r14d - LONG $0x54b70f42; WORD $0x263e // movzx edx, word [rsi + r15 + 38] - LONG $0x18245489 // mov dword [rsp + 24], edx + LONG $0x7cb70f42; WORD $0x263e // movzx edi, word [rsi + r15 + 38] + LONG $0x10247c89 // mov dword [rsp + 16], edi QUAD $0x01160654c40f4266 // pinsrw xmm2, word [rsi + r8 + 22], 1 QUAD $0x02160e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 22], 2 QUAD $0x03162654c40f4266 // pinsrw xmm2, word [rsi + r12 + 22], 3 QUAD $0x04162e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 22], 4 LONG $0x54c40f66; WORD $0x160e; BYTE $0x05 // pinsrw xmm2, word [rsi + rcx + 22], 5 - LONG $0x54c40f66; WORD $0x163e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 22], 6 + LONG $0x54c40f66; WORD $0x1616; BYTE $0x06 // pinsrw xmm2, word [rsi + rdx + 22], 6 LONG $0x54c40f66; WORD $0x161e; BYTE $0x07 // pinsrw xmm2, word [rsi + rbx + 22], 7 QUAD $0x0118065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 24], 1 QUAD $0x02180e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 24], 2 QUAD $0x0318265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 24], 3 QUAD $0x04182e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 24], 4 LONG $0x5cc40f66; WORD $0x180e; BYTE $0x05 // pinsrw xmm3, word [rsi + rcx + 24], 5 - LONG $0x5cc40f66; WORD $0x183e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 24], 6 + LONG $0x5cc40f66; WORD $0x1816; BYTE $0x06 // pinsrw xmm3, word [rsi + rdx + 24], 6 LONG $0x5cc40f66; WORD $0x181e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 24], 7 LONG $0xd0750f66 // pcmpeqw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -7667,31 +8117,32 @@ LBB1_111: LONG $0xf3710f66; BYTE $0x04 // psllw xmm3, 4 LONG $0xdb0f4166; BYTE $0xdb // pand xmm3, xmm11 LONG $0xdaeb0f66 // por xmm3, xmm2 - LONG $0x6e0f4166; BYTE $0xd3 // movd xmm2, r11d - LONG $0x74b70f46; WORD $0x283e // movzx r14d, word [rsi + r15 + 40] + LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d + LONG $0x7cb70f42; WORD $0x283e // movzx edi, word [rsi + r15 + 40] LONG $0xddeb0f66 // por xmm3, xmm5 LONG $0xe86e0f66 // movd xmm5, eax - LONG $0x5cb70f46; WORD $0x2a3e // movzx r11d, word [rsi + r15 + 42] + LONG $0x44b70f42; WORD $0x2a3e // movzx eax, word [rsi + r15 + 42] + LONG $0x18244489 // mov dword [rsp + 24], eax QUAD $0x011a0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 26], 1 QUAD $0x021a0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 26], 2 QUAD $0x031a2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 26], 3 QUAD $0x041a2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 26], 4 LONG $0x74c40f66; WORD $0x1a0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 26], 5 - LONG $0x74c40f66; WORD $0x1a3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 26], 6 + LONG $0x74c40f66; WORD $0x1a16; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 26], 6 LONG $0x74c40f66; WORD $0x1a1e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 26], 7 QUAD $0x011c067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 28], 1 QUAD $0x021c0e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 28], 2 QUAD $0x031c267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 28], 3 QUAD $0x041c2e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 28], 4 LONG $0x7cc40f66; WORD $0x1c0e; BYTE $0x05 // pinsrw xmm7, word [rsi + rcx + 28], 5 - LONG $0x7cc40f66; WORD $0x1c3e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 28], 6 + LONG $0x7cc40f66; WORD $0x1c16; BYTE $0x06 // pinsrw xmm7, word [rsi + rdx + 28], 6 LONG $0x7cc40f66; WORD $0x1c1e; BYTE $0x07 // pinsrw xmm7, word [rsi + 
rbx + 28], 7 QUAD $0x011e0654c40f4266 // pinsrw xmm2, word [rsi + r8 + 30], 1 QUAD $0x021e0e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 30], 2 QUAD $0x031e2654c40f4266 // pinsrw xmm2, word [rsi + r12 + 30], 3 QUAD $0x041e2e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 30], 4 LONG $0x54c40f66; WORD $0x1e0e; BYTE $0x05 // pinsrw xmm2, word [rsi + rcx + 30], 5 - LONG $0x54c40f66; WORD $0x1e3e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 30], 6 + LONG $0x54c40f66; WORD $0x1e16; BYTE $0x06 // pinsrw xmm2, word [rsi + rdx + 30], 6 LONG $0x54c40f66; WORD $0x1e1e; BYTE $0x07 // pinsrw xmm2, word [rsi + rbx + 30], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -7704,27 +8155,27 @@ LBB1_111: LONG $0xf7710f66; BYTE $0x06 // psllw xmm7, 6 LONG $0xdb0f4166; BYTE $0xfd // pand xmm7, xmm13 LONG $0xfeeb0f66 // por xmm7, xmm6 - LONG $0x4c6e0f66; WORD $0x2024 // movd xmm1, dword [rsp + 32] - LONG $0x54b70f42; WORD $0x2c3e // movzx edx, word [rsi + r15 + 44] + LONG $0x6e0f4166; BYTE $0xcb // movd xmm1, r11d + LONG $0x54b70f46; WORD $0x2c3e // movzx r10d, word [rsi + r15 + 44] LONG $0xd0750f66 // pcmpeqw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 LONG $0xf2710f66; BYTE $0x07 // psllw xmm2, 7 LONG $0xdb0f4166; BYTE $0xd6 // pand xmm2, xmm14 LONG $0xd7eb0f66 // por xmm2, xmm7 - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d - LONG $0x44b70f42; WORD $0x2e3e // movzx eax, word [rsi + r15 + 46] + LONG $0x746e0f66; WORD $0x3024 // movd xmm6, dword [rsp + 48] + LONG $0x74b70f46; WORD $0x2e3e // movzx r14d, word [rsi + r15 + 46] QUAD $0x0120066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 32], 1 QUAD $0x02200e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 32], 2 QUAD $0x0320266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 32], 3 QUAD $0x04202e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 32], 4 LONG $0x6cc40f66; WORD $0x200e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 32], 5 - LONG $0x6cc40f66; WORD $0x203e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 32], 6 + LONG $0x6cc40f66; WORD $0x2016; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 32], 6 QUAD $0x0122064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 34], 1 QUAD $0x02220e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 34], 2 QUAD $0x0322264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 34], 3 QUAD $0x04222e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 34], 4 LONG $0x4cc40f66; WORD $0x220e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 34], 5 - LONG $0x4cc40f66; WORD $0x223e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 34], 6 + LONG $0x4cc40f66; WORD $0x2216; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 34], 6 LONG $0x4cc40f66; WORD $0x221e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 34], 7 LONG $0xd3eb0f66 // por xmm2, xmm3 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 @@ -7732,8 +8183,9 @@ LBB1_111: LONG $0xf96f0f66 // movdqa xmm7, xmm1 LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 LONG $0xf9f80f66 // psubb xmm7, xmm1 - LONG $0x5c6e0f66; WORD $0x1824 // movd xmm3, dword [rsp + 24] - LONG $0x54b70f46; WORD $0x303e // movzx r10d, word [rsi + r15 + 48] + LONG $0x5c6e0f66; WORD $0x1024 // movd xmm3, dword [rsp + 16] + LONG $0x44b70f42; WORD $0x303e // movzx eax, word [rsi + r15 + 48] + LONG $0x10244489 // mov dword [rsp + 16], eax LONG $0x6cc40f66; WORD $0x201e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 32], 7 LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 LONG $0xed630f66 // packsswb xmm5, xmm5 @@ -7743,32 +8195,33 @@ LBB1_111: QUAD $0x03242674c40f4266 // pinsrw xmm6, word [rsi + r12 + 36], 3 QUAD $0x04242e74c40f4266 // pinsrw xmm6, word [rsi + 
r13 + 36], 4 LONG $0x74c40f66; WORD $0x240e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 36], 5 - LONG $0x74c40f66; WORD $0x243e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 36], 6 + LONG $0x74c40f66; WORD $0x2416; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 36], 6 LONG $0x74c40f66; WORD $0x241e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 36], 7 QUAD $0x0126065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 38], 1 QUAD $0x02260e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 38], 2 QUAD $0x0326265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 38], 3 QUAD $0x04262e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 38], 4 LONG $0x5cc40f66; WORD $0x260e; BYTE $0x05 // pinsrw xmm3, word [rsi + rcx + 38], 5 - LONG $0x5cc40f66; WORD $0x263e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 38], 6 + LONG $0x5cc40f66; WORD $0x2616; BYTE $0x06 // pinsrw xmm3, word [rsi + rdx + 38], 6 LONG $0x5cc40f66; WORD $0x261e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 38], 7 LONG $0xfdeb0f66 // por xmm7, xmm5 - LONG $0x6e0f4166; BYTE $0xee // movd xmm5, r14d + LONG $0xef6e0f66 // movd xmm5, edi QUAD $0x0128066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 40], 1 QUAD $0x02280e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 40], 2 QUAD $0x0328266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 40], 3 QUAD $0x04282e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 40], 4 LONG $0x6cc40f66; WORD $0x280e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 40], 5 - LONG $0x6cc40f66; WORD $0x283e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 40], 6 - LONG $0x74b70f46; WORD $0x323e // movzx r14d, word [rsi + r15 + 50] + LONG $0x6cc40f66; WORD $0x2816; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 40], 6 + LONG $0x44b70f42; WORD $0x323e // movzx eax, word [rsi + r15 + 50] LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 LONG $0xf6710f66; BYTE $0x02 // psllw xmm6, 2 LONG $0xdb0f4166; BYTE $0xf1 // pand xmm6, xmm9 LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x6e0f4166; BYTE $0xcb // movd xmm1, r11d - LONG $0x5cb70f46; WORD $0x343e // movzx r11d, word [rsi + r15 + 52] + LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] + LONG $0x7cb70f42; WORD $0x343e // movzx edi, word [rsi + r15 + 52] + LONG $0x18247c89 // mov dword [rsp + 24], edi LONG $0x6cc40f66; WORD $0x281e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 40], 7 LONG $0xd8750f66 // pcmpeqw xmm3, xmm0 LONG $0xdb630f66 // packsswb xmm3, xmm3 @@ -7781,24 +8234,24 @@ LBB1_111: LONG $0xf5710f66; BYTE $0x04 // psllw xmm5, 4 LONG $0xdb0f4166; BYTE $0xeb // pand xmm5, xmm11 LONG $0xebeb0f66 // por xmm5, xmm3 - LONG $0xfa6e0f66 // movd xmm7, edx - LONG $0x54b70f42; WORD $0x363e // movzx edx, word [rsi + r15 + 54] + LONG $0x6e0f4166; BYTE $0xfa // movd xmm7, r10d + LONG $0x5cb70f46; WORD $0x363e // movzx r11d, word [rsi + r15 + 54] QUAD $0x012a064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 42], 1 QUAD $0x022a0e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 42], 2 QUAD $0x032a264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 42], 3 QUAD $0x042a2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 42], 4 LONG $0x4cc40f66; WORD $0x2a0e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 42], 5 - LONG $0x4cc40f66; WORD $0x2a3e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 42], 6 + LONG $0x4cc40f66; WORD $0x2a16; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 42], 6 LONG $0x4cc40f66; WORD $0x2a1e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 42], 7 QUAD $0x012c067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 44], 1 QUAD $0x022c0e7cc40f4266 // pinsrw xmm7, 
word [rsi + r9 + 44], 2 QUAD $0x032c267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 44], 3 QUAD $0x042c2e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 44], 4 LONG $0x7cc40f66; WORD $0x2c0e; BYTE $0x05 // pinsrw xmm7, word [rsi + rcx + 44], 5 - LONG $0x7cc40f66; WORD $0x2c3e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 44], 6 + LONG $0x7cc40f66; WORD $0x2c16; BYTE $0x06 // pinsrw xmm7, word [rsi + rdx + 44], 6 LONG $0xeeeb0f66 // por xmm5, xmm6 - LONG $0xd86e0f66 // movd xmm3, eax - LONG $0x44b70f42; WORD $0x383e // movzx eax, word [rsi + r15 + 56] + LONG $0x6e0f4166; BYTE $0xde // movd xmm3, r14d + LONG $0x54b70f46; WORD $0x383e // movzx r10d, word [rsi + r15 + 56] LONG $0x7cc40f66; WORD $0x2c1e; BYTE $0x07 // pinsrw xmm7, word [rsi + rbx + 44], 7 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 LONG $0xc9630f66 // packsswb xmm1, xmm1 @@ -7811,29 +8264,29 @@ LBB1_111: LONG $0xf7710f66; BYTE $0x06 // psllw xmm7, 6 LONG $0xdb0f4166; BYTE $0xfd // pand xmm7, xmm13 LONG $0xf9eb0f66 // por xmm7, xmm1 - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d - LONG $0x54b70f46; WORD $0x3a3e // movzx r10d, word [rsi + r15 + 58] + LONG $0x746e0f66; WORD $0x1024 // movd xmm6, dword [rsp + 16] + LONG $0x74b70f46; WORD $0x3a3e // movzx r14d, word [rsi + r15 + 58] QUAD $0x012e065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 46], 1 QUAD $0x022e0e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 46], 2 QUAD $0x032e265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 46], 3 QUAD $0x042e2e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 46], 4 LONG $0x5cc40f66; WORD $0x2e0e; BYTE $0x05 // pinsrw xmm3, word [rsi + rcx + 46], 5 - LONG $0x5cc40f66; WORD $0x2e3e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 46], 6 + LONG $0x5cc40f66; WORD $0x2e16; BYTE $0x06 // pinsrw xmm3, word [rsi + rdx + 46], 6 LONG $0x5cc40f66; WORD $0x2e1e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 46], 7 LONG $0xd8750f66 // pcmpeqw xmm3, xmm0 LONG $0xdb630f66 // packsswb xmm3, xmm3 LONG $0xf3710f66; BYTE $0x07 // psllw xmm3, 7 LONG $0xdb0f4166; BYTE $0xde // pand xmm3, xmm14 LONG $0xdfeb0f66 // por xmm3, xmm7 - LONG $0x6e0f4166; BYTE $0xce // movd xmm1, r14d - LONG $0x74b70f46; WORD $0x3c3e // movzx r14d, word [rsi + r15 + 60] + LONG $0xc86e0f66 // movd xmm1, eax + LONG $0x7cb70f42; WORD $0x3c3e // movzx edi, word [rsi + r15 + 60] LONG $0x7cb70f46; WORD $0x3e3e // movzx r15d, word [rsi + r15 + 62] QUAD $0x0132064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 50], 1 QUAD $0x02320e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 50], 2 QUAD $0x0332264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 50], 3 QUAD $0x04322e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 50], 4 LONG $0x4cc40f66; WORD $0x320e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 50], 5 - LONG $0x4cc40f66; WORD $0x323e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 50], 6 + LONG $0x4cc40f66; WORD $0x3216; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 50], 6 LONG $0x4cc40f66; WORD $0x321e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 50], 7 LONG $0xddeb0f66 // por xmm3, xmm5 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 @@ -7841,13 +8294,14 @@ LBB1_111: LONG $0xe96f0f66 // movdqa xmm5, xmm1 LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 LONG $0xe9f80f66 // psubb xmm5, xmm1 - LONG $0x6e0f4166; BYTE $0xcb // movd xmm1, r11d + LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x01300674c40f4266 // pinsrw xmm6, word [rsi + r8 + 48], 1 QUAD $0x02300e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 48], 2 QUAD $0x03302674c40f4266 // pinsrw xmm6, word [rsi + r12 + 
48], 3 QUAD $0x04302e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 48], 4 LONG $0x74c40f66; WORD $0x300e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 48], 5 - LONG $0x74c40f66; WORD $0x303e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 48], 6 + LONG $0x74c40f66; WORD $0x3016; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 48], 6 LONG $0x74c40f66; WORD $0x301e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 48], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -7857,9 +8311,9 @@ LBB1_111: QUAD $0x04342e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 52], 4 LONG $0x4cc40f66; WORD $0x340e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 52], 5 LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0x4cc40f66; WORD $0x343e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 52], 6 + LONG $0x4cc40f66; WORD $0x3416; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 52], 6 LONG $0xeeeb0f66 // por xmm5, xmm6 - LONG $0xf26e0f66 // movd xmm6, edx + LONG $0x6e0f4166; BYTE $0xf3 // movd xmm6, r11d LONG $0x4cc40f66; WORD $0x341e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 52], 7 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 LONG $0xc9630f66 // packsswb xmm1, xmm1 @@ -7867,20 +8321,20 @@ LBB1_111: LONG $0xf1710f66; BYTE $0x02 // psllw xmm1, 2 LONG $0xdb0f4166; BYTE $0xc9 // pand xmm1, xmm9 LONG $0xcdeb0f66 // por xmm1, xmm5 - LONG $0xe86e0f66 // movd xmm5, eax + LONG $0x6e0f4166; BYTE $0xea // movd xmm5, r10d QUAD $0x01360674c40f4266 // pinsrw xmm6, word [rsi + r8 + 54], 1 QUAD $0x02360e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 54], 2 QUAD $0x03362674c40f4266 // pinsrw xmm6, word [rsi + r12 + 54], 3 QUAD $0x04362e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 54], 4 LONG $0x74c40f66; WORD $0x360e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 54], 5 - LONG $0x74c40f66; WORD $0x363e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 54], 6 + LONG $0x74c40f66; WORD $0x3616; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 54], 6 LONG $0x74c40f66; WORD $0x361e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 54], 7 QUAD $0x0138066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 56], 1 QUAD $0x02380e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 56], 2 QUAD $0x0338266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 56], 3 QUAD $0x04382e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 56], 4 LONG $0x6cc40f66; WORD $0x380e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 56], 5 - LONG $0x6cc40f66; WORD $0x383e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 56], 6 + LONG $0x6cc40f66; WORD $0x3816; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 56], 6 LONG $0x6cc40f66; WORD $0x381e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 56], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -7893,22 +8347,22 @@ LBB1_111: LONG $0xf5710f66; BYTE $0x04 // psllw xmm5, 4 LONG $0xdb0f4166; BYTE $0xeb // pand xmm5, xmm11 LONG $0xeeeb0f66 // por xmm5, xmm6 - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d + LONG $0x6e0f4166; BYTE $0xf6 // movd xmm6, r14d QUAD $0x013a0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 58], 1 QUAD $0x023a0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 58], 2 QUAD $0x033a2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 58], 3 QUAD $0x043a2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 58], 4 LONG $0x74c40f66; WORD $0x3a0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 58], 5 - LONG $0x74c40f66; WORD $0x3a3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 58], 6 + LONG $0x74c40f66; WORD $0x3a16; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 58], 6 LONG $0x74c40f66; WORD $0x3a1e; BYTE $0x07 // pinsrw xmm6, 
word [rsi + rbx + 58], 7 LONG $0xe9eb0f66 // por xmm5, xmm1 - LONG $0x6e0f4166; BYTE $0xce // movd xmm1, r14d + LONG $0xcf6e0f66 // movd xmm1, edi QUAD $0x013c064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 60], 1 QUAD $0x023c0e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 60], 2 QUAD $0x033c264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 60], 3 QUAD $0x043c2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 60], 4 LONG $0x4cc40f66; WORD $0x3c0e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 60], 5 - LONG $0x4cc40f66; WORD $0x3c3e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 60], 6 + LONG $0x4cc40f66; WORD $0x3c16; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 60], 6 LONG $0x4cc40f66; WORD $0x3c1e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 60], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -7925,10 +8379,9 @@ LBB1_111: QUAD $0x013e0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 62], 1 QUAD $0x023e0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 62], 2 QUAD $0x033e2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 62], 3 - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] QUAD $0x043e2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 62], 4 LONG $0x74c40f66; WORD $0x3e0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 62], 5 - LONG $0x74c40f66; WORD $0x3e3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 62], 6 + LONG $0x74c40f66; WORD $0x3e16; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 62], 6 LONG $0x74c40f66; WORD $0x3e1e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 62], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -7948,31 +8401,30 @@ LBB1_111: LONG $0xe2600f66 // punpcklbw xmm4, xmm2 LONG $0xe3610f66 // punpcklwd xmm4, xmm3 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - LONG $0x7f0f41f3; WORD $0x8e24 // movdqu oword [r14 + 4*rcx], xmm4 - LONG $0x7f0f41f3; WORD $0x8e4c; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm1 + LONG $0x247f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm4 + LONG $0x4c7f0ff3; WORD $0x1088 // movdqu oword [rax + 4*rcx + 16], xmm1 LONG $0x08c18348 // add rcx, 8 WORD $0x8949; BYTE $0xcf // mov r15, rcx - LONG $0x244c3b48; BYTE $0x10 // cmp rcx, qword [rsp + 16] - JNE LBB1_111 - QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] - LONG $0x245c3b4c; BYTE $0x10 // cmp r11, qword [rsp + 16] + LONG $0x244c3b48; BYTE $0x20 // cmp rcx, qword [rsp + 32] + JNE LBB1_112 + QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] + LONG $0x247c3b4c; BYTE $0x20 // cmp r15, qword [rsp + 32] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - LONG $0x246c8b44; BYTE $0x38 // mov r13d, dword [rsp + 56] - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - JNE LBB1_113 - JMP LBB1_116 + LONG $0x245c8b44; BYTE $0x04 // mov r11d, dword [rsp + 4] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + JNE LBB1_114 + JMP LBB1_117 -LBB1_133: +LBB1_134: LONG $0xf8e78349 // and r15, -8 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x06e0c148 // shl rax, 6 WORD $0x0148; BYTE $0xf0 // add rax, rsi - LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax - LONG $0x247c894c; BYTE $0x10 // mov qword [rsp + 16], r15 + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax - LONG $0x246c8944; BYTE $0x38 // mov dword [rsp + 56], r13d - LONG $0x6e0f4166; BYTE $0xc5 // movd xmm0, r13d + LONG $0x6e0f4166; BYTE $0xc3 // movd xmm0, r11d LONG 
$0xc0700ff2; BYTE $0xe0 // pshuflw xmm0, xmm0, 224 LONG $0xc0700f66; BYTE $0x00 // pshufd xmm0, xmm0, 0 WORD $0x3145; BYTE $0xff // xor r15d, r15d @@ -7985,27 +8437,27 @@ LBB1_133: LONG $0x6f0f4466; WORD $0x6075 // movdqa xmm14, oword 96[rbp] /* [rip + .LCPI1_6] */ QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 -LBB1_134: +LBB1_135: LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LONG $0x06e7c149 // shl r15, 6 WORD $0x894d; BYTE $0xf9 // mov r9, r15 WORD $0x894d; BYTE $0xfc // mov r12, r15 WORD $0x894d; BYTE $0xfd // mov r13, r15 WORD $0x894c; BYTE $0xf9 // mov rcx, r15 - WORD $0x894c; BYTE $0xff // mov rdi, r15 + WORD $0x894c; BYTE $0xfa // mov rdx, r15 WORD $0x894c; BYTE $0xfb // mov rbx, r15 LONG $0x34b70f46; BYTE $0x3e // movzx r14d, word [rsi + r15] LONG $0x44b70f42; WORD $0x023e // movzx eax, word [rsi + r15 + 2] - LONG $0x54b70f42; WORD $0x043e // movzx edx, word [rsi + r15 + 4] - LONG $0x5cb70f46; WORD $0x063e // movzx r11d, word [rsi + r15 + 6] - LONG $0x54b70f46; WORD $0x083e // movzx r10d, word [rsi + r15 + 8] + LONG $0x54b70f46; WORD $0x043e // movzx r10d, word [rsi + r15 + 4] + LONG $0x7cb70f42; WORD $0x063e // movzx edi, word [rsi + r15 + 6] + LONG $0x5cb70f46; WORD $0x083e // movzx r11d, word [rsi + r15 + 8] WORD $0x894d; BYTE $0xf8 // mov r8, r15 LONG $0x40c88349 // or r8, 64 LONG $0x80c98149; WORD $0x0000; BYTE $0x00 // or r9, 128 LONG $0xc0cc8149; WORD $0x0000; BYTE $0x00 // or r12, 192 LONG $0x00cd8149; WORD $0x0001; BYTE $0x00 // or r13, 256 LONG $0x40c98148; WORD $0x0001; BYTE $0x00 // or rcx, 320 - LONG $0x80cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 384 + LONG $0x80ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 384 LONG $0xc0cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 448 LONG $0x6e0f4166; BYTE $0xe6 // movd xmm4, r14d LONG $0xc40f4266; WORD $0x0624; BYTE $0x01 // pinsrw xmm4, word [rsi + r8], 1 @@ -8013,7 +8465,7 @@ LBB1_134: LONG $0xc40f4266; WORD $0x2624; BYTE $0x03 // pinsrw xmm4, word [rsi + r12], 3 LONG $0xc40f4266; WORD $0x2e24; BYTE $0x04 // pinsrw xmm4, word [rsi + r13], 4 LONG $0x24c40f66; WORD $0x050e // pinsrw xmm4, word [rsi + rcx], 5 - LONG $0x24c40f66; WORD $0x063e // pinsrw xmm4, word [rsi + rdi], 6 + LONG $0x24c40f66; WORD $0x0616 // pinsrw xmm4, word [rsi + rdx], 6 LONG $0x24c40f66; WORD $0x071e // pinsrw xmm4, word [rsi + rbx], 7 LONG $0x74b70f46; WORD $0x0a3e // movzx r14d, word [rsi + r15 + 10] LONG $0xf06e0f66 // movd xmm6, eax @@ -8021,16 +8473,16 @@ LBB1_134: QUAD $0x02020e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 2], 2 QUAD $0x03022674c40f4266 // pinsrw xmm6, word [rsi + r12 + 2], 3 LONG $0x44b70f42; WORD $0x0c3e // movzx eax, word [rsi + r15 + 12] - LONG $0x20244489 // mov dword [rsp + 32], eax + LONG $0x18244489 // mov dword [rsp + 24], eax QUAD $0x04022e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 2], 4 - LONG $0xd26e0f66 // movd xmm2, edx - LONG $0x54b70f42; WORD $0x0e3e // movzx edx, word [rsi + r15 + 14] + LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d + LONG $0x44b70f42; WORD $0x0e3e // movzx eax, word [rsi + r15 + 14] + LONG $0x10244489 // mov dword [rsp + 16], eax LONG $0x74c40f66; WORD $0x020e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 2], 5 - LONG $0x6e0f4166; BYTE $0xeb // movd xmm5, r11d - LONG $0x44b70f42; WORD $0x103e // movzx eax, word [rsi + r15 + 16] - LONG $0x18244489 // mov dword [rsp + 24], eax - LONG $0x74c40f66; WORD $0x023e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 2], 6 - LONG $0x6e0f4166; BYTE $0xda // movd xmm3, r10d + LONG $0xef6e0f66 // movd xmm5, edi + LONG $0x7cb70f42; WORD $0x103e // 
movzx edi, word [rsi + r15 + 16] + LONG $0x74c40f66; WORD $0x0216; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 2], 6 + LONG $0x6e0f4166; BYTE $0xdb // movd xmm3, r11d LONG $0x44b70f42; WORD $0x123e // movzx eax, word [rsi + r15 + 18] LONG $0x30244489 // mov dword [rsp + 48], eax LONG $0x74c40f66; WORD $0x021e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 2], 7 @@ -8040,7 +8492,7 @@ LBB1_134: LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 LONG $0xcef80f66 // psubb xmm1, xmm6 LONG $0x6e0f4166; BYTE $0xf6 // movd xmm6, r14d - LONG $0x5cb70f46; WORD $0x143e // movzx r11d, word [rsi + r15 + 20] + LONG $0x54b70f46; WORD $0x143e // movzx r10d, word [rsi + r15 + 20] LONG $0xe0750f66 // pcmpeqw xmm4, xmm0 LONG $0xe4630f66 // packsswb xmm4, xmm4 LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 @@ -8049,24 +8501,24 @@ LBB1_134: QUAD $0x03042654c40f4266 // pinsrw xmm2, word [rsi + r12 + 4], 3 QUAD $0x04042e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 4], 4 LONG $0x54c40f66; WORD $0x040e; BYTE $0x05 // pinsrw xmm2, word [rsi + rcx + 4], 5 - LONG $0x54c40f66; WORD $0x043e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 4], 6 + LONG $0x54c40f66; WORD $0x0416; BYTE $0x06 // pinsrw xmm2, word [rsi + rdx + 4], 6 LONG $0x54c40f66; WORD $0x041e; BYTE $0x07 // pinsrw xmm2, word [rsi + rbx + 4], 7 QUAD $0x0106066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 6], 1 QUAD $0x02060e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 6], 2 QUAD $0x0306266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 6], 3 QUAD $0x04062e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 6], 4 LONG $0x6cc40f66; WORD $0x060e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 6], 5 - LONG $0x6cc40f66; WORD $0x063e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 6], 6 + LONG $0x6cc40f66; WORD $0x0616; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 6], 6 LONG $0x6cc40f66; WORD $0x061e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 6], 7 QUAD $0x0108065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 8], 1 QUAD $0x02080e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 8], 2 QUAD $0x0308265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 8], 3 QUAD $0x04082e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 8], 4 LONG $0x5cc40f66; WORD $0x080e; BYTE $0x05 // pinsrw xmm3, word [rsi + rcx + 8], 5 - LONG $0x5cc40f66; WORD $0x083e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 8], 6 + LONG $0x5cc40f66; WORD $0x0816; BYTE $0x06 // pinsrw xmm3, word [rsi + rdx + 8], 6 LONG $0x5cc40f66; WORD $0x081e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 8], 7 LONG $0xcceb0f66 // por xmm1, xmm4 - LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] + LONG $0x7c6e0f66; WORD $0x1824 // movd xmm7, dword [rsp + 24] LONG $0x44b70f42; WORD $0x163e // movzx eax, word [rsi + r15 + 22] LONG $0xd0750f66 // pcmpeqw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -8074,8 +8526,8 @@ LBB1_134: LONG $0xf2710f66; BYTE $0x02 // psllw xmm2, 2 LONG $0xdb0f4166; BYTE $0xd1 // pand xmm2, xmm9 LONG $0xd1eb0f66 // por xmm2, xmm1 - LONG $0xe26e0f66 // movd xmm4, edx - LONG $0x54b70f42; WORD $0x183e // movzx edx, word [rsi + r15 + 24] + LONG $0x646e0f66; WORD $0x1024 // movd xmm4, dword [rsp + 16] + LONG $0x5cb70f46; WORD $0x183e // movzx r11d, word [rsi + r15 + 24] LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 LONG $0xed630f66 // packsswb xmm5, xmm5 LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 @@ -8087,21 +8539,21 @@ LBB1_134: LONG $0xf3710f66; BYTE $0x04 // psllw xmm3, 4 LONG $0xdb0f4166; BYTE $0xdb // pand xmm3, xmm11 LONG $0xddeb0f66 // por xmm3, xmm5 - LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] - 
LONG $0x54b70f46; WORD $0x1a3e // movzx r10d, word [rsi + r15 + 26] + LONG $0xcf6e0f66 // movd xmm1, edi + LONG $0x7cb70f42; WORD $0x1a3e // movzx edi, word [rsi + r15 + 26] QUAD $0x010a0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 10], 1 QUAD $0x020a0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 10], 2 QUAD $0x030a2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 10], 3 QUAD $0x040a2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 10], 4 LONG $0x74c40f66; WORD $0x0a0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 10], 5 - LONG $0x74c40f66; WORD $0x0a3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 10], 6 + LONG $0x74c40f66; WORD $0x0a16; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 10], 6 LONG $0x74c40f66; WORD $0x0a1e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 10], 7 QUAD $0x010c067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 12], 1 QUAD $0x020c0e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 12], 2 QUAD $0x030c267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 12], 3 QUAD $0x040c2e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 12], 4 LONG $0x7cc40f66; WORD $0x0c0e; BYTE $0x05 // pinsrw xmm7, word [rsi + rcx + 12], 5 - LONG $0x7cc40f66; WORD $0x0c3e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 12], 6 + LONG $0x7cc40f66; WORD $0x0c16; BYTE $0x06 // pinsrw xmm7, word [rsi + rdx + 12], 6 LONG $0x7cc40f66; WORD $0x0c1e; BYTE $0x07 // pinsrw xmm7, word [rsi + rbx + 12], 7 LONG $0xdaeb0f66 // por xmm3, xmm2 LONG $0x6e0f4466; WORD $0x2444; BYTE $0x30 // movd xmm8, dword [rsp + 48] @@ -8117,21 +8569,21 @@ LBB1_134: LONG $0xf7710f66; BYTE $0x06 // psllw xmm7, 6 LONG $0xdb0f4166; BYTE $0xfd // pand xmm7, xmm13 LONG $0xfeeb0f66 // por xmm7, xmm6 - LONG $0x6e0f4166; BYTE $0xeb // movd xmm5, r11d - LONG $0x5cb70f46; WORD $0x1e3e // movzx r11d, word [rsi + r15 + 30] + LONG $0x6e0f4166; BYTE $0xea // movd xmm5, r10d + LONG $0x54b70f46; WORD $0x1e3e // movzx r10d, word [rsi + r15 + 30] QUAD $0x010e0664c40f4266 // pinsrw xmm4, word [rsi + r8 + 14], 1 QUAD $0x020e0e64c40f4266 // pinsrw xmm4, word [rsi + r9 + 14], 2 QUAD $0x030e2664c40f4266 // pinsrw xmm4, word [rsi + r12 + 14], 3 QUAD $0x040e2e64c40f4266 // pinsrw xmm4, word [rsi + r13 + 14], 4 LONG $0x64c40f66; WORD $0x0e0e; BYTE $0x05 // pinsrw xmm4, word [rsi + rcx + 14], 5 - LONG $0x64c40f66; WORD $0x0e3e; BYTE $0x06 // pinsrw xmm4, word [rsi + rdi + 14], 6 + LONG $0x64c40f66; WORD $0x0e16; BYTE $0x06 // pinsrw xmm4, word [rsi + rdx + 14], 6 LONG $0x64c40f66; WORD $0x0e1e; BYTE $0x07 // pinsrw xmm4, word [rsi + rbx + 14], 7 QUAD $0x01120644c40f4666 // pinsrw xmm8, word [rsi + r8 + 18], 1 QUAD $0x02120e44c40f4666 // pinsrw xmm8, word [rsi + r9 + 18], 2 QUAD $0x03122644c40f4666 // pinsrw xmm8, word [rsi + r12 + 18], 3 QUAD $0x04122e44c40f4666 // pinsrw xmm8, word [rsi + r13 + 18], 4 QUAD $0x05120e44c40f4466 // pinsrw xmm8, word [rsi + rcx + 18], 5 - QUAD $0x06123e44c40f4466 // pinsrw xmm8, word [rsi + rdi + 18], 6 + QUAD $0x06121644c40f4466 // pinsrw xmm8, word [rsi + rdx + 18], 6 QUAD $0x07121e44c40f4466 // pinsrw xmm8, word [rsi + rbx + 18], 7 LONG $0xe0750f66 // pcmpeqw xmm4, xmm0 LONG $0xe4630f66 // packsswb xmm4, xmm4 @@ -8146,28 +8598,28 @@ LBB1_134: LONG $0x6f0f4166; BYTE $0xf8 // movdqa xmm7, xmm8 LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 LONG $0xf80f4166; BYTE $0xf8 // psubb xmm7, xmm8 - LONG $0xda6e0f66 // movd xmm3, edx - LONG $0x54b70f42; WORD $0x223e // movzx edx, word [rsi + r15 + 34] - LONG $0x20245489 // mov dword [rsp + 32], edx + LONG $0x6e0f4166; BYTE $0xdb // movd xmm3, r11d + LONG $0x5cb70f46; WORD $0x223e // movzx r11d, word [rsi + r15 + 
34] QUAD $0x0110064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 16], 1 QUAD $0x02100e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 16], 2 QUAD $0x0310264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 16], 3 QUAD $0x04102e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 16], 4 LONG $0x4cc40f66; WORD $0x100e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 16], 5 - LONG $0x4cc40f66; WORD $0x103e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 16], 6 + LONG $0x4cc40f66; WORD $0x1016; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 16], 6 LONG $0x4cc40f66; WORD $0x101e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 16], 7 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 LONG $0xc9630f66 // packsswb xmm1, xmm1 LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 LONG $0xf9eb0f66 // por xmm7, xmm1 - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d - LONG $0x54b70f46; WORD $0x243e // movzx r10d, word [rsi + r15 + 36] + LONG $0xf76e0f66 // movd xmm6, edi + LONG $0x7cb70f42; WORD $0x243e // movzx edi, word [rsi + r15 + 36] + LONG $0x30247c89 // mov dword [rsp + 48], edi QUAD $0x0114066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 20], 1 QUAD $0x02140e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 20], 2 QUAD $0x0314266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 20], 3 QUAD $0x04142e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 20], 4 LONG $0x6cc40f66; WORD $0x140e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 20], 5 - LONG $0x6cc40f66; WORD $0x143e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 20], 6 + LONG $0x6cc40f66; WORD $0x1416; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 20], 6 LONG $0x6cc40f66; WORD $0x141e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 20], 7 LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 LONG $0xed630f66 // packsswb xmm5, xmm5 @@ -8176,21 +8628,21 @@ LBB1_134: LONG $0xdb0f4166; BYTE $0xe9 // pand xmm5, xmm9 LONG $0xefeb0f66 // por xmm5, xmm7 LONG $0x6e0f4166; BYTE $0xfe // movd xmm7, r14d - LONG $0x54b70f42; WORD $0x263e // movzx edx, word [rsi + r15 + 38] - LONG $0x18245489 // mov dword [rsp + 24], edx + LONG $0x7cb70f42; WORD $0x263e // movzx edi, word [rsi + r15 + 38] + LONG $0x10247c89 // mov dword [rsp + 16], edi QUAD $0x01160654c40f4266 // pinsrw xmm2, word [rsi + r8 + 22], 1 QUAD $0x02160e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 22], 2 QUAD $0x03162654c40f4266 // pinsrw xmm2, word [rsi + r12 + 22], 3 QUAD $0x04162e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 22], 4 LONG $0x54c40f66; WORD $0x160e; BYTE $0x05 // pinsrw xmm2, word [rsi + rcx + 22], 5 - LONG $0x54c40f66; WORD $0x163e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 22], 6 + LONG $0x54c40f66; WORD $0x1616; BYTE $0x06 // pinsrw xmm2, word [rsi + rdx + 22], 6 LONG $0x54c40f66; WORD $0x161e; BYTE $0x07 // pinsrw xmm2, word [rsi + rbx + 22], 7 QUAD $0x0118065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 24], 1 QUAD $0x02180e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 24], 2 QUAD $0x0318265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 24], 3 QUAD $0x04182e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 24], 4 LONG $0x5cc40f66; WORD $0x180e; BYTE $0x05 // pinsrw xmm3, word [rsi + rcx + 24], 5 - LONG $0x5cc40f66; WORD $0x183e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 24], 6 + LONG $0x5cc40f66; WORD $0x1816; BYTE $0x06 // pinsrw xmm3, word [rsi + rdx + 24], 6 LONG $0x5cc40f66; WORD $0x181e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 24], 7 LONG $0xd0750f66 // pcmpeqw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -8203,31 +8655,32 @@ LBB1_134: LONG $0xf3710f66; BYTE $0x04 // psllw xmm3, 4 LONG $0xdb0f4166; BYTE $0xdb // pand xmm3, xmm11 LONG $0xdaeb0f66 // 
por xmm3, xmm2 - LONG $0x6e0f4166; BYTE $0xd3 // movd xmm2, r11d - LONG $0x74b70f46; WORD $0x283e // movzx r14d, word [rsi + r15 + 40] + LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d + LONG $0x7cb70f42; WORD $0x283e // movzx edi, word [rsi + r15 + 40] LONG $0xddeb0f66 // por xmm3, xmm5 LONG $0xe86e0f66 // movd xmm5, eax - LONG $0x5cb70f46; WORD $0x2a3e // movzx r11d, word [rsi + r15 + 42] + LONG $0x44b70f42; WORD $0x2a3e // movzx eax, word [rsi + r15 + 42] + LONG $0x18244489 // mov dword [rsp + 24], eax QUAD $0x011a0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 26], 1 QUAD $0x021a0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 26], 2 QUAD $0x031a2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 26], 3 QUAD $0x041a2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 26], 4 LONG $0x74c40f66; WORD $0x1a0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 26], 5 - LONG $0x74c40f66; WORD $0x1a3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 26], 6 + LONG $0x74c40f66; WORD $0x1a16; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 26], 6 LONG $0x74c40f66; WORD $0x1a1e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 26], 7 QUAD $0x011c067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 28], 1 QUAD $0x021c0e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 28], 2 QUAD $0x031c267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 28], 3 QUAD $0x041c2e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 28], 4 LONG $0x7cc40f66; WORD $0x1c0e; BYTE $0x05 // pinsrw xmm7, word [rsi + rcx + 28], 5 - LONG $0x7cc40f66; WORD $0x1c3e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 28], 6 + LONG $0x7cc40f66; WORD $0x1c16; BYTE $0x06 // pinsrw xmm7, word [rsi + rdx + 28], 6 LONG $0x7cc40f66; WORD $0x1c1e; BYTE $0x07 // pinsrw xmm7, word [rsi + rbx + 28], 7 QUAD $0x011e0654c40f4266 // pinsrw xmm2, word [rsi + r8 + 30], 1 QUAD $0x021e0e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 30], 2 QUAD $0x031e2654c40f4266 // pinsrw xmm2, word [rsi + r12 + 30], 3 QUAD $0x041e2e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 30], 4 LONG $0x54c40f66; WORD $0x1e0e; BYTE $0x05 // pinsrw xmm2, word [rsi + rcx + 30], 5 - LONG $0x54c40f66; WORD $0x1e3e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 30], 6 + LONG $0x54c40f66; WORD $0x1e16; BYTE $0x06 // pinsrw xmm2, word [rsi + rdx + 30], 6 LONG $0x54c40f66; WORD $0x1e1e; BYTE $0x07 // pinsrw xmm2, word [rsi + rbx + 30], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -8240,27 +8693,27 @@ LBB1_134: LONG $0xf7710f66; BYTE $0x06 // psllw xmm7, 6 LONG $0xdb0f4166; BYTE $0xfd // pand xmm7, xmm13 LONG $0xfeeb0f66 // por xmm7, xmm6 - LONG $0x4c6e0f66; WORD $0x2024 // movd xmm1, dword [rsp + 32] - LONG $0x54b70f42; WORD $0x2c3e // movzx edx, word [rsi + r15 + 44] + LONG $0x6e0f4166; BYTE $0xcb // movd xmm1, r11d + LONG $0x54b70f46; WORD $0x2c3e // movzx r10d, word [rsi + r15 + 44] LONG $0xd0750f66 // pcmpeqw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 LONG $0xf2710f66; BYTE $0x07 // psllw xmm2, 7 LONG $0xdb0f4166; BYTE $0xd6 // pand xmm2, xmm14 LONG $0xd7eb0f66 // por xmm2, xmm7 - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d - LONG $0x44b70f42; WORD $0x2e3e // movzx eax, word [rsi + r15 + 46] + LONG $0x746e0f66; WORD $0x3024 // movd xmm6, dword [rsp + 48] + LONG $0x74b70f46; WORD $0x2e3e // movzx r14d, word [rsi + r15 + 46] QUAD $0x0120066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 32], 1 QUAD $0x02200e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 32], 2 QUAD $0x0320266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 32], 3 QUAD $0x04202e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 32], 4 LONG $0x6cc40f66; 
WORD $0x200e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 32], 5 - LONG $0x6cc40f66; WORD $0x203e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 32], 6 + LONG $0x6cc40f66; WORD $0x2016; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 32], 6 QUAD $0x0122064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 34], 1 QUAD $0x02220e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 34], 2 QUAD $0x0322264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 34], 3 QUAD $0x04222e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 34], 4 LONG $0x4cc40f66; WORD $0x220e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 34], 5 - LONG $0x4cc40f66; WORD $0x223e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 34], 6 + LONG $0x4cc40f66; WORD $0x2216; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 34], 6 LONG $0x4cc40f66; WORD $0x221e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 34], 7 LONG $0xd3eb0f66 // por xmm2, xmm3 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 @@ -8268,8 +8721,9 @@ LBB1_134: LONG $0xf96f0f66 // movdqa xmm7, xmm1 LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 LONG $0xf9f80f66 // psubb xmm7, xmm1 - LONG $0x5c6e0f66; WORD $0x1824 // movd xmm3, dword [rsp + 24] - LONG $0x54b70f46; WORD $0x303e // movzx r10d, word [rsi + r15 + 48] + LONG $0x5c6e0f66; WORD $0x1024 // movd xmm3, dword [rsp + 16] + LONG $0x44b70f42; WORD $0x303e // movzx eax, word [rsi + r15 + 48] + LONG $0x10244489 // mov dword [rsp + 16], eax LONG $0x6cc40f66; WORD $0x201e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 32], 7 LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 LONG $0xed630f66 // packsswb xmm5, xmm5 @@ -8279,32 +8733,33 @@ LBB1_134: QUAD $0x03242674c40f4266 // pinsrw xmm6, word [rsi + r12 + 36], 3 QUAD $0x04242e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 36], 4 LONG $0x74c40f66; WORD $0x240e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 36], 5 - LONG $0x74c40f66; WORD $0x243e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 36], 6 + LONG $0x74c40f66; WORD $0x2416; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 36], 6 LONG $0x74c40f66; WORD $0x241e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 36], 7 QUAD $0x0126065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 38], 1 QUAD $0x02260e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 38], 2 QUAD $0x0326265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 38], 3 QUAD $0x04262e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 38], 4 LONG $0x5cc40f66; WORD $0x260e; BYTE $0x05 // pinsrw xmm3, word [rsi + rcx + 38], 5 - LONG $0x5cc40f66; WORD $0x263e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 38], 6 + LONG $0x5cc40f66; WORD $0x2616; BYTE $0x06 // pinsrw xmm3, word [rsi + rdx + 38], 6 LONG $0x5cc40f66; WORD $0x261e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 38], 7 LONG $0xfdeb0f66 // por xmm7, xmm5 - LONG $0x6e0f4166; BYTE $0xee // movd xmm5, r14d + LONG $0xef6e0f66 // movd xmm5, edi QUAD $0x0128066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 40], 1 QUAD $0x02280e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 40], 2 QUAD $0x0328266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 40], 3 QUAD $0x04282e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 40], 4 LONG $0x6cc40f66; WORD $0x280e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 40], 5 - LONG $0x6cc40f66; WORD $0x283e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 40], 6 - LONG $0x74b70f46; WORD $0x323e // movzx r14d, word [rsi + r15 + 50] + LONG $0x6cc40f66; WORD $0x2816; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 40], 6 + LONG $0x44b70f42; WORD $0x323e // movzx eax, word [rsi + r15 + 50] LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 LONG $0xdb0f4166; BYTE $0xf7 // pand 
xmm6, xmm15 LONG $0xf6710f66; BYTE $0x02 // psllw xmm6, 2 LONG $0xdb0f4166; BYTE $0xf1 // pand xmm6, xmm9 LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x6e0f4166; BYTE $0xcb // movd xmm1, r11d - LONG $0x5cb70f46; WORD $0x343e // movzx r11d, word [rsi + r15 + 52] + LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] + LONG $0x7cb70f42; WORD $0x343e // movzx edi, word [rsi + r15 + 52] + LONG $0x18247c89 // mov dword [rsp + 24], edi LONG $0x6cc40f66; WORD $0x281e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 40], 7 LONG $0xd8750f66 // pcmpeqw xmm3, xmm0 LONG $0xdb630f66 // packsswb xmm3, xmm3 @@ -8317,24 +8772,24 @@ LBB1_134: LONG $0xf5710f66; BYTE $0x04 // psllw xmm5, 4 LONG $0xdb0f4166; BYTE $0xeb // pand xmm5, xmm11 LONG $0xebeb0f66 // por xmm5, xmm3 - LONG $0xfa6e0f66 // movd xmm7, edx - LONG $0x54b70f42; WORD $0x363e // movzx edx, word [rsi + r15 + 54] + LONG $0x6e0f4166; BYTE $0xfa // movd xmm7, r10d + LONG $0x5cb70f46; WORD $0x363e // movzx r11d, word [rsi + r15 + 54] QUAD $0x012a064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 42], 1 QUAD $0x022a0e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 42], 2 QUAD $0x032a264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 42], 3 QUAD $0x042a2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 42], 4 LONG $0x4cc40f66; WORD $0x2a0e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 42], 5 - LONG $0x4cc40f66; WORD $0x2a3e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 42], 6 + LONG $0x4cc40f66; WORD $0x2a16; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 42], 6 LONG $0x4cc40f66; WORD $0x2a1e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 42], 7 QUAD $0x012c067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 44], 1 QUAD $0x022c0e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 44], 2 QUAD $0x032c267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 44], 3 QUAD $0x042c2e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 44], 4 LONG $0x7cc40f66; WORD $0x2c0e; BYTE $0x05 // pinsrw xmm7, word [rsi + rcx + 44], 5 - LONG $0x7cc40f66; WORD $0x2c3e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 44], 6 + LONG $0x7cc40f66; WORD $0x2c16; BYTE $0x06 // pinsrw xmm7, word [rsi + rdx + 44], 6 LONG $0xeeeb0f66 // por xmm5, xmm6 - LONG $0xd86e0f66 // movd xmm3, eax - LONG $0x44b70f42; WORD $0x383e // movzx eax, word [rsi + r15 + 56] + LONG $0x6e0f4166; BYTE $0xde // movd xmm3, r14d + LONG $0x54b70f46; WORD $0x383e // movzx r10d, word [rsi + r15 + 56] LONG $0x7cc40f66; WORD $0x2c1e; BYTE $0x07 // pinsrw xmm7, word [rsi + rbx + 44], 7 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 LONG $0xc9630f66 // packsswb xmm1, xmm1 @@ -8347,29 +8802,29 @@ LBB1_134: LONG $0xf7710f66; BYTE $0x06 // psllw xmm7, 6 LONG $0xdb0f4166; BYTE $0xfd // pand xmm7, xmm13 LONG $0xf9eb0f66 // por xmm7, xmm1 - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d - LONG $0x54b70f46; WORD $0x3a3e // movzx r10d, word [rsi + r15 + 58] + LONG $0x746e0f66; WORD $0x1024 // movd xmm6, dword [rsp + 16] + LONG $0x74b70f46; WORD $0x3a3e // movzx r14d, word [rsi + r15 + 58] QUAD $0x012e065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 46], 1 QUAD $0x022e0e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 46], 2 QUAD $0x032e265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 46], 3 QUAD $0x042e2e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 46], 4 LONG $0x5cc40f66; WORD $0x2e0e; BYTE $0x05 // pinsrw xmm3, word [rsi + rcx + 46], 5 - LONG $0x5cc40f66; WORD $0x2e3e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 46], 6 + LONG $0x5cc40f66; WORD $0x2e16; BYTE $0x06 // pinsrw xmm3, word [rsi + rdx + 46], 6 LONG $0x5cc40f66; WORD $0x2e1e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 
46], 7 LONG $0xd8750f66 // pcmpeqw xmm3, xmm0 LONG $0xdb630f66 // packsswb xmm3, xmm3 LONG $0xf3710f66; BYTE $0x07 // psllw xmm3, 7 LONG $0xdb0f4166; BYTE $0xde // pand xmm3, xmm14 LONG $0xdfeb0f66 // por xmm3, xmm7 - LONG $0x6e0f4166; BYTE $0xce // movd xmm1, r14d - LONG $0x74b70f46; WORD $0x3c3e // movzx r14d, word [rsi + r15 + 60] + LONG $0xc86e0f66 // movd xmm1, eax + LONG $0x7cb70f42; WORD $0x3c3e // movzx edi, word [rsi + r15 + 60] LONG $0x7cb70f46; WORD $0x3e3e // movzx r15d, word [rsi + r15 + 62] QUAD $0x0132064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 50], 1 QUAD $0x02320e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 50], 2 QUAD $0x0332264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 50], 3 QUAD $0x04322e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 50], 4 LONG $0x4cc40f66; WORD $0x320e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 50], 5 - LONG $0x4cc40f66; WORD $0x323e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 50], 6 + LONG $0x4cc40f66; WORD $0x3216; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 50], 6 LONG $0x4cc40f66; WORD $0x321e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 50], 7 LONG $0xddeb0f66 // por xmm3, xmm5 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 @@ -8377,13 +8832,14 @@ LBB1_134: LONG $0xe96f0f66 // movdqa xmm5, xmm1 LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 LONG $0xe9f80f66 // psubb xmm5, xmm1 - LONG $0x6e0f4166; BYTE $0xcb // movd xmm1, r11d + LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] QUAD $0x01300674c40f4266 // pinsrw xmm6, word [rsi + r8 + 48], 1 QUAD $0x02300e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 48], 2 QUAD $0x03302674c40f4266 // pinsrw xmm6, word [rsi + r12 + 48], 3 QUAD $0x04302e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 48], 4 LONG $0x74c40f66; WORD $0x300e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 48], 5 - LONG $0x74c40f66; WORD $0x303e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 48], 6 + LONG $0x74c40f66; WORD $0x3016; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 48], 6 LONG $0x74c40f66; WORD $0x301e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 48], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -8393,9 +8849,9 @@ LBB1_134: QUAD $0x04342e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 52], 4 LONG $0x4cc40f66; WORD $0x340e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 52], 5 LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0x4cc40f66; WORD $0x343e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 52], 6 + LONG $0x4cc40f66; WORD $0x3416; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 52], 6 LONG $0xeeeb0f66 // por xmm5, xmm6 - LONG $0xf26e0f66 // movd xmm6, edx + LONG $0x6e0f4166; BYTE $0xf3 // movd xmm6, r11d LONG $0x4cc40f66; WORD $0x341e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 52], 7 LONG $0xc8750f66 // pcmpeqw xmm1, xmm0 LONG $0xc9630f66 // packsswb xmm1, xmm1 @@ -8403,20 +8859,20 @@ LBB1_134: LONG $0xf1710f66; BYTE $0x02 // psllw xmm1, 2 LONG $0xdb0f4166; BYTE $0xc9 // pand xmm1, xmm9 LONG $0xcdeb0f66 // por xmm1, xmm5 - LONG $0xe86e0f66 // movd xmm5, eax + LONG $0x6e0f4166; BYTE $0xea // movd xmm5, r10d QUAD $0x01360674c40f4266 // pinsrw xmm6, word [rsi + r8 + 54], 1 QUAD $0x02360e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 54], 2 QUAD $0x03362674c40f4266 // pinsrw xmm6, word [rsi + r12 + 54], 3 QUAD $0x04362e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 54], 4 LONG $0x74c40f66; WORD $0x360e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 54], 5 - LONG $0x74c40f66; WORD $0x363e; BYTE $0x06 // pinsrw xmm6, word [rsi + 
rdi + 54], 6 + LONG $0x74c40f66; WORD $0x3616; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 54], 6 LONG $0x74c40f66; WORD $0x361e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 54], 7 QUAD $0x0138066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 56], 1 QUAD $0x02380e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 56], 2 QUAD $0x0338266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 56], 3 QUAD $0x04382e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 56], 4 LONG $0x6cc40f66; WORD $0x380e; BYTE $0x05 // pinsrw xmm5, word [rsi + rcx + 56], 5 - LONG $0x6cc40f66; WORD $0x383e; BYTE $0x06 // pinsrw xmm5, word [rsi + rdi + 56], 6 + LONG $0x6cc40f66; WORD $0x3816; BYTE $0x06 // pinsrw xmm5, word [rsi + rdx + 56], 6 LONG $0x6cc40f66; WORD $0x381e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 56], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -8429,22 +8885,22 @@ LBB1_134: LONG $0xf5710f66; BYTE $0x04 // psllw xmm5, 4 LONG $0xdb0f4166; BYTE $0xeb // pand xmm5, xmm11 LONG $0xeeeb0f66 // por xmm5, xmm6 - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d + LONG $0x6e0f4166; BYTE $0xf6 // movd xmm6, r14d QUAD $0x013a0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 58], 1 QUAD $0x023a0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 58], 2 QUAD $0x033a2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 58], 3 QUAD $0x043a2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 58], 4 LONG $0x74c40f66; WORD $0x3a0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 58], 5 - LONG $0x74c40f66; WORD $0x3a3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 58], 6 + LONG $0x74c40f66; WORD $0x3a16; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 58], 6 LONG $0x74c40f66; WORD $0x3a1e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 58], 7 LONG $0xe9eb0f66 // por xmm5, xmm1 - LONG $0x6e0f4166; BYTE $0xce // movd xmm1, r14d + LONG $0xcf6e0f66 // movd xmm1, edi QUAD $0x013c064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 60], 1 QUAD $0x023c0e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 60], 2 QUAD $0x033c264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 60], 3 QUAD $0x043c2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 60], 4 LONG $0x4cc40f66; WORD $0x3c0e; BYTE $0x05 // pinsrw xmm1, word [rsi + rcx + 60], 5 - LONG $0x4cc40f66; WORD $0x3c3e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 60], 6 + LONG $0x4cc40f66; WORD $0x3c16; BYTE $0x06 // pinsrw xmm1, word [rsi + rdx + 60], 6 LONG $0x4cc40f66; WORD $0x3c1e; BYTE $0x07 // pinsrw xmm1, word [rsi + rbx + 60], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -8461,10 +8917,9 @@ LBB1_134: QUAD $0x013e0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 62], 1 QUAD $0x023e0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 62], 2 QUAD $0x033e2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 62], 3 - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] QUAD $0x043e2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 62], 4 LONG $0x74c40f66; WORD $0x3e0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 62], 5 - LONG $0x74c40f66; WORD $0x3e3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 62], 6 + LONG $0x74c40f66; WORD $0x3e16; BYTE $0x06 // pinsrw xmm6, word [rsi + rdx + 62], 6 LONG $0x74c40f66; WORD $0x3e1e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 62], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -8484,22 +8939,21 @@ LBB1_134: LONG $0xe2600f66 // punpcklbw xmm4, xmm2 LONG $0xe3610f66 // punpcklwd xmm4, xmm3 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - LONG $0x7f0f41f3; WORD $0x8e24 // movdqu oword [r14 + 4*rcx], xmm4 - LONG 
$0x7f0f41f3; WORD $0x8e4c; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm1 + LONG $0x247f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm4 + LONG $0x4c7f0ff3; WORD $0x1088 // movdqu oword [rax + 4*rcx + 16], xmm1 LONG $0x08c18348 // add rcx, 8 WORD $0x8949; BYTE $0xcf // mov r15, rcx - LONG $0x244c3b48; BYTE $0x10 // cmp rcx, qword [rsp + 16] - JNE LBB1_134 - QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] - LONG $0x247c3b4c; BYTE $0x10 // cmp r15, qword [rsp + 16] + LONG $0x244c3b48; BYTE $0x20 // cmp rcx, qword [rsp + 32] + JNE LBB1_135 + QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] + LONG $0x247c3b4c; BYTE $0x20 // cmp r15, qword [rsp + 32] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - LONG $0x246c8b44; BYTE $0x38 // mov r13d, dword [rsp + 56] - LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - JNE LBB1_136 - JMP LBB1_139 + LONG $0x245c8b44; BYTE $0x04 // mov r11d, dword [rsp + 4] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + JNE LBB1_137 + JMP LBB1_140 -LBB1_184: +LBB1_182: WORD $0x894d; BYTE $0xd8 // mov r8, r11 LONG $0xfce08349 // and r8, -4 WORD $0x894c; BYTE $0xc3 // mov rbx, r8 @@ -8519,7 +8973,7 @@ LBB1_184: LONG $0x6f0f4466; WORD $0x6075 // movdqa xmm14, oword 96[rbp] /* [rip + .LCPI1_6] */ LONG $0x6f0f4466; WORD $0x704d // movdqa xmm9, oword 112[rbp] /* [rip + .LCPI1_7] */ -LBB1_185: +LBB1_183: QUAD $0xfffffe04b6100ff3 // movss xmm6, dword [rsi - 508] QUAD $0xfffffe08be100ff3 // movss xmm7, dword [rsi - 504] QUAD $0xfffffe0cae100ff3 // movss xmm5, dword [rsi - 500] @@ -8864,10 +9318,10 @@ LBB1_185: LONG $0x04c18348 // add rcx, 4 LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // add rsi, 512 WORD $0x3949; BYTE $0xc8 // cmp r8, rcx - JNE LBB1_185 + JNE LBB1_183 WORD $0x394d; BYTE $0xc3 // cmp r11, r8 - JNE LBB1_187 - JMP LBB1_190 + JNE LBB1_185 + JMP LBB1_188 DATA LCDATA2<>+0x000(SB)/8, $0x0000000001010101 DATA LCDATA2<>+0x008(SB)/8, $0x0000000000000000 @@ -8928,7 +9382,7 @@ TEXT ·_comparison_equal_scalar_arr_sse4(SB), $328-48 WORD $0xff83; BYTE $0x05 // cmp edi, 5 JE LBB2_95 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB2_176 + JNE LBB2_177 WORD $0x8b44; BYTE $0x2e // mov r13d, dword [rsi] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -8978,7 +9432,7 @@ LBB2_11: WORD $0x3b44; BYTE $0x2a // cmp r13d, dword [rdx] QUAD $0x000000c02494940f // sete byte [rsp + 192] LONG $0x046a3b44 // cmp r13d, dword [rdx + 4] - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x086a3b44 // cmp r13d, dword [rdx + 8] LONG $0xd6940f41 // sete r14b LONG $0x0c6a3b44 // cmp r13d, dword [rdx + 12] @@ -8988,9 +9442,9 @@ LBB2_11: LONG $0x146a3b44 // cmp r13d, dword [rdx + 20] LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0x186a3b44 // cmp r13d, dword [rdx + 24] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x1c6a3b44 // cmp r13d, dword [rdx + 28] - LONG $0xd3940f41 // sete r11b + LONG $0xd7940f40 // sete dil LONG $0x206a3b44 // cmp r13d, dword [rdx + 32] QUAD $0x000000d02494940f // sete byte [rsp + 208] LONG $0x246a3b44 // cmp r13d, dword [rdx + 36] @@ -9000,13 +9454,13 @@ LBB2_11: LONG $0x2c6a3b44 // cmp r13d, dword [rdx + 44] LONG $0xd1940f41 // sete r9b LONG $0x306a3b44 // cmp r13d, dword [rdx + 48] - LONG $0xd2940f41 // sete r10b + LONG $0xd3940f41 // sete r11b LONG $0x346a3b44 // cmp r13d, dword [rdx + 52] LONG $0xd4940f41 // sete r12b LONG $0x386a3b44 // cmp r13d, dword [rdx + 56] 
QUAD $0x000000b02494940f // sete byte [rsp + 176] LONG $0x3c6a3b44 // cmp r13d, dword [rdx + 60] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd0 // sete al LONG $0x406a3b44 // cmp r13d, dword [rdx + 64] LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x446a3b44 // cmp r13d, dword [rdx + 68] @@ -9026,107 +9480,106 @@ LBB2_11: LONG $0x606a3b44 // cmp r13d, dword [rdx + 96] LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] LONG $0x646a3b44 // cmp r13d, dword [rdx + 100] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x686a3b44 // cmp r13d, dword [rdx + 104] LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x686a3b44 // cmp r13d, dword [rdx + 104] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x6c6a3b44 // cmp r13d, dword [rdx + 108] LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x706a3b44 // cmp r13d, dword [rdx + 112] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x746a3b44 // cmp r13d, dword [rdx + 116] LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x746a3b44 // cmp r13d, dword [rdx + 116] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x786a3b44 // cmp r13d, dword [rdx + 120] LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x7c6a3b44 // cmp r13d, dword [rdx + 124] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000c024940244 // add r10b, byte [rsp + 192] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + WORD $0x0845; BYTE $0xd6 // or r14b, r10b WORD $0x0040; BYTE $0xf6 // add sil, sil QUAD $0x000000d024b40240 // add sil, byte [rsp + 208] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax + QUAD $0x000000a0249cb60f // movzx ebx, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx LONG $0x02e0c041 // shl r8b, 2 WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; 
BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xc000 // add al, al - LONG $0x48244402 // add al, byte [rsp + 72] - WORD $0xc689 // mov esi, eax - QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc789 // mov edi, eax - LONG $0x24048b48 // mov rax, qword [rsp] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b + WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x24348b48 // mov rsi, qword [rsp] - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 + QUAD $0x0000b02484b60f44; BYTE $0x00 // movzx r8d, byte [rsp + 176] + LONG $0x06e0c041 // shl r8b, 6 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xc0 // or al, r8b + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xdb00 // add bl, bl + LONG $0x48245c02 // add bl, byte [rsp + 72] + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8840; BYTE $0x3e // mov byte [rsi], dil + LONG $0x247cb60f; BYTE $0x40 // movzx edi, byte [rsp + 64] + LONG $0x06e7c040 // shl dil, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - WORD $0x4e88; BYTE $0x01 // mov byte [rsi + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0x4688; BYTE $0x01 // mov byte [rsi + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xc000 // add al, al LONG $0x18244402 // add al, byte [rsp + 24] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD 
$0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - WORD $0xcb08 // or bl, cl LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x5e88; BYTE $0x03 // mov byte [rsi + 3], bl + WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 LONG $0x04c68348 // add rsi, 4 LONG $0x24348948 // mov qword [rsp], rsi @@ -9138,7 +9591,7 @@ LBB2_11: LBB2_13: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB2_176 + JGE LBB2_177 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 @@ -9178,7 +9631,7 @@ LBB2_16: LONG $0x33048841 // mov byte [r11 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB2_16 - JMP LBB2_152 + JMP LBB2_153 LBB2_17: WORD $0xff83; BYTE $0x08 // cmp edi, 8 @@ -9188,7 +9641,7 @@ LBB2_17: WORD $0xff83; BYTE $0x0b // cmp edi, 11 JE LBB2_118 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB2_176 + JNE LBB2_177 LONG $0x1f728d4d // lea r14, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xf2490f4d // cmovns r14, r10 @@ -9205,7 +9658,9 @@ LBB2_17: LBB2_23: LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] LONG $0x08528d48 // lea rdx, [rdx + 8] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xcb20 // and bl, cl WORD $0xdbf6 // neg bl LONG $0x07708d48 // lea rsi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax @@ -9231,184 +9686,270 @@ LBB2_25: LONG $0x20fa8349 // cmp r10, 32 JL LBB2_29 QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 - QUAD $0x000000c024b4894c // mov qword [rsp + 192], r14 + QUAD $0x000000f024b4894c // mov qword [rsp + 240], r14 + QUAD $0x0000009824b4894c // mov qword [rsp + 152], r14 LBB2_27: LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x18244c88 // mov byte [rsp + 24], cl LONG $0x422e0f66; BYTE $0x08 // ucomisd xmm0, qword [rdx + 8] - LONG $0xd0940f41 // sete r8b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xc320 // and bl, al LONG $0x422e0f66; BYTE $0x10 // ucomisd xmm0, qword [rdx + 16] - LONG $0xd3940f41 // sete r11b - LONG $0x422e0f66; BYTE $0x18 // ucomisd xmm0, qword [rdx + 24] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd5940f41 // sete r13b + WORD $0x2041; BYTE $0xc5 // and r13b, al + LONG $0x422e0f66; BYTE $0x18 // ucomisd xmm0, qword [rdx + 24] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x422e0f66; BYTE $0x20 // 
ucomisd xmm0, qword [rdx + 32] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x422e0f66; BYTE $0x28 // ucomisd xmm0, qword [rdx + 40] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x422e0f66; BYTE $0x30 // ucomisd xmm0, qword [rdx + 48] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x422e0f66; BYTE $0x38 // ucomisd xmm0, qword [rdx + 56] - LONG $0xd6940f41 // sete r14b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x422e0f66; BYTE $0x40 // ucomisd xmm0, qword [rdx + 64] - QUAD $0x000000b02494940f // sete byte [rsp + 176] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x422e0f66; BYTE $0x48 // ucomisd xmm0, qword [rdx + 72] - LONG $0xd6940f40 // sete sil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x422e0f66; BYTE $0x50 // ucomisd xmm0, qword [rdx + 80] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x422e0f66; BYTE $0x58 // ucomisd xmm0, qword [rdx + 88] - LONG $0xd1940f41 // sete r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x422e0f66; BYTE $0x60 // ucomisd xmm0, qword [rdx + 96] - LONG $0xd2940f41 // sete r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x422e0f66; BYTE $0x68 // ucomisd xmm0, qword [rdx + 104] - LONG $0xd4940f41 // sete r12b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x422e0f66; BYTE $0x70 // ucomisd xmm0, qword [rdx + 112] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x422e0f66; BYTE $0x78 // ucomisd xmm0, qword [rdx + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl QUAD $0x00000080822e0f66 // ucomisd xmm0, qword [rdx + 128] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl QUAD $0x00000088822e0f66 // ucomisd xmm0, qword [rdx + 136] - QUAD $0x000000d02494940f // sete byte [rsp + 208] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl QUAD $0x00000090822e0f66 // ucomisd 
xmm0, qword [rdx + 144] - QUAD $0x000000802494940f // sete byte [rsp + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl QUAD $0x00000098822e0f66 // ucomisd xmm0, qword [rdx + 152] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl QUAD $0x000000a0822e0f66 // ucomisd xmm0, qword [rdx + 160] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xd0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 208], cl QUAD $0x000000a8822e0f66 // ucomisd xmm0, qword [rdx + 168] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xc0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 192], cl QUAD $0x000000b0822e0f66 // ucomisd xmm0, qword [rdx + 176] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - QUAD $0x000000b8822e0f66 // ucomisd xmm0, qword [rdx + 184] + WORD $0x9b0f; BYTE $0xd0 // setnp al LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al + QUAD $0x000000b8822e0f66 // ucomisd xmm0, qword [rdx + 184] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al QUAD $0x000000c0822e0f66 // ucomisd xmm0, qword [rdx + 192] - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl QUAD $0x000000c8822e0f66 // ucomisd xmm0, qword [rdx + 200] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al QUAD $0x000000d0822e0f66 // ucomisd xmm0, qword [rdx + 208] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al QUAD $0x000000d8822e0f66 // ucomisd xmm0, qword [rdx + 216] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al QUAD $0x000000e0822e0f66 // ucomisd xmm0, qword [rdx + 224] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd4940f41 // sete r12b + WORD $0x2041; BYTE $0xc4 // and r12b, al QUAD $0x000000e8822e0f66 // ucomisd xmm0, qword [rdx + 232] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl QUAD $0x000000f0822e0f66 // ucomisd xmm0, qword [rdx + 240] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE $0xc0 // and r8b, al QUAD $0x000000f8822e0f66 // ucomisd xmm0, qword [rdx + 248] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000a024840244 // add r8b, byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG 
$0xd6940f40 // sete sil + WORD $0x2040; BYTE $0xc6 // and sil, al + WORD $0xdb00 // add bl, bl + LONG $0x18245c02 // add bl, byte [rsp + 24] + LONG $0x05e7c040 // shl dil, 5 + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e6c041 // shl r14b, 7 - WORD $0x0841; BYTE $0xc6 // or r14b, al - LONG $0x02e3c041 // shl r11b, 2 - WORD $0x0845; BYTE $0xc3 // or r11b, r8b - WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000b024b40240 // add sil, byte [rsp + 176] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xdd // or r13b, r11b - LONG $0x24048b4c // mov r8, qword [rsp] - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0x8941; BYTE $0xc3 // mov r11d, eax - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0841; BYTE $0xf9 // or r9b, dil - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd8 // or al, r11b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x78 // movzx esi, byte [rsp + 120] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc6 // or r14b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xc000 // add al, al - LONG $0x48244402 // add al, byte [rsp + 72] - WORD $0xc689 // mov esi, eax - QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x08244488 // mov byte [rsp + 8], al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0x8845; BYTE $0x30 // mov byte [r8], r14b - LONG $0x2474b60f; BYTE $0x40 // movzx esi, byte [rsp + 64] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x01488841 // mov byte [r8 + 1], cl - WORD $0x0841; BYTE $0xc7 // or r15b, al - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xc000 // add al, al - LONG $0x18244402 // add al, byte [rsp + 24] - WORD $0xc189 
// mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xfb // or bl, dil + LONG $0x247cb60f; BYTE $0x48 // movzx edi, byte [rsp + 72] + LONG $0x05e7c040 // shl dil, 5 + LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xcb08 // or bl, cl - WORD $0xc308 // or bl, al - LONG $0x02788845 // mov byte [r8 + 2], r15b - LONG $0x03588841 // mov byte [r8 + 3], bl + WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x6cb60f44; WORD $0x7024 // movzx r13d, byte [rsp + 112] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xcd // or r13b, cl + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + WORD $0xc900 // add cl, cl + LONG $0x50244c02 // add cl, byte [rsp + 80] + LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0840; BYTE $0xcf // or dil, cl + QUAD $0x000000b0248cb60f // movzx ecx, byte [rsp + 176] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0x0840; BYTE $0xf9 // or cl, dil + WORD $0xcf89 // mov edi, ecx + QUAD $0x000000d0248cb60f // movzx ecx, byte [rsp + 208] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x247cb60f; BYTE $0x08 // movzx edi, byte [rsp + 8] + WORD $0x0840; BYTE $0xc7 // or dil, al + QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + LONG $0x06e7c041 // shl r15b, 6 + WORD $0x0841; BYTE $0xc7 // or r15b, al + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x07e1c041 // shl r9b, 7 + WORD $0x0845; BYTE $0xf9 // or r9b, r15b + LONG $0x24048b48 // mov rax, qword [rsp] + WORD $0x0841; BYTE $0xc9 // or r9b, cl + WORD $0x0045; BYTE $0xf6 // add r14b, r14b + QUAD $0x000000a024b40244 // add r14b, byte [rsp + 160] + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0845; BYTE $0xf3 // or r11b, r14b + LONG $0x03e2c041 // shl r10b, 3 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x04e4c041 // shl r12b, 4 + WORD $0x0845; BYTE $0xd4 // or r12b, r10b + WORD $0x8840; BYTE $0x38 // mov byte [rax], dil + QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x06e0c041 // shl r8b, 6 + WORD $0x0841; BYTE $0xc8 // or r8b, cl + LONG $0x01688844 // mov byte [rax + 1], r13b + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0844; BYTE $0xc6 // or sil, r8b + WORD $0x0844; BYTE $0xe6 // or sil, r12b + LONG $0x02488844 // mov byte [rax + 2], r9b + LONG $0x03708840 // mov byte [rax + 3], sil LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c08349 // add r8, 4 - LONG $0x2404894c // mov qword [rsp], r8 - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 
+ LONG $0x04c08348 // add rax, 4 + LONG $0x24048948 // mov qword [rsp], rax + QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 JNE LBB2_27 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] + QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] LBB2_29: LONG $0x05e6c149 // shl r14, 5 WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_176 + JGE LBB2_177 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf0 // sub r8, r14 WORD $0xf749; BYTE $0xd6 // not r14 WORD $0x014d; BYTE $0xd6 // add r14, r10 - JNE LBB2_161 + JNE LBB2_162 WORD $0xff31 // xor edi, edi - JMP LBB2_163 + JMP LBB2_164 LBB2_32: WORD $0xff83; BYTE $0x02 // cmp edi, 2 JE LBB2_60 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB2_176 + JNE LBB2_177 WORD $0x8a44; BYTE $0x36 // mov r14b, byte [rsi] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -9453,27 +9994,27 @@ LBB2_38: LONG $0x10ff8349 // cmp r15, 16 LONG $0x24748844; BYTE $0x08 // mov byte [rsp + 8], r14b QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x000000f024bc894c // mov qword [rsp + 240], r15 + QUAD $0x000000e824bc894c // mov qword [rsp + 232], r15 JB LBB2_42 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx LONG $0x24043948 // cmp qword [rsp], rax - JAE LBB2_185 + JAE LBB2_182 LONG $0x24048b48 // mov rax, qword [rsp] LONG $0xb8048d4a // lea rax, [rax + 4*r15] WORD $0x3948; BYTE $0xc2 // cmp rdx, rax - JAE LBB2_185 + JAE LBB2_182 LBB2_42: WORD $0xc031 // xor eax, eax - QUAD $0x000000e824848948 // mov qword [rsp + 232], rax + QUAD $0x000000e024848948 // mov qword [rsp + 224], rax WORD $0x8948; BYTE $0xd6 // mov rsi, rdx LONG $0x24048b48 // mov rax, qword [rsp] LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax LBB2_43: - QUAD $0x000000e824bc2b4c // sub r15, qword [rsp + 232] + QUAD $0x000000e024bc2b4c // sub r15, qword [rsp + 224] QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 LBB2_44: @@ -9539,10 +10080,10 @@ LBB2_44: LONG $0xd2940f41 // sete r10b LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x18 // cmp dl, byte [rcx + 24] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x19 // cmp dl, byte [rcx + 25] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x1a // cmp dl, byte [rcx + 26] LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] @@ -9551,10 +10092,10 @@ LBB2_44: LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x1c // cmp dl, byte [rcx + 28] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x1d // cmp dl, byte [rcx + 29] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x1e // cmp dl, byte [rcx + 30] LONG $0x2414940f // sete byte [rsp] @@ -9621,9 +10162,9 @@ LBB2_44: WORD $0x0841; BYTE $0xf2 // or r10b, sil WORD $0x4788; BYTE $0x01 // mov byte [rdi + 1], al WORD $0x0841; BYTE $0xda // 
or r10b, bl - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xc000 // add al, al - LONG $0x38244402 // add al, byte [rsp + 56] + LONG $0x20244402 // add al, byte [rsp + 32] WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x02 // shl al, 2 @@ -9633,11 +10174,11 @@ LBB2_44: WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xd808 // or al, bl LONG $0x241cb60f // movzx ebx, byte [rsp] @@ -9653,14 +10194,14 @@ LBB2_44: QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 JNE LBB2_44 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] + QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] JMP LBB2_131 LBB2_46: WORD $0xff83; BYTE $0x07 // cmp edi, 7 JE LBB2_72 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB2_176 + JNE LBB2_177 WORD $0x8b4c; BYTE $0x2e // mov r13, qword [rsi] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -9710,7 +10251,7 @@ LBB2_54: WORD $0x3b4c; BYTE $0x2a // cmp r13, qword [rdx] QUAD $0x000000c02494940f // sete byte [rsp + 192] LONG $0x086a3b4c // cmp r13, qword [rdx + 8] - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x106a3b4c // cmp r13, qword [rdx + 16] LONG $0xd6940f41 // sete r14b LONG $0x186a3b4c // cmp r13, qword [rdx + 24] @@ -9720,9 +10261,9 @@ LBB2_54: LONG $0x286a3b4c // cmp r13, qword [rdx + 40] LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0x306a3b4c // cmp r13, qword [rdx + 48] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x386a3b4c // cmp r13, qword [rdx + 56] - LONG $0xd3940f41 // sete r11b + LONG $0xd7940f40 // sete dil LONG $0x406a3b4c // cmp r13, qword [rdx + 64] QUAD $0x000000d02494940f // sete byte [rsp + 208] LONG $0x486a3b4c // cmp r13, qword [rdx + 72] @@ -9732,13 +10273,13 @@ LBB2_54: LONG $0x586a3b4c // cmp r13, qword [rdx + 88] LONG $0xd1940f41 // sete r9b LONG $0x606a3b4c // cmp r13, qword [rdx + 96] - LONG $0xd2940f41 // sete r10b + LONG $0xd3940f41 // sete r11b LONG $0x686a3b4c // cmp r13, qword [rdx + 104] LONG $0xd4940f41 // sete r12b LONG $0x706a3b4c // cmp r13, qword [rdx + 112] QUAD $0x000000b02494940f // sete byte [rsp + 176] LONG $0x786a3b4c // cmp r13, qword [rdx + 120] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd0 // sete al LONG $0x80aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 128] LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x88aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 136] @@ -9758,107 +10299,106 @@ LBB2_54: LONG $0xc0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 192] LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] LONG $0xc8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 200] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0xd0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 208] LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0xd0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp 
r13, qword [rdx + 208] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0xd8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 216] LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0xe0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 224] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0xe8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 232] LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0xe8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 232] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0xf0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 240] LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0xf8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 248] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000c024940244 // add r10b, byte [rsp + 192] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + WORD $0x0845; BYTE $0xd6 // or r14b, r10b WORD $0x0040; BYTE $0xf6 // add sil, sil QUAD $0x000000d024b40240 // add sil, byte [rsp + 208] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax + QUAD $0x000000a0249cb60f // movzx ebx, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx LONG $0x02e0c041 // shl r8b, 2 WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xc000 // add al, al - LONG $0x48244402 // add al, byte [rsp + 72] - WORD $0xc689 // mov esi, eax - QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xe0c0; BYTE $0x03 // 
shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc789 // mov edi, eax - LONG $0x24048b48 // mov rax, qword [rsp] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b + WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x24348b48 // mov rsi, qword [rsp] - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 + QUAD $0x0000b02484b60f44; BYTE $0x00 // movzx r8d, byte [rsp + 176] + LONG $0x06e0c041 // shl r8b, 6 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xc0 // or al, r8b + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xdb00 // add bl, bl + LONG $0x48245c02 // add bl, byte [rsp + 72] + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8840; BYTE $0x3e // mov byte [rsi], dil + LONG $0x247cb60f; BYTE $0x40 // movzx edi, byte [rsp + 64] + LONG $0x06e7c040 // shl dil, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - WORD $0x4e88; BYTE $0x01 // mov byte [rsi + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0x4688; BYTE $0x01 // mov byte [rsi + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xc000 // add al, al LONG $0x18244402 // add al, byte [rsp + 24] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD 
$0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - WORD $0xcb08 // or bl, cl LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x5e88; BYTE $0x03 // mov byte [rsi + 3], bl + WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 LONG $0x04c68348 // add rsi, 4 LONG $0x24348948 // mov qword [rsp], rsi @@ -9870,7 +10410,7 @@ LBB2_54: LBB2_56: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB2_176 + JGE LBB2_177 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 @@ -9910,7 +10450,7 @@ LBB2_59: LONG $0x33048841 // mov byte [r11 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi JNE LBB2_59 - JMP LBB2_167 + JMP LBB2_168 LBB2_60: WORD $0x8a44; BYTE $0x36 // mov r14b, byte [rsi] @@ -9957,27 +10497,27 @@ LBB2_64: LONG $0x10ff8349 // cmp r15, 16 LONG $0x24748844; BYTE $0x08 // mov byte [rsp + 8], r14b QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x000000f024bc894c // mov qword [rsp + 240], r15 + QUAD $0x000000e824bc894c // mov qword [rsp + 232], r15 JB LBB2_68 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx LONG $0x24043948 // cmp qword [rsp], rax - JAE LBB2_188 + JAE LBB2_185 LONG $0x24048b48 // mov rax, qword [rsp] LONG $0xb8048d4a // lea rax, [rax + 4*r15] WORD $0x3948; BYTE $0xc2 // cmp rdx, rax - JAE LBB2_188 + JAE LBB2_185 LBB2_68: WORD $0xc031 // xor eax, eax - QUAD $0x000000e824848948 // mov qword [rsp + 232], rax + QUAD $0x000000e024848948 // mov qword [rsp + 224], rax WORD $0x8948; BYTE $0xd6 // mov rsi, rdx LONG $0x24048b48 // mov rax, qword [rsp] LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax LBB2_69: - QUAD $0x000000e824bc2b4c // sub r15, qword [rsp + 232] + QUAD $0x000000e024bc2b4c // sub r15, qword [rsp + 224] QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 LBB2_70: @@ -10043,10 +10583,10 @@ LBB2_70: LONG $0xd2940f41 // sete r10b LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x18 // cmp dl, byte [rcx + 24] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x19 // cmp dl, byte [rcx + 25] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x1a // cmp dl, byte [rcx + 26] LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] @@ -10055,10 +10595,10 @@ LBB2_70: LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x1c // cmp dl, byte [rcx + 28] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x1d // cmp dl, byte [rcx + 29] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0x513a; BYTE $0x1e // cmp dl, byte [rcx + 30] LONG $0x2414940f // sete byte [rsp] @@ 
-10125,9 +10665,9 @@ LBB2_70: WORD $0x0841; BYTE $0xf2 // or r10b, sil WORD $0x4788; BYTE $0x01 // mov byte [rdi + 1], al WORD $0x0841; BYTE $0xda // or r10b, bl - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xc000 // add al, al - LONG $0x38244402 // add al, byte [rsp + 56] + LONG $0x20244402 // add al, byte [rsp + 32] WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x02 // shl al, 2 @@ -10137,11 +10677,11 @@ LBB2_70: WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xd808 // or al, bl LONG $0x241cb60f // movzx ebx, byte [rsp] @@ -10157,7 +10697,7 @@ LBB2_70: QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 JNE LBB2_70 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] + QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] JMP LBB2_135 LBB2_72: @@ -10210,7 +10750,7 @@ LBB2_78: WORD $0x3b44; BYTE $0x2a // cmp r13d, dword [rdx] QUAD $0x000000c02494940f // sete byte [rsp + 192] LONG $0x046a3b44 // cmp r13d, dword [rdx + 4] - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x086a3b44 // cmp r13d, dword [rdx + 8] LONG $0xd6940f41 // sete r14b LONG $0x0c6a3b44 // cmp r13d, dword [rdx + 12] @@ -10220,9 +10760,9 @@ LBB2_78: LONG $0x146a3b44 // cmp r13d, dword [rdx + 20] LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0x186a3b44 // cmp r13d, dword [rdx + 24] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x1c6a3b44 // cmp r13d, dword [rdx + 28] - LONG $0xd3940f41 // sete r11b + LONG $0xd7940f40 // sete dil LONG $0x206a3b44 // cmp r13d, dword [rdx + 32] QUAD $0x000000d02494940f // sete byte [rsp + 208] LONG $0x246a3b44 // cmp r13d, dword [rdx + 36] @@ -10232,13 +10772,13 @@ LBB2_78: LONG $0x2c6a3b44 // cmp r13d, dword [rdx + 44] LONG $0xd1940f41 // sete r9b LONG $0x306a3b44 // cmp r13d, dword [rdx + 48] - LONG $0xd2940f41 // sete r10b + LONG $0xd3940f41 // sete r11b LONG $0x346a3b44 // cmp r13d, dword [rdx + 52] LONG $0xd4940f41 // sete r12b LONG $0x386a3b44 // cmp r13d, dword [rdx + 56] QUAD $0x000000b02494940f // sete byte [rsp + 176] LONG $0x3c6a3b44 // cmp r13d, dword [rdx + 60] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd0 // sete al LONG $0x406a3b44 // cmp r13d, dword [rdx + 64] LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x446a3b44 // cmp r13d, dword [rdx + 68] @@ -10258,107 +10798,106 @@ LBB2_78: LONG $0x606a3b44 // cmp r13d, dword [rdx + 96] LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] LONG $0x646a3b44 // cmp r13d, dword [rdx + 100] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x686a3b44 // cmp r13d, dword [rdx + 104] LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0x686a3b44 // cmp r13d, dword [rdx + 104] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0x6c6a3b44 // cmp r13d, dword [rdx + 108] LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0x706a3b44 // cmp r13d, dword [rdx + 112] - LONG 
$0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x746a3b44 // cmp r13d, dword [rdx + 116] LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0x746a3b44 // cmp r13d, dword [rdx + 116] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0x786a3b44 // cmp r13d, dword [rdx + 120] LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0x7c6a3b44 // cmp r13d, dword [rdx + 124] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000c024940244 // add r10b, byte [rsp + 192] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + WORD $0x0845; BYTE $0xd6 // or r14b, r10b WORD $0x0040; BYTE $0xf6 // add sil, sil QUAD $0x000000d024b40240 // add sil, byte [rsp + 208] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax + QUAD $0x000000a0249cb60f // movzx ebx, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx LONG $0x02e0c041 // shl r8b, 2 WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xc000 // add al, al - LONG $0x48244402 // add al, byte [rsp + 72] - WORD $0xc689 // mov esi, eax - QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 
// or al, sil - WORD $0xc789 // mov edi, eax - LONG $0x24048b48 // mov rax, qword [rsp] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b + WORD $0x0845; BYTE $0xdc // or r12b, r11b LONG $0x24348b48 // mov rsi, qword [rsp] - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 + QUAD $0x0000b02484b60f44; BYTE $0x00 // movzx r8d, byte [rsp + 176] + LONG $0x06e0c041 // shl r8b, 6 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xc0 // or al, r8b + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xdb00 // add bl, bl + LONG $0x48245c02 // add bl, byte [rsp + 72] + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8840; BYTE $0x3e // mov byte [rsi], dil + LONG $0x247cb60f; BYTE $0x40 // movzx edi, byte [rsp + 64] + LONG $0x06e7c040 // shl dil, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - WORD $0x4e88; BYTE $0x01 // mov byte [rsi + 1], cl WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0x4688; BYTE $0x01 // mov byte [rsi + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xc000 // add al, al LONG $0x18244402 // add al, byte [rsp + 24] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - WORD $0xcb08 // or bl, cl LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x5e88; BYTE 
$0x03 // mov byte [rsi + 3], bl + WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 LONG $0x04c68348 // add rsi, 4 LONG $0x24348948 // mov qword [rsp], rsi @@ -10370,22 +10909,22 @@ LBB2_78: LBB2_80: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB2_176 + JGE LBB2_177 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB2_150 + JNE LBB2_151 LBB2_82: WORD $0xff31 // xor edi, edi - JMP LBB2_152 + JMP LBB2_153 LBB2_83: - LONG $0x2eb70f44 // movzx r13d, word [rsi] - LONG $0x1f728d4d // lea r14, [r10 + 31] + LONG $0x36b70f44 // movzx r14d, word [rsi] + LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 - LONG $0xf2490f4d // cmovns r14, r10 + LONG $0xfa490f4d // cmovns r15, r10 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -10396,7 +10935,7 @@ LBB2_83: LONG $0x241c8b4c // mov r11, qword [rsp] LBB2_85: - LONG $0x2a3b4466 // cmp r13w, word [rdx] + LONG $0x323b4466 // cmp r14w, word [rdx] LONG $0x02528d48 // lea rdx, [rdx + 2] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl @@ -10420,121 +10959,129 @@ LBB2_85: LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 LBB2_87: - LONG $0x05fec149 // sar r14, 5 - LONG $0x20fa8349 // cmp r10, 32 + LONG $0x05ffc149 // sar r15, 5 + LONG $0x20fa8349 // cmp r10, 32 JL LBB2_138 - LONG $0x08fe8349 // cmp r14, 8 - QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x0000009824b4894c // mov qword [rsp + 152], r14 + LONG $0x08ff8349 // cmp r15, 8 + LONG $0x24748944; BYTE $0x08 // mov dword [rsp + 8], r14d + QUAD $0x000000902494894c // mov qword [rsp + 144], r10 + QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 JB LBB2_91 - WORD $0x894c; BYTE $0xf0 // mov rax, r14 - LONG $0x06e0c148 // shl rax, 6 - WORD $0x0148; BYTE $0xd0 // add rax, rdx - LONG $0x24043948 // cmp qword [rsp], rax - JAE LBB2_191 - LONG $0x24048b48 // mov rax, qword [rsp] - LONG $0xb0048d4a // lea rax, [rax + 4*r14] - WORD $0x3948; BYTE $0xd0 // cmp rax, rdx - JBE LBB2_191 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 + LONG $0x06e0c148 // shl rax, 6 + WORD $0x0148; BYTE $0xd0 // add rax, rdx + LONG $0x24043948 // cmp qword [rsp], rax + JAE LBB2_188 + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0xb8048d4a // lea rax, [rax + 4*r15] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + JBE LBB2_188 LBB2_91: WORD $0xc031 // xor eax, eax LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax WORD $0x8948; BYTE $0xd6 // mov rsi, rdx LONG $0x24048b48 // mov rax, qword [rsp] - LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax LBB2_92: - LONG $0x24742b4c; BYTE $0x20 // sub r14, qword [rsp + 32] - QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 + LONG $0x247c2b4c; BYTE $0x20 // sub r15, qword [rsp + 32] + QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 LBB2_93: - WORD $0x8949; BYTE $0xf3 // mov r11, rsi - LONG $0x2e3b4466 // cmp r13w, word [rsi] + WORD $0x8949; BYTE $0xf2 // mov r10, rsi + LONG $0x363b4466 // cmp r14w, word [rsi] QUAD $0x000000c02494940f // sete byte [rsp + 192] - LONG $0x6e3b4466; BYTE $0x02 // cmp r13w, word [rsi + 2] + LONG $0x763b4466; BYTE $0x02 // cmp r14w, word [rsi + 2] LONG $0xd7940f40 // sete dil - LONG $0x6e3b4466; BYTE $0x04 // cmp r13w, word [rsi + 4] - LONG $0xd6940f41 // 
sete r14b - LONG $0x6e3b4466; BYTE $0x06 // cmp r13w, word [rsi + 6] - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x6e3b4466; BYTE $0x08 // cmp r13w, word [rsi + 8] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x6e3b4466; BYTE $0x0a // cmp r13w, word [rsi + 10] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x6e3b4466; BYTE $0x0c // cmp r13w, word [rsi + 12] + LONG $0x763b4466; BYTE $0x04 // cmp r14w, word [rsi + 4] + LONG $0xd7940f41 // sete r15b + LONG $0x763b4466; BYTE $0x06 // cmp r14w, word [rsi + 6] + LONG $0xd5940f41 // sete r13b + LONG $0x763b4466; BYTE $0x08 // cmp r14w, word [rsi + 8] + QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x763b4466; BYTE $0x0a // cmp r14w, word [rsi + 10] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0x763b4466; BYTE $0x0c // cmp r14w, word [rsi + 12] WORD $0x940f; BYTE $0xd0 // sete al - LONG $0x6e3b4466; BYTE $0x0e // cmp r13w, word [rsi + 14] - LONG $0xd2940f41 // sete r10b - LONG $0x6e3b4466; BYTE $0x10 // cmp r13w, word [rsi + 16] + LONG $0x763b4466; BYTE $0x0e // cmp r14w, word [rsi + 14] + LONG $0xd3940f41 // sete r11b + LONG $0x763b4466; BYTE $0x10 // cmp r14w, word [rsi + 16] QUAD $0x000000d02494940f // sete byte [rsp + 208] - LONG $0x6e3b4466; BYTE $0x12 // cmp r13w, word [rsi + 18] + LONG $0x763b4466; BYTE $0x12 // cmp r14w, word [rsi + 18] WORD $0x940f; BYTE $0xd1 // sete cl - LONG $0x6e3b4466; BYTE $0x14 // cmp r13w, word [rsi + 20] + LONG $0x763b4466; BYTE $0x14 // cmp r14w, word [rsi + 20] WORD $0x940f; BYTE $0xd2 // sete dl - LONG $0x6e3b4466; BYTE $0x16 // cmp r13w, word [rsi + 22] + LONG $0x763b4466; BYTE $0x16 // cmp r14w, word [rsi + 22] LONG $0xd6940f40 // sete sil - LONG $0x6b3b4566; BYTE $0x18 // cmp r13w, word [r11 + 24] + LONG $0x723b4566; BYTE $0x18 // cmp r14w, word [r10 + 24] LONG $0xd1940f41 // sete r9b - LONG $0x6b3b4566; BYTE $0x1a // cmp r13w, word [r11 + 26] + LONG $0x723b4566; BYTE $0x1a // cmp r14w, word [r10 + 26] LONG $0xd4940f41 // sete r12b - LONG $0x6b3b4566; BYTE $0x1c // cmp r13w, word [r11 + 28] + LONG $0x723b4566; BYTE $0x1c // cmp r14w, word [r10 + 28] QUAD $0x000000b02494940f // sete byte [rsp + 176] - LONG $0x6b3b4566; BYTE $0x1e // cmp r13w, word [r11 + 30] + LONG $0x723b4566; BYTE $0x1e // cmp r14w, word [r10 + 30] LONG $0xd0940f41 // sete r8b - LONG $0x6b3b4566; BYTE $0x20 // cmp r13w, word [r11 + 32] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] - LONG $0x6b3b4566; BYTE $0x22 // cmp r13w, word [r11 + 34] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x6b3b4566; BYTE $0x24 // cmp r13w, word [r11 + 36] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x6b3b4566; BYTE $0x26 // cmp r13w, word [r11 + 38] + LONG $0x723b4566; BYTE $0x20 // cmp r14w, word [r10 + 32] LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x6b3b4566; BYTE $0x28 // cmp r13w, word [r11 + 40] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x6b3b4566; BYTE $0x2a // cmp r13w, word [r11 + 42] + LONG $0x723b4566; BYTE $0x22 // cmp r14w, word [r10 + 34] + QUAD $0x000000a02494940f // sete byte [rsp + 160] + LONG $0x723b4566; BYTE $0x24 // cmp r14w, word [r10 + 36] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x723b4566; BYTE $0x26 // cmp r14w, word [r10 + 38] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x723b4566; BYTE $0x28 // cmp r14w, word [r10 + 40] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x723b4566; BYTE $0x2a // cmp r14w, word [r10 + 
42] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x723b4566; BYTE $0x2c // cmp r14w, word [r10 + 44] LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x6b3b4566; BYTE $0x2c // cmp r13w, word [r11 + 44] + LONG $0x723b4566; BYTE $0x2e // cmp r14w, word [r10 + 46] + LONG $0xd6940f41 // sete r14b + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x30 // cmp bx, word [r10 + 48] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x32 // cmp bx, word [r10 + 50] LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - LONG $0x6b3b4566; BYTE $0x2e // cmp r13w, word [r11 + 46] - LONG $0xd7940f41 // sete r15b - LONG $0x6b3b4566; BYTE $0x30 // cmp r13w, word [r11 + 48] - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0x6b3b4566; BYTE $0x32 // cmp r13w, word [r11 + 50] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x6b3b4566; BYTE $0x34 // cmp r13w, word [r11 + 52] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x34 // cmp bx, word [r10 + 52] LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] - LONG $0x6b3b4566; BYTE $0x36 // cmp r13w, word [r11 + 54] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x6b3b4566; BYTE $0x38 // cmp r13w, word [r11 + 56] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x6b3b4566; BYTE $0x3a // cmp r13w, word [r11 + 58] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x36 // cmp bx, word [r10 + 54] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x38 // cmp bx, word [r10 + 56] + LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x3a // cmp bx, word [r10 + 58] LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x6b3b4566; BYTE $0x3c // cmp r13w, word [r11 + 60] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x3c // cmp bx, word [r10 + 60] LONG $0x2414940f // sete byte [rsp] - LONG $0x6b3b4566; BYTE $0x3e // cmp r13w, word [r11 + 62] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x3e // cmp bx, word [r10 + 62] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0x0040; BYTE $0xff // add dil, dil QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e2c041 // shl r10b, 7 - WORD $0x0841; BYTE $0xc2 // or r10b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + LONG $0x07e3c041 // shl r11b, 7 + WORD $0x0841; BYTE $0xc3 // or r11b, al + LONG $0x02e7c041 // shl r15b, 2 + WORD $0x0841; BYTE $0xff // or r15b, dil WORD $0xc900 // add cl, cl LONG $0xd0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 208] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + LONG $0x03e5c041 // shl r13b, 3 + WORD $0x0845; BYTE $0xfd // or r13b, r15b WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0xca08 // or dl, cl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al + WORD $0x0844; BYTE $0xe9 // or cl, r13b WORD $0xcf89 // mov edi, ecx LONG $0x03e6c040 // shl sil, 3 WORD $0x0840; BYTE $0xd6 // or sil, dl - LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp 
+ 88] + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0840; BYTE $0xf9 // or cl, dil LONG $0x04e1c041 // shl r9b, 4 @@ -10545,49 +11092,48 @@ LBB2_93: WORD $0xe2c0; BYTE $0x06 // shl dl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xca // or r10b, cl + WORD $0x0841; BYTE $0xcb // or r11b, cl WORD $0x0845; BYTE $0xe0 // or r8b, r12b + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xc000 // add al, al + LONG $0x60244402 // add al, byte [rsp + 96] LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] - WORD $0xc900 // add cl, cl - LONG $0x48244c02 // add cl, byte [rsp + 72] - WORD $0xca89 // mov edx, ecx - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl + WORD $0xc108 // or cl, al WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x68 // movzx ecx, byte [rsp + 104] + LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl WORD $0xce89 // mov esi, ecx - LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] - WORD $0x8844; BYTE $0x11 // mov byte [rcx], r10b - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] + LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] + WORD $0x8844; BYTE $0x19 // mov byte [rcx], r11b + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xd7 // or r15b, dl + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xd6 // or r14b, dl LONG $0x01418844 // mov byte [rcx + 1], r8b - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0x0841; BYTE $0xf6 // or r14b, sil + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] WORD $0xc000 // add al, al - LONG $0x18244402 // add al, byte [rsp + 24] + LONG $0x28244402 // add al, byte [rsp + 40] WORD $0xc289 // mov edx, eax LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax @@ -10599,19 +11145,20 @@ LBB2_93: WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xd308 // or bl, dl WORD $0xc308 // or bl, al - LONG $0x02798844 // mov byte [rcx + 2], r15b + LONG $0x02718844 // mov byte [rcx + 2], r14b + LONG $0x24748b44; BYTE $0x08 // mov r14d, dword [rsp + 8] WORD $0x5988; BYTE $0x03 // mov byte [rcx + 3], bl - LONG $0x40738d49 // lea rsi, [r11 + 64] + LONG $0x40728d49 // lea rsi, [r10 + 64] LONG $0x04c18348 // add rcx, 4 - LONG $0x244c8948; 
BYTE $0x08 // mov qword [rsp + 8], rcx + LONG $0x244c8948; BYTE $0x10 // mov qword [rsp + 16], rcx QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 JNE LBB2_93 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x0000009824b48b4c // mov r14, qword [rsp + 152] + QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] JMP LBB2_139 LBB2_95: - LONG $0x2eb70f44 // movzx r13d, word [rsi] + LONG $0x36b70f44 // movzx r14d, word [rsi] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xfa490f4d // cmovns r15, r10 @@ -10625,7 +11172,7 @@ LBB2_95: LONG $0x241c8b4c // mov r11, qword [rsp] LBB2_97: - LONG $0x2a3b4466 // cmp r13w, word [rdx] + LONG $0x323b4466 // cmp r14w, word [rdx] LONG $0x02528d48 // lea rdx, [rdx + 2] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0xdbf6 // neg bl @@ -10649,121 +11196,129 @@ LBB2_97: LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 LBB2_99: - LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fa8349 // cmp r10, 32 - JL LBB2_142 - LONG $0x08ff8349 // cmp r15, 8 - QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 + LONG $0x05ffc149 // sar r15, 5 + LONG $0x20fa8349 // cmp r10, 32 + JL LBB2_143 + LONG $0x08ff8349 // cmp r15, 8 + LONG $0x24748944; BYTE $0x08 // mov dword [rsp + 8], r14d + QUAD $0x000000902494894c // mov qword [rsp + 144], r10 + QUAD $0x0000009824bc894c // mov qword [rsp + 152], r15 JB LBB2_103 - WORD $0x894c; BYTE $0xf8 // mov rax, r15 - LONG $0x06e0c148 // shl rax, 6 - WORD $0x0148; BYTE $0xd0 // add rax, rdx - LONG $0x24043948 // cmp qword [rsp], rax - JAE LBB2_194 - LONG $0x24048b48 // mov rax, qword [rsp] - LONG $0xb8048d4a // lea rax, [rax + 4*r15] - WORD $0x3948; BYTE $0xd0 // cmp rax, rdx - JBE LBB2_194 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 + LONG $0x06e0c148 // shl rax, 6 + WORD $0x0148; BYTE $0xd0 // add rax, rdx + LONG $0x24043948 // cmp qword [rsp], rax + JAE LBB2_191 + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0xb8048d4a // lea rax, [rax + 4*r15] + WORD $0x3948; BYTE $0xd0 // cmp rax, rdx + JBE LBB2_191 LBB2_103: WORD $0xc031 // xor eax, eax LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax WORD $0x8948; BYTE $0xd6 // mov rsi, rdx - LONG $0x24348b4c // mov r14, qword [rsp] + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax LBB2_104: - LONG $0x2474894c; BYTE $0x08 // mov qword [rsp + 8], r14 LONG $0x247c2b4c; BYTE $0x20 // sub r15, qword [rsp + 32] QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 LBB2_105: - WORD $0x8949; BYTE $0xf3 // mov r11, rsi - LONG $0x2e3b4466 // cmp r13w, word [rsi] + WORD $0x8949; BYTE $0xf2 // mov r10, rsi + LONG $0x363b4466 // cmp r14w, word [rsi] QUAD $0x000000c02494940f // sete byte [rsp + 192] - LONG $0x6e3b4466; BYTE $0x02 // cmp r13w, word [rsi + 2] + LONG $0x763b4466; BYTE $0x02 // cmp r14w, word [rsi + 2] LONG $0xd7940f40 // sete dil - LONG $0x6e3b4466; BYTE $0x04 // cmp r13w, word [rsi + 4] - LONG $0xd6940f41 // sete r14b - LONG $0x6e3b4466; BYTE $0x06 // cmp r13w, word [rsi + 6] - QUAD $0x000000a02494940f // sete byte [rsp + 160] - LONG $0x6e3b4466; BYTE $0x08 // cmp r13w, word [rsi + 8] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] - LONG $0x6e3b4466; BYTE $0x0a // cmp r13w, word [rsi + 10] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] - LONG $0x6e3b4466; BYTE $0x0c // cmp r13w, word [rsi + 12] + LONG $0x763b4466; BYTE $0x04 // cmp r14w, word [rsi + 4] + LONG $0xd7940f41 // sete r15b + LONG 
$0x763b4466; BYTE $0x06 // cmp r14w, word [rsi + 6] + LONG $0xd5940f41 // sete r13b + LONG $0x763b4466; BYTE $0x08 // cmp r14w, word [rsi + 8] + QUAD $0x000000802494940f // sete byte [rsp + 128] + LONG $0x763b4466; BYTE $0x0a // cmp r14w, word [rsi + 10] + LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + LONG $0x763b4466; BYTE $0x0c // cmp r14w, word [rsi + 12] WORD $0x940f; BYTE $0xd0 // sete al - LONG $0x6e3b4466; BYTE $0x0e // cmp r13w, word [rsi + 14] - LONG $0xd2940f41 // sete r10b - LONG $0x6e3b4466; BYTE $0x10 // cmp r13w, word [rsi + 16] + LONG $0x763b4466; BYTE $0x0e // cmp r14w, word [rsi + 14] + LONG $0xd3940f41 // sete r11b + LONG $0x763b4466; BYTE $0x10 // cmp r14w, word [rsi + 16] QUAD $0x000000d02494940f // sete byte [rsp + 208] - LONG $0x6e3b4466; BYTE $0x12 // cmp r13w, word [rsi + 18] + LONG $0x763b4466; BYTE $0x12 // cmp r14w, word [rsi + 18] WORD $0x940f; BYTE $0xd1 // sete cl - LONG $0x6e3b4466; BYTE $0x14 // cmp r13w, word [rsi + 20] + LONG $0x763b4466; BYTE $0x14 // cmp r14w, word [rsi + 20] WORD $0x940f; BYTE $0xd2 // sete dl - LONG $0x6e3b4466; BYTE $0x16 // cmp r13w, word [rsi + 22] + LONG $0x763b4466; BYTE $0x16 // cmp r14w, word [rsi + 22] LONG $0xd6940f40 // sete sil - LONG $0x6b3b4566; BYTE $0x18 // cmp r13w, word [r11 + 24] + LONG $0x723b4566; BYTE $0x18 // cmp r14w, word [r10 + 24] LONG $0xd1940f41 // sete r9b - LONG $0x6b3b4566; BYTE $0x1a // cmp r13w, word [r11 + 26] + LONG $0x723b4566; BYTE $0x1a // cmp r14w, word [r10 + 26] LONG $0xd4940f41 // sete r12b - LONG $0x6b3b4566; BYTE $0x1c // cmp r13w, word [r11 + 28] + LONG $0x723b4566; BYTE $0x1c // cmp r14w, word [r10 + 28] QUAD $0x000000b02494940f // sete byte [rsp + 176] - LONG $0x6b3b4566; BYTE $0x1e // cmp r13w, word [r11 + 30] + LONG $0x723b4566; BYTE $0x1e // cmp r14w, word [r10 + 30] LONG $0xd0940f41 // sete r8b - LONG $0x6b3b4566; BYTE $0x20 // cmp r13w, word [r11 + 32] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] - LONG $0x6b3b4566; BYTE $0x22 // cmp r13w, word [r11 + 34] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] - LONG $0x6b3b4566; BYTE $0x24 // cmp r13w, word [r11 + 36] - QUAD $0x000000802494940f // sete byte [rsp + 128] - LONG $0x6b3b4566; BYTE $0x26 // cmp r13w, word [r11 + 38] + LONG $0x723b4566; BYTE $0x20 // cmp r14w, word [r10 + 32] LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] - LONG $0x6b3b4566; BYTE $0x28 // cmp r13w, word [r11 + 40] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] - LONG $0x6b3b4566; BYTE $0x2a // cmp r13w, word [r11 + 42] + LONG $0x723b4566; BYTE $0x22 // cmp r14w, word [r10 + 34] + QUAD $0x000000a02494940f // sete byte [rsp + 160] + LONG $0x723b4566; BYTE $0x24 // cmp r14w, word [r10 + 36] + LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + LONG $0x723b4566; BYTE $0x26 // cmp r14w, word [r10 + 38] + LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + LONG $0x723b4566; BYTE $0x28 // cmp r14w, word [r10 + 40] + LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + LONG $0x723b4566; BYTE $0x2a // cmp r14w, word [r10 + 42] + LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + LONG $0x723b4566; BYTE $0x2c // cmp r14w, word [r10 + 44] LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] - LONG $0x6b3b4566; BYTE $0x2c // cmp r13w, word [r11 + 44] + LONG $0x723b4566; BYTE $0x2e // cmp r14w, word [r10 + 46] + LONG $0xd6940f41 // sete r14b + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x30 // cmp bx, word [r10 + 48] + LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + LONG $0x08245c8b // 
mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x32 // cmp bx, word [r10 + 50] LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] - LONG $0x6b3b4566; BYTE $0x2e // cmp r13w, word [r11 + 46] - LONG $0xd7940f41 // sete r15b - LONG $0x6b3b4566; BYTE $0x30 // cmp r13w, word [r11 + 48] - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] - LONG $0x6b3b4566; BYTE $0x32 // cmp r13w, word [r11 + 50] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0x6b3b4566; BYTE $0x34 // cmp r13w, word [r11 + 52] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x34 // cmp bx, word [r10 + 52] LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] - LONG $0x6b3b4566; BYTE $0x36 // cmp r13w, word [r11 + 54] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] - LONG $0x6b3b4566; BYTE $0x38 // cmp r13w, word [r11 + 56] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0x6b3b4566; BYTE $0x3a // cmp r13w, word [r11 + 58] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x36 // cmp bx, word [r10 + 54] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x38 // cmp bx, word [r10 + 56] + LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x3a // cmp bx, word [r10 + 58] LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] - LONG $0x6b3b4566; BYTE $0x3c // cmp r13w, word [r11 + 60] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x3c // cmp bx, word [r10 + 60] LONG $0x2414940f // sete byte [rsp] - LONG $0x6b3b4566; BYTE $0x3e // cmp r13w, word [r11 + 62] + LONG $0x08245c8b // mov ebx, dword [rsp + 8] + LONG $0x5a3b4166; BYTE $0x3e // cmp bx, word [r10 + 62] WORD $0x940f; BYTE $0xd3 // sete bl WORD $0x0040; BYTE $0xff // add dil, dil QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e2c041 // shl r10b, 7 - WORD $0x0841; BYTE $0xc2 // or r10b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + LONG $0x07e3c041 // shl r11b, 7 + WORD $0x0841; BYTE $0xc3 // or r11b, al + LONG $0x02e7c041 // shl r15b, 2 + WORD $0x0841; BYTE $0xff // or r15b, dil WORD $0xc900 // add cl, cl LONG $0xd0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 208] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + LONG $0x03e5c041 // shl r13b, 3 + WORD $0x0845; BYTE $0xfd // or r13b, r15b WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0xca08 // or dl, cl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al + WORD $0x0844; BYTE $0xe9 // or cl, r13b WORD $0xcf89 // mov edi, ecx LONG $0x03e6c040 // shl sil, 3 WORD $0x0840; BYTE $0xd6 // or sil, dl - LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0840; BYTE $0xf9 // or cl, dil LONG $0x04e1c041 // shl r9b, 4 @@ -10774,49 +11329,48 @@ LBB2_105: WORD $0xe2c0; BYTE $0x06 // shl dl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xca // or r10b, cl + WORD $0x0841; BYTE $0xcb // or r11b, cl WORD $0x0845; BYTE $0xe0 // or r8b, r12b + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + 
WORD $0xc000 // add al, al + LONG $0x60244402 // add al, byte [rsp + 96] LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] - WORD $0xc900 // add cl, cl - LONG $0x48244c02 // add cl, byte [rsp + 72] - WORD $0xca89 // mov edx, ecx - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl + WORD $0xc108 // or cl, al WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x68 // movzx ecx, byte [rsp + 104] + LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl WORD $0xce89 // mov esi, ecx - LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] - WORD $0x8844; BYTE $0x11 // mov byte [rcx], r10b - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] + LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] + WORD $0x8844; BYTE $0x19 // mov byte [rcx], r11b + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xd7 // or r15b, dl + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xd6 // or r14b, dl LONG $0x01418844 // mov byte [rcx + 1], r8b - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0x0841; BYTE $0xf6 // or r14b, sil + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] WORD $0xc000 // add al, al - LONG $0x18244402 // add al, byte [rsp + 24] + LONG $0x28244402 // add al, byte [rsp + 40] WORD $0xc289 // mov edx, eax LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax @@ -10828,17 +11382,17 @@ LBB2_105: WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xd308 // or bl, dl WORD $0xc308 // or bl, al - LONG $0x02798844 // mov byte [rcx + 2], r15b + LONG $0x02718844 // mov byte [rcx + 2], r14b + LONG $0x24748b44; BYTE $0x08 // mov r14d, dword [rsp + 8] WORD $0x5988; BYTE $0x03 // mov byte [rcx + 3], bl - LONG $0x40738d49 // lea rsi, [r11 + 64] + LONG $0x40728d49 // lea rsi, [r10 + 64] LONG $0x04c18348 // add rcx, 4 - LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx + LONG $0x244c8948; BYTE $0x10 // mov qword [rsp + 16], rcx QUAD $0x0000008824848348; BYTE $0xff // add qword [rsp + 136], -1 JNE LBB2_105 QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] - LONG $0x24748b4c; BYTE $0x08 // mov r14, qword [rsp + 8] - JMP LBB2_143 + JMP LBB2_144 LBB2_107: WORD $0x8b4c; BYTE $0x2e // mov r13, qword [rsi] @@ -10890,7 +11444,7 @@ LBB2_113: WORD $0x3b4c; BYTE $0x2a // cmp 
r13, qword [rdx] QUAD $0x000000c02494940f // sete byte [rsp + 192] LONG $0x086a3b4c // cmp r13, qword [rdx + 8] - LONG $0xd7940f40 // sete dil + LONG $0xd2940f41 // sete r10b LONG $0x106a3b4c // cmp r13, qword [rdx + 16] LONG $0xd6940f41 // sete r14b LONG $0x186a3b4c // cmp r13, qword [rdx + 24] @@ -10900,9 +11454,9 @@ LBB2_113: LONG $0x286a3b4c // cmp r13, qword [rdx + 40] LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] LONG $0x306a3b4c // cmp r13, qword [rdx + 48] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x940f; BYTE $0xd3 // sete bl LONG $0x386a3b4c // cmp r13, qword [rdx + 56] - LONG $0xd3940f41 // sete r11b + LONG $0xd7940f40 // sete dil LONG $0x406a3b4c // cmp r13, qword [rdx + 64] QUAD $0x000000d02494940f // sete byte [rsp + 208] LONG $0x486a3b4c // cmp r13, qword [rdx + 72] @@ -10912,13 +11466,13 @@ LBB2_113: LONG $0x586a3b4c // cmp r13, qword [rdx + 88] LONG $0xd1940f41 // sete r9b LONG $0x606a3b4c // cmp r13, qword [rdx + 96] - LONG $0xd2940f41 // sete r10b + LONG $0xd3940f41 // sete r11b LONG $0x686a3b4c // cmp r13, qword [rdx + 104] LONG $0xd4940f41 // sete r12b LONG $0x706a3b4c // cmp r13, qword [rdx + 112] QUAD $0x000000b02494940f // sete byte [rsp + 176] LONG $0x786a3b4c // cmp r13, qword [rdx + 120] - WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x940f; BYTE $0xd0 // sete al LONG $0x80aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 128] LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] LONG $0x88aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 136] @@ -10938,107 +11492,106 @@ LBB2_113: LONG $0xc0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 192] LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] LONG $0xc8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 200] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] - LONG $0xd0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 208] LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + LONG $0xd0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 208] + LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] LONG $0xd8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 216] LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] LONG $0xe0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 224] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] - LONG $0xe8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 232] LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + LONG $0xe8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 232] + LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] LONG $0xf0aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 240] LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] LONG $0xf8aa3b4c; WORD $0x0000; BYTE $0x00 // cmp r13, qword [rdx + 248] - WORD $0x940f; BYTE $0xd3 // sete bl - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000c024940244 // add r10b, byte [rsp + 192] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xdf // or dil, bl LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + WORD $0x0845; BYTE $0xd6 // or r14b, r10b WORD $0x0040; BYTE $0xf6 // add sil, sil QUAD $0x000000d024b40240 // add sil, byte [rsp + 208] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 
160] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b - WORD $0xc789 // mov edi, eax + QUAD $0x000000a0249cb60f // movzx ebx, byte [rsp + 160] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + WORD $0x8941; BYTE $0xda // mov r10d, ebx LONG $0x02e0c041 // shl r8b, 2 WORD $0x0841; BYTE $0xf0 // or r8b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf8 // or al, dil - WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + WORD $0xde89 // mov esi, ebx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf8 // or al, dil - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x245cb60f; BYTE $0x58 // movzx ebx, byte [rsp + 88] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xf3 // or bl, sil + LONG $0x04e3c041 // shl r11b, 4 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] - LONG $0x06e6c040 // shl sil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf1 // or cl, sil - WORD $0x0841; BYTE $0xc3 // or r11b, al - WORD $0x0844; BYTE $0xe1 // or cl, r12b - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0x0845; BYTE $0xdc // or r12b, r11b + LONG $0x24348b48 // mov rsi, qword [rsp] + QUAD $0x0000b02484b60f44; BYTE $0x00 // movzx r8d, byte [rsp + 176] + LONG $0x06e0c041 // shl r8b, 6 + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xc0 // or al, r8b + WORD $0x0840; BYTE $0xdf // or dil, bl + WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x245cb60f; BYTE $0x78 // movzx ebx, byte [rsp + 120] + WORD $0xdb00 // add bl, bl + LONG $0x48245c02 // add bl, byte [rsp + 72] + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + WORD $0x8840; BYTE $0x3e // mov byte [rsi], dil + LONG $0x247cb60f; BYTE $0x40 // movzx edi, byte [rsp + 64] + LONG $0x06e7c040 // shl dil, 6 + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xff // or r15b, dil + WORD $0x4688; BYTE $0x01 // mov byte [rsi + 1], al + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xc000 // add al, al - LONG $0x48244402 // add al, byte [rsp + 72] - WORD $0xc689 // mov esi, eax - QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + LONG $0x18244402 // add al, byte [rsp + 24] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE 
$0x02 // shl al, 2 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0840; BYTE $0xf0 // or al, sil - WORD $0xc789 // mov edi, eax - LONG $0x24048b48 // mov rax, qword [rsp] - WORD $0x8844; BYTE $0x18 // mov byte [rax], r11b - LONG $0x24348b48 // mov rsi, qword [rsp] - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xc7 // or r15b, al - WORD $0x4e88; BYTE $0x01 // mov byte [rsi + 1], cl - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xc000 // add al, al - LONG $0x18244402 // add al, byte [rsp + 24] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - WORD $0xcb08 // or bl, cl LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x5e88; BYTE $0x03 // mov byte [rsi + 3], bl + WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 LONG $0x04c68348 // add rsi, 4 LONG $0x24348948 // mov qword [rsp], rsi @@ -11050,16 +11603,16 @@ LBB2_113: LBB2_115: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB2_176 + JGE LBB2_177 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB2_165 + JNE LBB2_166 LBB2_117: WORD $0xff31 // xor edi, edi - JMP LBB2_167 + JMP LBB2_168 LBB2_118: LONG $0x1f728d4d // lea r14, [r10 + 31] @@ -11078,7 +11631,9 @@ LBB2_118: LBB2_120: WORD $0x2e0f; BYTE $0x02 // ucomiss xmm0, dword [rdx] LONG $0x04528d48 // lea rdx, [rdx + 4] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd3 // sete bl + WORD $0xcb20 // and bl, cl WORD $0xdbf6 // neg bl LONG $0x07708d48 // lea rsi, [rax + 7] WORD $0x8548; BYTE $0xc0 
// test rax, rax @@ -11102,189 +11657,280 @@ LBB2_120: LBB2_122: LONG $0x05fec149 // sar r14, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB2_146 + JL LBB2_147 LONG $0x04fe8349 // cmp r14, 4 JB LBB2_126 WORD $0x894c; BYTE $0xf0 // mov rax, r14 LONG $0x07e0c148 // shl rax, 7 WORD $0x0148; BYTE $0xd0 // add rax, rdx LONG $0x24043948 // cmp qword [rsp], rax - JAE LBB2_197 + JAE LBB2_194 LONG $0x24048b48 // mov rax, qword [rsp] LONG $0xb0048d4a // lea rax, [rax + 4*r14] WORD $0x3948; BYTE $0xd0 // cmp rax, rdx - JBE LBB2_197 + JBE LBB2_194 LBB2_126: WORD $0x3145; BYTE $0xc0 // xor r8d, r8d WORD $0x8948; BYTE $0xd3 // mov rbx, rdx - LONG $0x241c8b4c // mov r11, qword [rsp] + LONG $0x243c8b4c // mov r15, qword [rsp] LBB2_127: - LONG $0x241c894c // mov qword [rsp], r11 - QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x0000008824b4894c // mov qword [rsp + 136], r14 - WORD $0x294d; BYTE $0xc6 // sub r14, r8 - QUAD $0x000000c024b4894c // mov qword [rsp + 192], r14 + LONG $0x247c894c; BYTE $0x08 // mov qword [rsp + 8], r15 + QUAD $0x000000902494894c // mov qword [rsp + 144], r10 + QUAD $0x000000f024b4894c // mov qword [rsp + 240], r14 + WORD $0x294d; BYTE $0xc6 // sub r14, r8 + QUAD $0x0000009824b4894c // mov qword [rsp + 152], r14 LBB2_128: WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - QUAD $0x000000a02494940f // sete byte [rsp + 160] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + WORD $0x8941; BYTE $0xcd // mov r13d, ecx LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] - LONG $0xd0940f41 // sete r8b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al LONG $0x08432e0f // ucomiss xmm0, dword [rbx + 8] - LONG $0xd6940f41 // sete r14b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x18244c88 // mov byte [rsp + 24], cl LONG $0x0c432e0f // ucomiss xmm0, dword [rbx + 12] - LONG $0xd5940f41 // sete r13b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x10432e0f // ucomiss xmm0, dword [rbx + 16] - LONG $0x2454940f; BYTE $0x70 // sete byte [rsp + 112] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x14432e0f // ucomiss xmm0, dword [rbx + 20] - LONG $0x2454940f; BYTE $0x58 // sete byte [rsp + 88] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x18432e0f // ucomiss xmm0, dword [rbx + 24] - WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x1c432e0f // ucomiss xmm0, dword [rbx + 28] - LONG $0xd3940f41 // sete r11b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + WORD $0x0c88; BYTE $0x24 // mov byte [rsp], cl LONG $0x20432e0f // ucomiss xmm0, dword [rbx + 32] - QUAD $0x000000b02494940f // sete byte [rsp + 176] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x24432e0f // ucomiss xmm0, dword [rbx + 36] - WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x9b0f; BYTE $0xd0 // setnp al 
+ WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x28432e0f // ucomiss xmm0, dword [rbx + 40] - LONG $0xd6940f40 // sete sil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x2c432e0f // ucomiss xmm0, dword [rbx + 44] - LONG $0xd7940f40 // sete dil + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x30432e0f // ucomiss xmm0, dword [rbx + 48] - LONG $0xd2940f41 // sete r10b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x34432e0f // ucomiss xmm0, dword [rbx + 52] - LONG $0xd4940f41 // sete r12b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x38432e0f // ucomiss xmm0, dword [rbx + 56] - LONG $0x2454940f; BYTE $0x78 // sete byte [rsp + 120] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl LONG $0x3c432e0f // ucomiss xmm0, dword [rbx + 60] - LONG $0xd1940f41 // sete r9b + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl LONG $0x40432e0f // ucomiss xmm0, dword [rbx + 64] - LONG $0x2454940f; BYTE $0x48 // sete byte [rsp + 72] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x44432e0f // ucomiss xmm0, dword [rbx + 68] - QUAD $0x000000d02494940f // sete byte [rsp + 208] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd4940f41 // sete r12b + WORD $0x2041; BYTE $0xc4 // and r12b, al LONG $0x48432e0f // ucomiss xmm0, dword [rbx + 72] - QUAD $0x000000802494940f // sete byte [rsp + 128] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl LONG $0x4c432e0f // ucomiss xmm0, dword [rbx + 76] - LONG $0x2454940f; BYTE $0x60 // sete byte [rsp + 96] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl LONG $0x50432e0f // ucomiss xmm0, dword [rbx + 80] - LONG $0x2454940f; BYTE $0x50 // sete byte [rsp + 80] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xd0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 208], cl LONG $0x54432e0f // ucomiss xmm0, dword [rbx + 84] - LONG $0x2454940f; BYTE $0x68 // sete byte [rsp + 104] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xc0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 192], cl LONG $0x58432e0f // ucomiss xmm0, dword [rbx + 88] - LONG $0x2454940f; BYTE $0x40 // sete byte [rsp + 64] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f41 // sete r14b + WORD $0x2041; BYTE $0xc6 // and r14b, al LONG $0x5c432e0f // ucomiss xmm0, dword [rbx + 92] - LONG $0xd7940f41 // sete r15b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd0940f41 // sete r8b + WORD $0x2041; BYTE 
$0xc0 // and r8b, al LONG $0x60432e0f // ucomiss xmm0, dword [rbx + 96] - LONG $0x2454940f; BYTE $0x18 // sete byte [rsp + 24] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl LONG $0x64432e0f // ucomiss xmm0, dword [rbx + 100] - LONG $0x2454940f; BYTE $0x38 // sete byte [rsp + 56] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd3940f41 // sete r11b + WORD $0x2041; BYTE $0xc3 // and r11b, al LONG $0x68432e0f // ucomiss xmm0, dword [rbx + 104] - LONG $0x2454940f; BYTE $0x20 // sete byte [rsp + 32] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd2940f41 // sete r10b + WORD $0x2041; BYTE $0xc2 // and r10b, al LONG $0x6c432e0f // ucomiss xmm0, dword [rbx + 108] - LONG $0x2454940f; BYTE $0x28 // sete byte [rsp + 40] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd1940f41 // sete r9b + WORD $0x2041; BYTE $0xc1 // and r9b, al LONG $0x70432e0f // ucomiss xmm0, dword [rbx + 112] - LONG $0x2454940f; BYTE $0x10 // sete byte [rsp + 16] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f41 // sete r15b + WORD $0x2041; BYTE $0xc7 // and r15b, al LONG $0x74432e0f // ucomiss xmm0, dword [rbx + 116] - LONG $0x2454940f; BYTE $0x30 // sete byte [rsp + 48] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd1 // sete cl + WORD $0xc120 // and cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl LONG $0x78432e0f // ucomiss xmm0, dword [rbx + 120] - LONG $0x2454940f; BYTE $0x08 // sete byte [rsp + 8] + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd7940f40 // sete dil + WORD $0x2040; BYTE $0xc7 // and dil, al LONG $0x7c432e0f // ucomiss xmm0, dword [rbx + 124] - WORD $0x940f; BYTE $0xd1 // sete cl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000a024840244 // add r8b, byte [rsp + 160] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xc6 // or r14b, r8b + WORD $0x9b0f; BYTE $0xd0 // setnp al + LONG $0xd6940f40 // sete sil + WORD $0x2040; BYTE $0xc6 // and sil, al WORD $0xd200 // add dl, dl - LONG $0xb0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 176] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e6c040 // shl sil, 2 - WORD $0x0840; BYTE $0xd6 // or sil, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd0 // mov r8d, edx - LONG $0x03e7c040 // shl dil, 3 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xc2 // or dl, r8b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x78 // movzx esi, byte [rsp + 120] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xf1 // or r9b, sil - WORD $0x0841; BYTE $0xd3 // or r11b, dl - WORD $0x0845; BYTE $0xe1 // or r9b, r12b - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] - WORD $0xc000 // add al, al - LONG $0x48244402 // add al, byte [rsp + 72] - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd689 // mov esi, edx - LONG 
$0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + WORD $0x8941; BYTE $0xd5 // mov r13d, edx + LONG $0x2454b60f; BYTE $0x38 // movzx edx, byte [rsp + 56] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xf2 // or dl, sil - LONG $0x24348b48 // mov rsi, qword [rsp] - WORD $0x8844; BYTE $0x1e // mov byte [rsi], r11b - LONG $0x247cb60f; BYTE $0x40 // movzx edi, byte [rsp + 64] - LONG $0x06e7c040 // shl dil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x014e8844 // mov byte [rsi + 1], r9b - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xc000 // add al, al - LONG $0x18244402 // add al, byte [rsp + 24] - WORD $0xc289 // mov edx, eax LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + WORD $0xc900 // add cl, cl + LONG $0x28244c02 // add cl, byte [rsp + 40] + LONG $0x2404b60f // movzx eax, byte [rsp] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax + WORD $0x0488; BYTE $0x24 // mov byte [rsp], al + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + WORD $0xe2c0; BYTE $0x04 // shl dl, 4 + WORD $0xca08 // or dl, cl + LONG $0x6cb60f44; WORD $0x6024 // movzx r13d, byte [rsp + 96] + LONG $0x05e5c041 // shl r13b, 5 + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + LONG $0x6cb60f44; WORD $0x7024 // movzx r13d, byte [rsp + 112] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xcd // or r13b, cl + WORD $0x0045; BYTE $0xe4 // add r12b, r12b + LONG $0x24640244; BYTE $0x48 // add r12b, byte [rsp + 72] + WORD $0x8944; BYTE $0xe1 // mov ecx, r12d + LONG $0x64b60f44; WORD $0x7824 // movzx r12d, byte [rsp + 120] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xcc // or r12b, cl + QUAD $0x000000b0248cb60f // movzx ecx, byte [rsp + 176] + WORD $0xe1c0; 
BYTE $0x03 // shl cl, 3 + WORD $0x0844; BYTE $0xe1 // or cl, r12b + WORD $0x8941; BYTE $0xcc // mov r12d, ecx + QUAD $0x000000d0248cb60f // movzx ecx, byte [rsp + 208] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xe1 // or cl, r12b + LONG $0x24b60f44; BYTE $0x24 // movzx r12d, byte [rsp] + WORD $0x0841; BYTE $0xc4 // or r12b, al + QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xd008 // or al, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0xd108 // or cl, dl - WORD $0xc108 // or cl, al - LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl + LONG $0x06e6c041 // shl r14b, 6 + WORD $0x0841; BYTE $0xc6 // or r14b, al + WORD $0x0841; BYTE $0xd5 // or r13b, dl + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xf0 // or r8b, r14b + WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x000000a0249c0244 // add r11b, byte [rsp + 160] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + LONG $0x04e7c041 // shl r15b, 4 + WORD $0x0845; BYTE $0xcf // or r15b, r9b + LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] + WORD $0x8844; BYTE $0x20 // mov byte [rax], r12b + QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x01688844 // mov byte [rax + 1], r13b + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0844; BYTE $0xfe // or sil, r15b + LONG $0x02408844 // mov byte [rax + 2], r8b + LONG $0x03708840 // mov byte [rax + 3], sil LONG $0x80c38148; WORD $0x0000; BYTE $0x00 // add rbx, 128 - LONG $0x04c68348 // add rsi, 4 - LONG $0x24348948 // mov qword [rsp], rsi - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 + LONG $0x04c08348 // add rax, 4 + LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax + QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 JNE LBB2_128 - LONG $0x241c8b4c // mov r11, qword [rsp] + LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - QUAD $0x0000008824b48b4c // mov r14, qword [rsp + 136] - JMP LBB2_147 + QUAD $0x000000f024b48b4c // mov r14, qword [rsp + 240] + JMP LBB2_148 LBB2_130: LONG $0x24048b48 // mov rax, qword [rsp] @@ -11294,7 +11940,7 @@ LBB2_130: LBB2_131: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB2_176 + JGE LBB2_177 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 @@ -11305,7 +11951,7 @@ LBB2_131: WORD $0x3145; BYTE $0xc9 // xor r9d, r9d LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] -LBB2_155: +LBB2_156: WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x0e343a46 // cmp r14b, byte [rsi + r9] WORD $0x940f; BYTE $0xd3 // sete bl @@ -11333,8 +11979,8 @@ LBB2_155: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x394d; BYTE $0xca // cmp r10, r9 - JNE LBB2_155 - JMP LBB2_158 + JNE LBB2_156 + JMP LBB2_159 LBB2_134: LONG $0x24048b48 // mov rax, qword [rsp] @@ -11344,75 +11990,112 @@ LBB2_134: LBB2_135: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB2_176 + JGE LBB2_177 WORD 
$0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB2_156 + JNE LBB2_157 LBB2_137: WORD $0x3145; BYTE $0xc9 // xor r9d, r9d LONG $0x01c0f641 // test r8b, 1 - JE LBB2_176 - JMP LBB2_160 + JE LBB2_177 + JMP LBB2_161 LBB2_138: LONG $0x24048b48 // mov rax, qword [rsp] - LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax WORD $0x8948; BYTE $0xd6 // mov rsi, rdx LBB2_139: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_176 - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xd6 // add r14, r10 - JNE LBB2_170 - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d - JMP LBB2_172 + LONG $0x05e7c149 // shl r15, 5 + WORD $0x394d; BYTE $0xd7 // cmp r15, r10 + JGE LBB2_177 + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + WORD $0x294d; BYTE $0xf8 // sub r8, r15 + WORD $0xf749; BYTE $0xd7 // not r15 + WORD $0x014d; BYTE $0xd7 // add r15, r10 + JE LBB2_146 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0x3145; BYTE $0xff // xor r15d, r15d + LONG $0x245c8b4c; BYTE $0x10 // mov r11, qword [rsp + 16] LBB2_142: - LONG $0x24348b4c // mov r14, qword [rsp] - WORD $0x8948; BYTE $0xd6 // mov rsi, rdx + WORD $0x8948; BYTE $0xf0 // mov rax, rsi + LONG $0x363b4466 // cmp r14w, word [rsi] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xdaf6 // neg dl + WORD $0x894c; BYTE $0xff // mov rdi, r15 + LONG $0x03efc148 // shr rdi, 3 + LONG $0x14b60f45; BYTE $0x3b // movzx r10d, byte [r11 + rdi] + WORD $0x8944; BYTE $0xf9 // mov ecx, r15d + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd2 // xor dl, r10b + WORD $0xd320 // and bl, dl + WORD $0x3044; BYTE $0xd3 // xor bl, r10b + LONG $0x3b1c8841 // mov byte [r11 + rdi], bl + LONG $0x02c78349 // add r15, 2 + LONG $0x763b4466; BYTE $0x02 // cmp r14w, word [rsi + 2] + LONG $0x04768d48 // lea rsi, [rsi + 4] + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xdaf6 // neg dl + WORD $0xda30 // xor dl, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0xd020 // and al, dl + WORD $0xd830 // xor al, bl + LONG $0x3b048841 // mov byte [r11 + rdi], al + WORD $0x394d; BYTE $0xf9 // cmp r9, r15 + JNE LBB2_142 + JMP LBB2_173 LBB2_143: + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax + WORD $0x8948; BYTE $0xd6 // mov rsi, rdx + +LBB2_144: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB2_176 + JGE LBB2_177 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB2_177 - WORD $0x3145; BYTE $0xff // xor r15d, r15d - JMP LBB2_179 + JNE LBB2_171 LBB2_146: - LONG $0x241c8b4c // mov r11, qword [rsp] - WORD $0x8948; BYTE $0xd3 // mov rbx, rdx + WORD $0x3145; BYTE $0xff // xor r15d, r15d + JMP LBB2_173 LBB2_147: + LONG $0x243c8b4c // mov r15, qword [rsp] + WORD $0x8948; BYTE $0xd3 // mov rbx, rdx + +LBB2_148: LONG $0x05e6c149 // shl r14, 5 WORD $0x394d; BYTE $0xd6 // cmp r14, r10 - JGE LBB2_176 + JGE LBB2_177 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf0 // sub r8, r14 WORD $0xf749; BYTE $0xd6 // not r14 WORD $0x014d; BYTE $0xd6 // add r14, r10 - 
JNE LBB2_181 + JNE LBB2_178 WORD $0xf631 // xor esi, esi - JMP LBB2_183 + JMP LBB2_180 -LBB2_150: +LBB2_151: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xff31 // xor edi, edi LONG $0x241c8b4c // mov r11, qword [rsp] -LBB2_151: +LBB2_152: WORD $0x3b44; BYTE $0x2a // cmp r13d, dword [rdx] WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al @@ -11440,21 +12123,21 @@ LBB2_151: WORD $0xd830 // xor al, bl LONG $0x33048841 // mov byte [r11 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi - JNE LBB2_151 + JNE LBB2_152 -LBB2_152: +LBB2_153: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_176 + JE LBB2_177 WORD $0x3b44; BYTE $0x2a // cmp r13d, dword [rdx] - JMP LBB2_169 + JMP LBB2_170 -LBB2_156: +LBB2_157: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] -LBB2_157: +LBB2_158: WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x0e343a46 // cmp r14b, byte [rsi + r9] WORD $0x940f; BYTE $0xd3 // sete bl @@ -11482,14 +12165,14 @@ LBB2_157: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x394d; BYTE $0xca // cmp r10, r9 - JNE LBB2_157 + JNE LBB2_158 -LBB2_158: +LBB2_159: WORD $0x014c; BYTE $0xce // add rsi, r9 LONG $0x01c0f641 // test r8b, 1 - JE LBB2_176 + JE LBB2_177 -LBB2_160: +LBB2_161: WORD $0x3a44; BYTE $0x36 // cmp r14b, byte [rsi] WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al @@ -11500,57 +12183,77 @@ LBB2_160: LONG $0x07e18041 // and r9b, 7 WORD $0x01b3 // mov bl, 1 WORD $0x8944; BYTE $0xc9 // mov ecx, r9d - JMP LBB2_174 + JMP LBB2_175 -LBB2_161: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 +LBB2_162: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0xff31 // xor edi, edi - LONG $0x241c8b4c // mov r11, qword [rsp] + LONG $0x24348b4c // mov r14, qword [rsp] -LBB2_162: +LBB2_163: LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd0 // sete al + WORD $0xc820 // and al, cl WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - LONG $0x0cb60f45; BYTE $0x33 // movzx r9d, byte [r11 + rsi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + LONG $0x14b60f45; BYTE $0x36 // movzx r10d, byte [r14 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x331c8841 // mov byte [r11 + rsi], bl + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b + WORD $0x2041; BYTE $0xc3 // and r11b, al + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + LONG $0x361c8845 // mov byte [r14 + rsi], r11b LONG $0x02c78348 // add rdi, 2 LONG $0x422e0f66; BYTE $0x08 // ucomisd xmm0, qword [rdx + 8] LONG $0x10528d48 // lea rdx, [rdx + 16] - LONG $0xd1940f41 // sete r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xd9 // xor r9b, bl + LONG $0xd29b0f41 // setnp r10b + WORD $0x940f; BYTE $0xd0 // sete al + WORD $0x2044; BYTE $0xd0 // and al, r10b + WORD $0xd8f6 // neg al + WORD $0x3044; BYTE $0xd8 // xor al, r11b WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x2044; BYTE $0xc8 // and al, r9b - WORD $0xd830 // xor al, bl - LONG $0x33048841 // mov byte [r11 + rsi], al - WORD $0x3949; BYTE $0xfa // cmp 
r10, rdi - JNE LBB2_162 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xdb // xor bl, r11b + LONG $0x361c8841 // mov byte [r14 + rsi], bl + WORD $0x3949; BYTE $0xf9 // cmp r9, rdi + JNE LBB2_163 -LBB2_163: - LONG $0x01c0f641 // test r8b, 1 - JE LBB2_176 - LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] - JMP LBB2_169 +LBB2_164: + LONG $0x01c0f641 // test r8b, 1 + JE LBB2_177 + LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al + WORD $0xdaf6 // neg dl + WORD $0x8948; BYTE $0xf8 // mov rax, rdi + LONG $0x03e8c148 // shr rax, 3 + LONG $0x24048b4c // mov r8, qword [rsp] + LONG $0x00348a41 // mov sil, byte [r8 + rax] + LONG $0x07e78040 // and dil, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0xf989 // mov ecx, edi + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf2 // xor dl, sil + WORD $0xd320 // and bl, dl + WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x001c8841 // mov byte [r8 + rax], bl + JMP LBB2_177 -LBB2_165: +LBB2_166: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xff31 // xor edi, edi LONG $0x241c8b4c // mov r11, qword [rsp] -LBB2_166: +LBB2_167: WORD $0x3b4c; BYTE $0x2a // cmp r13, qword [rdx] WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al @@ -11578,14 +12281,14 @@ LBB2_166: WORD $0xd830 // xor al, bl LONG $0x33048841 // mov byte [r11 + rsi], al WORD $0x3949; BYTE $0xfa // cmp r10, rdi - JNE LBB2_166 + JNE LBB2_167 -LBB2_167: +LBB2_168: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_176 + JE LBB2_177 WORD $0x3b4c; BYTE $0x2a // cmp r13, qword [rdx] -LBB2_169: +LBB2_170: WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfa // mov rdx, rdi @@ -11599,23 +12302,23 @@ LBB2_169: WORD $0x3040; BYTE $0xf0 // xor al, sil WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xf3 // xor bl, sil - JMP LBB2_175 + JMP LBB2_176 -LBB2_170: +LBB2_171: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d - LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] + WORD $0x3145; BYTE $0xff // xor r15d, r15d + LONG $0x245c8b4c; BYTE $0x10 // mov r11, qword [rsp + 16] -LBB2_171: +LBB2_172: WORD $0x8948; BYTE $0xf0 // mov rax, rsi - LONG $0x2e3b4466 // cmp r13w, word [rsi] + LONG $0x363b4466 // cmp r14w, word [rsi] WORD $0x940f; BYTE $0xd2 // sete dl WORD $0xdaf6 // neg dl - WORD $0x894c; BYTE $0xf7 // mov rdi, r14 + WORD $0x894c; BYTE $0xff // mov rdi, r15 LONG $0x03efc148 // shr rdi, 3 LONG $0x14b60f45; BYTE $0x3b // movzx r10d, byte [r11 + rdi] - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d + WORD $0x8944; BYTE $0xf9 // mov ecx, r15d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl @@ -11623,8 +12326,8 @@ LBB2_171: WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xd3 // xor bl, r10b LONG $0x3b1c8841 // mov byte [r11 + rdi], bl - LONG $0x02c68349 // add r14, 2 - LONG $0x6e3b4466; BYTE $0x02 // cmp r13w, word [rsi + 2] + LONG $0x02c78349 // add r15, 2 + LONG $0x763b4466; BYTE $0x02 // cmp r14w, word [rsi + 2] LONG $0x04768d48 // lea rsi, [rsi + 4] WORD $0x940f; BYTE $0xd2 // sete dl WORD $0xdaf6 // neg dl @@ -11635,153 +12338,104 @@ LBB2_171: WORD $0xd020 // and al, dl WORD $0xd830 // xor al, bl LONG $0x3b048841 // mov byte [r11 + rdi], al - WORD $0x394d; BYTE $0xf1 // cmp r9, r14 - JNE LBB2_171 + WORD $0x394d; BYTE $0xf9 // cmp r9, r15 + JNE LBB2_172 -LBB2_172: +LBB2_173: LONG 
$0x01c0f641 // test r8b, 1 - JE LBB2_176 - LONG $0x2e3b4466 // cmp r13w, word [rsi] + JE LBB2_177 + LONG $0x363b4466 // cmp r14w, word [rsi] WORD $0x940f; BYTE $0xd0 // sete al WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xf2 // mov rdx, r14 + WORD $0x894c; BYTE $0xfa // mov rdx, r15 LONG $0x03eac148 // shr rdx, 3 - LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] + LONG $0x24448b4c; BYTE $0x10 // mov r8, qword [rsp + 16] LONG $0x103c8a41 // mov dil, byte [r8 + rdx] - LONG $0x07e68041 // and r14b, 7 + LONG $0x07e78041 // and r15b, 7 WORD $0x01b3 // mov bl, 1 - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d + WORD $0x8944; BYTE $0xf9 // mov ecx, r15d -LBB2_174: +LBB2_175: WORD $0xe3d2 // shl bl, cl WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil -LBB2_175: +LBB2_176: LONG $0x101c8841 // mov byte [r8 + rdx], bl -LBB2_176: +LBB2_177: MOVQ 304(SP), SP RET -LBB2_177: +LBB2_178: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 - WORD $0x3145; BYTE $0xff // xor r15d, r15d + WORD $0xf631 // xor esi, esi + WORD $0x894d; BYTE $0xfe // mov r14, r15 -LBB2_178: - WORD $0x8948; BYTE $0xf0 // mov rax, rsi - LONG $0x2e3b4466 // cmp r13w, word [rsi] +LBB2_179: + WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] + WORD $0x9b0f; BYTE $0xd1 // setnp cl WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xca20 // and dl, cl WORD $0xdaf6 // neg dl - WORD $0x894c; BYTE $0xff // mov rdi, r15 + WORD $0x8948; BYTE $0xf7 // mov rdi, rsi LONG $0x03efc148 // shr rdi, 3 LONG $0x14b60f45; BYTE $0x3e // movzx r10d, byte [r14 + rdi] - WORD $0x8944; BYTE $0xf9 // mov ecx, r15d + WORD $0xf189 // mov ecx, esi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl WORD $0x3044; BYTE $0xd2 // xor dl, r10b - WORD $0xd320 // and bl, dl - WORD $0x3044; BYTE $0xd3 // xor bl, r10b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x02c78349 // add r15, 2 - LONG $0x6e3b4466; BYTE $0x02 // cmp r13w, word [rsi + 2] - LONG $0x04768d48 // lea rsi, [rsi + 4] + WORD $0x2041; BYTE $0xd3 // and r11b, dl + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + LONG $0x3e1c8845 // mov byte [r14 + rdi], r11b + LONG $0x02c68348 // add rsi, 2 + LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] + LONG $0x085b8d48 // lea rbx, [rbx + 8] + LONG $0xd29b0f41 // setnp r10b WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0x2044; BYTE $0xd2 // and dl, r10b WORD $0xdaf6 // neg dl - WORD $0xda30 // xor dl, bl + WORD $0x3044; BYTE $0xda // xor dl, r11b WORD $0xc980; BYTE $0x01 // or cl, 1 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd020 // and al, dl - WORD $0xd830 // xor al, bl - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x394d; BYTE $0xf9 // cmp r9, r15 - JNE LBB2_178 - -LBB2_179: - LONG $0x01c0f641 // test r8b, 1 - JE LBB2_176 - LONG $0x2e3b4466 // cmp r13w, word [rsi] - WORD $0x940f; BYTE $0xd0 // sete al - WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xfa // mov rdx, r15 - LONG $0x03eac148 // shr rdx, 3 - LONG $0x163c8a41 // mov dil, byte [r14 + rdx] - LONG $0x07e78041 // and r15b, 7 - WORD $0x01b3 // mov bl, 1 - WORD $0x8944; BYTE $0xf9 // mov ecx, r15d - WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf8 // xor al, dil - WORD $0xc320 // and bl, al - WORD $0x3040; BYTE $0xfb // xor bl, dil - LONG $0x161c8841 // mov byte [r14 + rdx], bl - JMP LBB2_176 - -LBB2_181: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // 
and r10, -2 - WORD $0xf631 // xor esi, esi - WORD $0x894d; BYTE $0xde // mov r14, r11 - -LBB2_182: - WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - WORD $0x940f; BYTE $0xd2 // sete dl - WORD $0xdaf6 // neg dl - WORD $0x8948; BYTE $0xf7 // mov rdi, rsi - LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xca // xor dl, r9b - WORD $0xf189 // mov ecx, esi - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd020 // and al, dl - WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0x3044; BYTE $0xd8 // xor al, r11b LONG $0x3e048841 // mov byte [r14 + rdi], al - LONG $0x02c68348 // add rsi, 2 - LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] - LONG $0x085b8d48 // lea rbx, [rbx + 8] - LONG $0xd1940f41 // sete r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xc1 // xor r9b, al - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b2 // mov dl, 1 - WORD $0xe2d2 // shl dl, cl - WORD $0x2044; BYTE $0xca // and dl, r9b - WORD $0xc230 // xor dl, al - LONG $0x3e148841 // mov byte [r14 + rdi], dl - WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB2_182 + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + JNE LBB2_179 -LBB2_183: +LBB2_180: LONG $0x01c0f641 // test r8b, 1 - JE LBB2_176 + JE LBB2_177 WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - WORD $0x940f; BYTE $0xd0 // sete al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xf2 // mov rdx, rsi - LONG $0x03eac148 // shr rdx, 3 - LONG $0x133c8a41 // mov dil, byte [r11 + rdx] + WORD $0x9b0f; BYTE $0xd0 // setnp al + WORD $0x940f; BYTE $0xd2 // sete dl + WORD $0xc220 // and dl, al + WORD $0xdaf6 // neg dl + WORD $0x8948; BYTE $0xf0 // mov rax, rsi + LONG $0x03e8c148 // shr rax, 3 + LONG $0x073c8a41 // mov dil, byte [r15 + rax] LONG $0x07e68040 // and sil, 7 WORD $0x01b3 // mov bl, 1 WORD $0xf189 // mov ecx, esi WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf8 // xor al, dil - WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xfa // xor dl, dil + WORD $0xd320 // and bl, dl WORD $0x3040; BYTE $0xfb // xor bl, dil - LONG $0x131c8841 // mov byte [r11 + rdx], bl - JMP LBB2_176 + LONG $0x071c8841 // mov byte [r15 + rax], bl + JMP LBB2_177 -LBB2_185: +LBB2_182: LONG $0xf0e78349 // and r15, -16 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx - QUAD $0x000000f824848948 // mov qword [rsp + 248], rax - QUAD $0x000000e824bc894c // mov qword [rsp + 232], r15 + QUAD $0x0000010824848948 // mov qword [rsp + 264], rax + QUAD $0x000000e024bc894c // mov qword [rsp + 224], r15 LONG $0x24048b48 // mov rax, qword [rsp] LONG $0xb8048d4a // lea rax, [rax + 4*r15] LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax @@ -11792,7 +12446,7 @@ LBB2_185: QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 WORD $0xc031 // xor eax, eax -LBB2_186: +LBB2_183: WORD $0x8948; BYTE $0xc7 // mov rdi, rax QUAD $0x0000009824848948 // mov qword [rsp + 152], rax LONG $0x05e7c148 // shl rdi, 5 @@ -11826,7 +12480,7 @@ LBB2_186: QUAD $0x0000d024847f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm0 LONG $0x3a4cb60f; BYTE $0x08 // movzx ecx, byte [rdx + rdi + 8] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00010024847f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm0 + QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 LONG $0x3a4cb60f; BYTE $0x09 // movzx ecx, byte [rdx + rdi + 9] LONG $0x6e0f4466; BYTE $0xd1 // movd xmm10, ecx LONG $0x3a4cb60f; BYTE 
$0x0a // movzx ecx, byte [rdx + rdi + 10] @@ -11840,7 +12494,7 @@ LBB2_186: LONG $0x6e0f4466; BYTE $0xe1 // movd xmm12, ecx LONG $0x3a4cb60f; BYTE $0x0e // movzx ecx, byte [rdx + rdi + 14] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 + QUAD $0x0000f024847f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm0 LONG $0x247c8948; BYTE $0x40 // mov qword [rsp + 64], rdi WORD $0x8949; BYTE $0xfd // mov r13, rdi LONG $0x20cd8349 // or r13, 32 @@ -11865,15 +12519,15 @@ LBB2_186: LONG $0x80c88149; WORD $0x0001; BYTE $0x00 // or r8, 384 LONG $0x2444894c; BYTE $0x60 // mov qword [rsp + 96], r8 LONG $0x01a00d48; WORD $0x0000 // or rax, 416 - LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax WORD $0x8948; BYTE $0xf8 // mov rax, rdi LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax + LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax WORD $0x8948; BYTE $0xf8 // mov rax, rdi LONG $0x01e00d48; WORD $0x0000 // or rax, 480 QUAD $0x012a3c203a0f4666 // pinsrb xmm15, byte [rdx + r13], 1 QUAD $0x02323c203a0f4466 // pinsrb xmm15, byte [rdx + rsi], 2 - LONG $0x244c8948; BYTE $0x20 // mov qword [rsp + 32], rcx + LONG $0x244c8948; BYTE $0x38 // mov qword [rsp + 56], rcx QUAD $0x030a3c203a0f4466 // pinsrb xmm15, byte [rdx + rcx], 3 LONG $0x2474894c; BYTE $0x70 // mov qword [rsp + 112], r14 QUAD $0x04323c203a0f4666 // pinsrb xmm15, byte [rdx + r14], 4 @@ -11890,9 +12544,9 @@ LBB2_186: QUAD $0x0a2a3c203a0f4666 // pinsrb xmm15, byte [rdx + r13], 10 QUAD $0x0b1a3c203a0f4466 // pinsrb xmm15, byte [rdx + rbx], 11 QUAD $0x0c023c203a0f4666 // pinsrb xmm15, byte [rdx + r8], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0d323c203a0f4466 // pinsrb xmm15, byte [rdx + rsi], 13 LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + QUAD $0x0d323c203a0f4466 // pinsrb xmm15, byte [rdx + rsi], 13 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0e323c203a0f4466 // pinsrb xmm15, byte [rdx + rsi], 14 QUAD $0x0f023c203a0f4466 // pinsrb xmm15, byte [rdx + rax], 15 LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] @@ -11912,9 +12566,9 @@ LBB2_186: WORD $0x894d; BYTE $0xef // mov r15, r13 QUAD $0x0b011a6c203a0f66 // pinsrb xmm5, byte [rdx + rbx + 1], 11 QUAD $0x01026c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rdx + r8 + 1], 12 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] + LONG $0x244c8b4c; BYTE $0x10 // mov r9, qword [rsp + 16] QUAD $0x010a6c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rdx + r9 + 1], 13 - LONG $0x245c8b4c; BYTE $0x10 // mov r11, qword [rsp + 16] + LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] QUAD $0x011a6c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rdx + r11 + 1], 14 QUAD $0x0f01026c203a0f66 // pinsrb xmm5, byte [rdx + rax + 1], 15 QUAD $0x00b0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 176] @@ -11931,7 +12585,7 @@ LBB2_186: QUAD $0x020274203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rdx + r8 + 2], 1 LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] QUAD $0x021274203a0f4266; BYTE $0x02 // pinsrb xmm6, byte [rdx + r10 + 2], 2 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x03020a74203a0f66 // pinsrb xmm6, byte [rdx + rcx + 2], 3 LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] QUAD $0x04021a74203a0f66 // pinsrb xmm6, byte [rdx + rbx + 2], 4 
@@ -11978,13 +12632,13 @@ LBB2_186: QUAD $0x0a031a54203a0f66 // pinsrb xmm2, byte [rdx + rbx + 3], 10 QUAD $0x033254203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rdx + r14 + 3], 11 QUAD $0x033a54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rdx + r15 + 3], 12 - LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] QUAD $0x032254203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rdx + r12 + 3], 13 QUAD $0x031a54203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rdx + r11 + 3], 14 QUAD $0x030a54203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rdx + r9 + 3], 15 QUAD $0x0104324c203a0f66 // pinsrb xmm1, byte [rdx + rsi + 4], 1 QUAD $0x0204024c203a0f66 // pinsrb xmm1, byte [rdx + rax + 4], 2 - LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0304324c203a0f66 // pinsrb xmm1, byte [rdx + rsi + 4], 3 LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] QUAD $0x0404324c203a0f66 // pinsrb xmm1, byte [rdx + rsi + 4], 4 @@ -12000,7 +12654,7 @@ LBB2_186: QUAD $0x041a4c203a0f4266; BYTE $0x0e // pinsrb xmm1, byte [rdx + r11 + 4], 14 QUAD $0x040a4c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rdx + r9 + 4], 15 WORD $0x894c; BYTE $0xc9 // mov rcx, r9 - LONG $0x244c894c; BYTE $0x38 // mov qword [rsp + 56], r9 + LONG $0x244c894c; BYTE $0x20 // mov qword [rsp + 32], r9 LONG $0xf7eb0f66 // por xmm6, xmm7 LONG $0x244c8b4c; BYTE $0x40 // mov r9, qword [rsp + 64] LONG $0x74b60f42; WORD $0x110a // movzx esi, byte [rdx + r9 + 17] @@ -12017,7 +12671,7 @@ LBB2_186: LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] QUAD $0x052a44203a0f4666; BYTE $0x01 // pinsrb xmm8, byte [rdx + r13 + 5], 1 QUAD $0x050244203a0f4466; BYTE $0x02 // pinsrb xmm8, byte [rdx + rax + 5], 2 - LONG $0x245c8b4c; BYTE $0x20 // mov r11, qword [rsp + 32] + LONG $0x245c8b4c; BYTE $0x38 // mov r11, qword [rsp + 56] QUAD $0x051a44203a0f4666; BYTE $0x03 // pinsrb xmm8, byte [rdx + r11 + 5], 3 LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x050244203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rdx + rax + 5], 4 @@ -12037,9 +12691,9 @@ LBB2_186: QUAD $0x053a44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rdx + r15 + 5], 11 LONG $0x24648b4c; BYTE $0x60 // mov r12, qword [rsp + 96] QUAD $0x052244203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rdx + r12 + 5], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x053244203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rdx + rsi + 5], 13 LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + QUAD $0x053244203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rdx + rsi + 5], 13 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x053244203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rdx + rsi + 5], 14 QUAD $0x050a44203a0f4466; BYTE $0x0f // pinsrb xmm8, byte [rdx + rcx + 5], 15 LONG $0x740f4566; BYTE $0xc1 // pcmpeqb xmm8, xmm9 @@ -12071,11 +12725,11 @@ LBB2_186: QUAD $0x063a5c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rdx + r15 + 6], 11 WORD $0x894d; BYTE $0xe7 // mov r15, r12 QUAD $0x06225c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rdx + r12 + 6], 12 - LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] QUAD $0x06225c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rdx + r12 + 6], 13 - LONG $0x24748b4c; BYTE $0x10 // mov r14, qword [rsp + 16] + LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] QUAD $0x06325c203a0f4266; BYTE $0x0e // pinsrb xmm3, byte [rdx + r14 + 6], 
14 - LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] + LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] QUAD $0x06025c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rdx + r8 + 6], 15 QUAD $0x0000d024946f0f66; BYTE $0x00 // movdqa xmm2, oword [rsp + 208] QUAD $0x072a54203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rdx + r13 + 7], 1 @@ -12131,9 +12785,9 @@ LBB2_186: LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] QUAD $0x091a54203a0f4466; BYTE $0x0c // pinsrb xmm10, byte [rdx + rbx + 9], 12 QUAD $0x093a54203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rdx + r15 + 9], 13 - LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x093a54203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rdx + rdi + 9], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x093254203a0f4466; BYTE $0x0f // pinsrb xmm10, byte [rdx + rsi + 9], 15 LONG $0xeb0f4166; BYTE $0xc8 // por xmm1, xmm8 QUAD $0x0000d0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm1 @@ -12144,10 +12798,10 @@ LBB2_186: LONG $0xf80f4166; BYTE $0xca // psubb xmm1, xmm10 LONG $0x0274b60f; BYTE $0x16 // movzx esi, byte [rdx + rax + 22] LONG $0xde6e0f66 // movd xmm3, esi - QUAD $0x00010024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 256] + QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] QUAD $0x01080a64203a0f66 // pinsrb xmm4, byte [rdx + rcx + 8], 1 QUAD $0x081a64203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rdx + r11 + 8], 2 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x03080264203a0f66 // pinsrb xmm4, byte [rdx + rax + 8], 3 QUAD $0x080a64203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rdx + r9 + 8], 4 QUAD $0x082a64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rdx + r13 + 8], 5 @@ -12160,11 +12814,11 @@ LBB2_186: QUAD $0x082264203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rdx + r12 + 8], 10 QUAD $0x081264203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rdx + r10 + 8], 11 QUAD $0x0c081a64203a0f66 // pinsrb xmm4, byte [rdx + rbx + 8], 12 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] QUAD $0x0d081a64203a0f66 // pinsrb xmm4, byte [rdx + rbx + 8], 13 QUAD $0x0e083a64203a0f66 // pinsrb xmm4, byte [rdx + rdi + 8], 14 WORD $0x8949; BYTE $0xfa // mov r10, rdi - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] QUAD $0x0f080a64203a0f66 // pinsrb xmm4, byte [rdx + rcx + 8], 15 LONG $0x740f4166; BYTE $0xe1 // pcmpeqb xmm4, xmm9 LONG $0xdb0f4166; BYTE $0xe0 // pand xmm4, xmm8 @@ -12204,7 +12858,7 @@ LBB2_186: QUAD $0x0b3a5c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rdx + rdi + 11], 1 LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x0b0a5c203a0f4466; BYTE $0x02 // pinsrb xmm11, byte [rdx + rcx + 11], 2 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] QUAD $0x0b0a5c203a0f4466; BYTE $0x03 // pinsrb xmm11, byte [rdx + rcx + 11], 3 QUAD $0x0b025c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rdx + rax + 11], 4 WORD $0x894c; BYTE $0xd9 // mov rcx, r11 @@ -12221,15 +12875,15 @@ LBB2_186: QUAD $0x0b3a5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rdx + r15 + 11], 12 WORD $0x8949; BYTE $0xda // mov r10, rbx QUAD $0x0b1a5c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rdx + 
rbx + 11], 13 - LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] QUAD $0x0b225c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rdx + r12 + 11], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0b325c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rdx + rsi + 11], 15 LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] QUAD $0x0c2a6c203a0f4666; BYTE $0x01 // pinsrb xmm13, byte [rdx + r13 + 12], 1 LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] QUAD $0x0c1a6c203a0f4466; BYTE $0x02 // pinsrb xmm13, byte [rdx + rbx + 12], 2 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] QUAD $0x0c1a6c203a0f4466; BYTE $0x03 // pinsrb xmm13, byte [rdx + rbx + 12], 3 QUAD $0x0c026c203a0f4466; BYTE $0x04 // pinsrb xmm13, byte [rdx + rax + 12], 4 QUAD $0x0c0a6c203a0f4466; BYTE $0x05 // pinsrb xmm13, byte [rdx + rcx + 12], 5 @@ -12249,7 +12903,7 @@ LBB2_186: QUAD $0x0d1264203a0f4666; BYTE $0x01 // pinsrb xmm12, byte [rdx + r10 + 13], 1 LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0d3264203a0f4466; BYTE $0x02 // pinsrb xmm12, byte [rdx + rsi + 13], 2 - LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0d3264203a0f4466; BYTE $0x03 // pinsrb xmm12, byte [rdx + rsi + 13], 3 QUAD $0x0d0264203a0f4466; BYTE $0x04 // pinsrb xmm12, byte [rdx + rax + 13], 4 QUAD $0x0d0a64203a0f4466; BYTE $0x05 // pinsrb xmm12, byte [rdx + rcx + 13], 5 @@ -12262,7 +12916,7 @@ LBB2_186: QUAD $0x0d3a64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rdx + r15 + 13], 12 QUAD $0x0d2a64203a0f4666; BYTE $0x0d // pinsrb xmm12, byte [rdx + r13 + 13], 13 QUAD $0x0d2264203a0f4666; BYTE $0x0e // pinsrb xmm12, byte [rdx + r12 + 13], 14 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d0264203a0f4466; BYTE $0x0f // pinsrb xmm12, byte [rdx + rax + 13], 15 LONG $0x740f4566; BYTE $0xd9 // pcmpeqb xmm11, xmm9 QUAD $0x0000c09ddb0f4466; BYTE $0x00 // pand xmm11, oword 192[rbp] /* [rip + .LCPI2_12] */ @@ -12277,12 +12931,12 @@ LBB2_186: LONG $0xeb0f4566; BYTE $0xe5 // por xmm12, xmm13 LONG $0x0274b60f; BYTE $0x1a // movzx esi, byte [rdx + rax + 26] LONG $0x6e0f4466; BYTE $0xde // movd xmm11, esi - QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] + QUAD $0x0000f024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 240] QUAD $0x0e1264203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rdx + r10 + 14], 1 WORD $0x894c; BYTE $0xd6 // mov rsi, r10 LONG $0x24648b4c; BYTE $0x28 // mov r12, qword [rsp + 40] QUAD $0x0e2264203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rdx + r12 + 14], 2 - LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + LONG $0x24548b4c; BYTE $0x38 // mov r10, qword [rsp + 56] QUAD $0x0e1264203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rdx + r10 + 14], 3 LONG $0x246c8b4c; BYTE $0x70 // mov r13, qword [rsp + 112] QUAD $0x0e2a64203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rdx + r13 + 14], 4 @@ -12294,11 +12948,11 @@ LBB2_186: QUAD $0x0a0e1a64203a0f66 // pinsrb xmm4, byte [rdx + rbx + 14], 10 QUAD $0x0e3264203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rdx + r14 + 14], 11 QUAD $0x0e3a64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rdx + r15 + 14], 12 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0d0e0264203a0f66 // 
pinsrb xmm4, byte [rdx + rax + 14], 13 LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + QUAD $0x0d0e0264203a0f66 // pinsrb xmm4, byte [rdx + rax + 14], 13 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0e0e0264203a0f66 // pinsrb xmm4, byte [rdx + rax + 14], 14 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0f0e0264203a0f66 // pinsrb xmm4, byte [rdx + rax + 14], 15 QUAD $0x0f3274203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rdx + rsi + 15], 1 QUAD $0x0f2274203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rdx + r12 + 15], 2 @@ -12312,11 +12966,11 @@ LBB2_186: QUAD $0x0f1a74203a0f4466; BYTE $0x0a // pinsrb xmm14, byte [rdx + rbx + 15], 10 QUAD $0x0f3274203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rdx + r14 + 15], 11 QUAD $0x0f3a74203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rdx + r15 + 15], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] QUAD $0x0f3274203a0f4466; BYTE $0x0d // pinsrb xmm14, byte [rdx + rsi + 15], 13 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x0f0274203a0f4466; BYTE $0x0e // pinsrb xmm14, byte [rdx + rax + 15], 14 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] QUAD $0x0f3274203a0f4466; BYTE $0x0f // pinsrb xmm14, byte [rdx + rsi + 15], 15 LONG $0x24748b48; BYTE $0x18 // mov rsi, qword [rsp + 24] QUAD $0x10327c203a0f4466; BYTE $0x01 // pinsrb xmm15, byte [rdx + rsi + 16], 1 @@ -12331,7 +12985,7 @@ LBB2_186: QUAD $0x101a7c203a0f4466; BYTE $0x0a // pinsrb xmm15, byte [rdx + rbx + 16], 10 QUAD $0x10327c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rdx + r14 + 16], 11 QUAD $0x103a7c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rdx + r15 + 16], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] QUAD $0x10327c203a0f4466; BYTE $0x0d // pinsrb xmm15, byte [rdx + rsi + 16], 13 QUAD $0x10027c203a0f4466; BYTE $0x0e // pinsrb xmm15, byte [rdx + rax + 16], 14 LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] @@ -12348,9 +13002,9 @@ LBB2_186: QUAD $0x0a111a44203a0f66 // pinsrb xmm0, byte [rdx + rbx + 17], 10 QUAD $0x113244203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rdx + r14 + 17], 11 QUAD $0x113a44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rdx + r15 + 17], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0d113244203a0f66 // pinsrb xmm0, byte [rdx + rsi + 17], 13 LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + QUAD $0x0d113244203a0f66 // pinsrb xmm0, byte [rdx + rsi + 17], 13 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0e113244203a0f66 // pinsrb xmm0, byte [rdx + rsi + 17], 14 QUAD $0x00a024a4eb0f4466; WORD $0x0000 // por xmm12, oword [rsp + 160] LONG $0x24648b4c; BYTE $0x40 // mov r12, qword [rsp + 64] @@ -12365,7 +13019,7 @@ LBB2_186: LONG $0xeb0f4466; BYTE $0xf4 // por xmm14, xmm4 LONG $0x74b60f42; WORD $0x1c22 // movzx esi, byte [rdx + r12 + 28] LONG $0xe66e0f66 // movd xmm4, esi - LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] + LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] QUAD $0x110244203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rdx + r8 + 17], 15 LONG $0xeb0f4566; BYTE $0xf4 // por xmm14, xmm12 LONG $0x740f4166; BYTE $0xc5 // pcmpeqb xmm0, xmm13 @@ -12393,9 +13047,9 @@ LBB2_186: QUAD 
$0x0a121a6c203a0f66 // pinsrb xmm5, byte [rdx + rbx + 18], 10 QUAD $0x12326c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rdx + r14 + 18], 11 QUAD $0x123a6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rdx + r15 + 18], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0d12326c203a0f66 // pinsrb xmm5, byte [rdx + rsi + 18], 13 LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + QUAD $0x0d12326c203a0f66 // pinsrb xmm5, byte [rdx + rsi + 18], 13 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0e12326c203a0f66 // pinsrb xmm5, byte [rdx + rsi + 18], 14 LONG $0xdb0f4566; BYTE $0xfc // pand xmm15, xmm12 QUAD $0x12026c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rdx + r8 + 18], 15 @@ -12445,9 +13099,9 @@ LBB2_186: QUAD $0x0a131a7c203a0f66 // pinsrb xmm7, byte [rdx + rbx + 19], 10 QUAD $0x13327c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rdx + r14 + 19], 11 QUAD $0x133a7c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rdx + r15 + 19], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] QUAD $0x0d13327c203a0f66 // pinsrb xmm7, byte [rdx + rsi + 19], 13 - LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] QUAD $0x13227c203a0f4266; BYTE $0x0e // pinsrb xmm7, byte [rdx + r12 + 19], 14 QUAD $0x13027c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rdx + r8 + 19], 15 QUAD $0x141274203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rdx + r10 + 20], 3 @@ -12676,23 +13330,23 @@ LBB2_186: LONG $0x1c7f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm3 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x000000e8248c3b48 // cmp rcx, qword [rsp + 232] - JNE LBB2_186 - QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] - QUAD $0x000000e824bc3b4c // cmp r15, qword [rsp + 232] + QUAD $0x000000e0248c3b48 // cmp rcx, qword [rsp + 224] + JNE LBB2_183 + QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] + QUAD $0x000000e024bc3b4c // cmp r15, qword [rsp + 224] LONG $0x24748a44; BYTE $0x08 // mov r14b, byte [rsp + 8] - QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] JNE LBB2_43 JMP LBB2_131 -LBB2_188: +LBB2_185: LONG $0xf0e78349 // and r15, -16 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xd0 // add rax, rdx - QUAD $0x000000f824848948 // mov qword [rsp + 248], rax - QUAD $0x000000e824bc894c // mov qword [rsp + 232], r15 + QUAD $0x0000010824848948 // mov qword [rsp + 264], rax + QUAD $0x000000e024bc894c // mov qword [rsp + 224], r15 LONG $0x24048b48 // mov rax, qword [rsp] LONG $0xb8048d4a // lea rax, [rax + 4*r15] LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax @@ -12703,7 +13357,7 @@ LBB2_188: QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 WORD $0xc031 // xor eax, eax -LBB2_189: +LBB2_186: WORD $0x8949; BYTE $0xc7 // mov r15, rax QUAD $0x0000009824848948 // mov qword [rsp + 152], rax LONG $0x05e7c149 // shl r15, 5 @@ -12717,7 +13371,7 @@ LBB2_189: WORD $0x894d; BYTE $0xfc // mov r12, r15 WORD $0x894d; BYTE $0xfa // mov r10, r15 WORD $0x894d; BYTE $0xfd // mov r13, r15 - LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LONG $0x34b60f42; BYTE $0x3a // movzx esi, byte [rdx + r15] LONG $0x6e0f4466; BYTE $0xfe // movd xmm15, esi LONG $0x74b60f42; 
WORD $0x013a // movzx esi, byte [rdx + r15 + 1] @@ -12737,7 +13391,7 @@ LBB2_189: QUAD $0x0000c024847f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm0 LONG $0x74b60f42; WORD $0x083a // movzx esi, byte [rdx + r15 + 8] LONG $0xc66e0f66 // movd xmm0, esi - QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 + QUAD $0x0000f024847f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm0 LONG $0x74b60f42; WORD $0x093a // movzx esi, byte [rdx + r15 + 9] LONG $0x6e0f4466; BYTE $0xd6 // movd xmm10, esi LONG $0x74b60f42; WORD $0x0a3a // movzx esi, byte [rdx + r15 + 10] @@ -12751,11 +13405,11 @@ LBB2_189: LONG $0x6e0f4466; BYTE $0xe6 // movd xmm12, esi LONG $0x74b60f42; WORD $0x0e3a // movzx esi, byte [rdx + r15 + 14] LONG $0xc66e0f66 // movd xmm0, esi - QUAD $0x00010024847f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm0 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 WORD $0x894d; BYTE $0xfe // mov r14, r15 LONG $0x20ce8349 // or r14, 32 - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2474894c; BYTE $0x10 // mov qword [rsp + 16], r14 LONG $0x40cb8348 // or rbx, 64 LONG $0x245c8948; BYTE $0x48 // mov qword [rsp + 72], rbx LONG $0x60c88348 // or rax, 96 @@ -12772,11 +13426,11 @@ LBB2_189: WORD $0x894d; BYTE $0xfa // mov r10, r15 LONG $0x60ca8149; WORD $0x0001; BYTE $0x00 // or r10, 352 LONG $0x2454894c; BYTE $0x50 // mov qword [rsp + 80], r10 - LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] + LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] LONG $0x80cc8149; WORD $0x0001; BYTE $0x00 // or r12, 384 WORD $0x894c; BYTE $0xfe // mov rsi, r15 LONG $0xa0ce8148; WORD $0x0001; BYTE $0x00 // or rsi, 416 - LONG $0x24748948; BYTE $0x10 // mov qword [rsp + 16], rsi + LONG $0x24748948; BYTE $0x30 // mov qword [rsp + 48], rsi LONG $0xc0cd8149; WORD $0x0001; BYTE $0x00 // or r13, 448 LONG $0x246c894c; BYTE $0x18 // mov qword [rsp + 24], r13 WORD $0x894c; BYTE $0xfe // mov rsi, r15 @@ -12799,13 +13453,13 @@ LBB2_189: LONG $0x247c8b4c; BYTE $0x58 // mov r15, qword [rsp + 88] QUAD $0x0a3a3c203a0f4666 // pinsrb xmm15, byte [rdx + r15], 10 QUAD $0x0b123c203a0f4666 // pinsrb xmm15, byte [rdx + r10], 11 - LONG $0x2464894c; BYTE $0x20 // mov qword [rsp + 32], r12 + LONG $0x2464894c; BYTE $0x38 // mov qword [rsp + 56], r12 QUAD $0x0c223c203a0f4666 // pinsrb xmm15, byte [rdx + r12], 12 - LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] + LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] QUAD $0x0d123c203a0f4666 // pinsrb xmm15, byte [rdx + r10], 13 QUAD $0x0e2a3c203a0f4666 // pinsrb xmm15, byte [rdx + r13], 14 QUAD $0x0f323c203a0f4466 // pinsrb xmm15, byte [rdx + rsi], 15 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] QUAD $0x01011a6c203a0f66 // pinsrb xmm5, byte [rdx + rbx + 1], 1 LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] QUAD $0x02011a6c203a0f66 // pinsrb xmm5, byte [rdx + rbx + 1], 2 @@ -12830,11 +13484,11 @@ LBB2_189: QUAD $0x000000a0a56f0f66 // movdqa xmm4, oword 160[rbp] /* [rip + .LCPI2_10] */ LONG $0xfcdb0f66 // pand xmm7, xmm4 LONG $0xfdf80f66 // psubb xmm7, xmm5 - LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] LONG $0x74b60f42; WORD $0x0f2a // movzx esi, byte [rdx + r13 + 15] LONG $0x6e0f4466; BYTE $0xf6 // movd xmm14, esi LONG $0x740f4566; 
BYTE $0xf9 // pcmpeqb xmm15, xmm9 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] QUAD $0x01020a74203a0f66 // pinsrb xmm6, byte [rdx + rcx + 2], 1 QUAD $0x02021a74203a0f66 // pinsrb xmm6, byte [rdx + rbx + 2], 2 LONG $0x245c8b4c; BYTE $0x70 // mov r11, qword [rsp + 112] @@ -12853,9 +13507,9 @@ LBB2_189: QUAD $0x0a023274203a0f66 // pinsrb xmm6, byte [rdx + rsi + 2], 10 LONG $0x24548b4c; BYTE $0x50 // mov r10, qword [rsp + 80] QUAD $0x021274203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rdx + r10 + 2], 11 - LONG $0x244c8b4c; BYTE $0x20 // mov r9, qword [rsp + 32] + LONG $0x244c8b4c; BYTE $0x38 // mov r9, qword [rsp + 56] QUAD $0x020a74203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rdx + r9 + 2], 12 - LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0d023274203a0f66 // pinsrb xmm6, byte [rdx + rsi + 2], 13 LONG $0x24748b48; BYTE $0x18 // mov rsi, qword [rsp + 24] QUAD $0x0e023274203a0f66 // pinsrb xmm6, byte [rdx + rsi + 2], 14 @@ -12885,13 +13539,13 @@ LBB2_189: QUAD $0x033a54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rdx + r15 + 3], 10 QUAD $0x031254203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rdx + r10 + 3], 11 QUAD $0x030a54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rdx + r9 + 3], 12 - LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] QUAD $0x032254203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rdx + r12 + 3], 13 LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0e030254203a0f66 // pinsrb xmm2, byte [rdx + rax + 3], 14 LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0f030254203a0f66 // pinsrb xmm2, byte [rdx + rax + 3], 15 - LONG $0x246c8b4c; BYTE $0x30 // mov r13, qword [rsp + 48] + LONG $0x246c8b4c; BYTE $0x10 // mov r13, qword [rsp + 16] QUAD $0x042a4c203a0f4266; BYTE $0x01 // pinsrb xmm1, byte [rdx + r13 + 4], 1 LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] QUAD $0x02041a4c203a0f66 // pinsrb xmm1, byte [rdx + rbx + 4], 2 @@ -12914,7 +13568,7 @@ LBB2_189: QUAD $0x0f04024c203a0f66 // pinsrb xmm1, byte [rdx + rax + 4], 15 WORD $0x8949; BYTE $0xc2 // mov r10, rax LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] LONG $0x3a74b60f; BYTE $0x11 // movzx esi, byte [rdx + rdi + 17] LONG $0xc66e0f66 // movd xmm0, esi LONG $0x740f4166; BYTE $0xd1 // pcmpeqb xmm2, xmm9 @@ -12926,7 +13580,7 @@ LBB2_189: LONG $0xcaeb0f66 // por xmm1, xmm2 LONG $0x3a74b60f; BYTE $0x12 // movzx esi, byte [rdx + rdi + 18] LONG $0xee6e0f66 // movd xmm5, esi - LONG $0x246c8b4c; BYTE $0x30 // mov r13, qword [rsp + 48] + LONG $0x246c8b4c; BYTE $0x10 // mov r13, qword [rsp + 16] QUAD $0x052a44203a0f4666; BYTE $0x01 // pinsrb xmm8, byte [rdx + r13 + 5], 1 LONG $0x245c8b4c; BYTE $0x48 // mov r11, qword [rsp + 72] QUAD $0x051a44203a0f4666; BYTE $0x02 // pinsrb xmm8, byte [rdx + r11 + 5], 2 @@ -12946,9 +13600,9 @@ LBB2_189: LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] QUAD $0x053244203a0f4466; BYTE $0x0a // pinsrb xmm8, byte [rdx + rsi + 5], 10 QUAD $0x053a44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rdx + r15 + 5], 11 - LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] + LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] QUAD $0x052244203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rdx + r12 + 5], 12 - LONG $0x24748b48; BYTE $0x10 // mov 
rsi, qword [rsp + 16] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x053244203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rdx + rsi + 5], 13 QUAD $0x051a44203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rdx + rbx + 5], 14 QUAD $0x051244203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rdx + r10 + 5], 15 @@ -12981,7 +13635,7 @@ LBB2_189: QUAD $0x0b06025c203a0f66 // pinsrb xmm3, byte [rdx + rax + 6], 11 WORD $0x894c; BYTE $0xe3 // mov rbx, r12 QUAD $0x06225c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rdx + r12 + 6], 12 - LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] QUAD $0x06225c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rdx + r12 + 6], 13 LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] QUAD $0x0e060a5c203a0f66 // pinsrb xmm3, byte [rdx + rcx + 6], 14 @@ -13019,10 +13673,10 @@ LBB2_189: LONG $0xd1db0f66 // pand xmm2, xmm1 LONG $0xd3eb0f66 // por xmm2, xmm3 LONG $0xca6f0f66 // movdqa xmm1, xmm2 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] LONG $0x0274b60f; BYTE $0x15 // movzx esi, byte [rdx + rax + 21] LONG $0xd66e0f66 // movd xmm2, esi - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] + LONG $0x244c8b4c; BYTE $0x10 // mov r9, qword [rsp + 16] QUAD $0x090a54203a0f4666; BYTE $0x01 // pinsrb xmm10, byte [rdx + r9 + 9], 1 QUAD $0x092a54203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rdx + r13 + 9], 2 LONG $0x24448b4c; BYTE $0x70 // mov r8, qword [rsp + 112] @@ -13039,7 +13693,7 @@ LBB2_189: QUAD $0x093a54203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rdx + r15 + 9], 10 LONG $0x24748b48; BYTE $0x50 // mov rsi, qword [rsp + 80] QUAD $0x093254203a0f4466; BYTE $0x0b // pinsrb xmm10, byte [rdx + rsi + 9], 11 - LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x093254203a0f4466; BYTE $0x0c // pinsrb xmm10, byte [rdx + rsi + 9], 12 QUAD $0x091a54203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rdx + r11 + 9], 13 QUAD $0x092254203a0f4666; BYTE $0x0e // pinsrb xmm10, byte [rdx + r12 + 9], 14 @@ -13053,7 +13707,7 @@ LBB2_189: LONG $0xf80f4166; BYTE $0xca // psubb xmm1, xmm10 LONG $0x0274b60f; BYTE $0x16 // movzx esi, byte [rdx + rax + 22] LONG $0xde6e0f66 // movd xmm3, esi - QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] + QUAD $0x0000f024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 240] QUAD $0x080a64203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rdx + r9 + 8], 1 LONG $0x24648b4c; BYTE $0x48 // mov r12, qword [rsp + 72] QUAD $0x082264203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rdx + r12 + 8], 2 @@ -13069,7 +13723,7 @@ LBB2_189: QUAD $0x083a64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rdx + r15 + 8], 10 LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0b080264203a0f66 // pinsrb xmm4, byte [rdx + rax + 8], 11 - LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] QUAD $0x0c083264203a0f66 // pinsrb xmm4, byte [rdx + rsi + 8], 12 QUAD $0x081a64203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rdx + r11 + 8], 13 LONG $0x24548b4c; BYTE $0x18 // mov r10, qword [rsp + 24] @@ -13102,14 +13756,14 @@ LBB2_189: LONG $0x740f4566; BYTE $0xd1 // pcmpeqb xmm10, xmm9 QUAD $0x0000b095db0f4466; BYTE $0x00 // pand xmm10, oword 176[rbp] /* [rip + .LCPI2_11] */ LONG $0xeb0f4466; BYTE $0xd4 // por xmm10, xmm4 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG 
$0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] LONG $0x0a74b60f; BYTE $0x17 // movzx esi, byte [rdx + rcx + 23] LONG $0x6e0f4466; BYTE $0xc6 // movd xmm8, esi LONG $0xeb0f4466; BYTE $0xd1 // por xmm10, xmm1 QUAD $0x00a024947f0f4466; WORD $0x0000 // movdqa oword [rsp + 160], xmm10 LONG $0x0a74b60f; BYTE $0x18 // movzx esi, byte [rdx + rcx + 24] LONG $0x6e0f4466; BYTE $0xd6 // movd xmm10, esi - LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] + LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] QUAD $0x0b125c203a0f4666; BYTE $0x01 // pinsrb xmm11, byte [rdx + r10 + 11], 1 QUAD $0x0b225c203a0f4666; BYTE $0x02 // pinsrb xmm11, byte [rdx + r12 + 11], 2 WORD $0x894c; BYTE $0xc9 // mov rcx, r9 @@ -13126,9 +13780,9 @@ LBB2_189: QUAD $0x0b1a5c203a0f4466; BYTE $0x0a // pinsrb xmm11, byte [rdx + rbx + 11], 10 WORD $0x8948; BYTE $0xc3 // mov rbx, rax QUAD $0x0b025c203a0f4466; BYTE $0x0b // pinsrb xmm11, byte [rdx + rax + 11], 11 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] QUAD $0x0b2a5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rdx + r13 + 11], 12 - LONG $0x244c8b4c; BYTE $0x10 // mov r9, qword [rsp + 16] + LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] QUAD $0x0b0a5c203a0f4666; BYTE $0x0d // pinsrb xmm11, byte [rdx + r9 + 11], 13 LONG $0x24748b48; BYTE $0x18 // mov rsi, qword [rsp + 24] QUAD $0x0b325c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rdx + rsi + 11], 14 @@ -13176,7 +13830,7 @@ LBB2_189: LONG $0x740f4566; BYTE $0xe9 // pcmpeqb xmm13, xmm9 QUAD $0x0000d0addb0f4466; BYTE $0x00 // pand xmm13, oword 208[rbp] /* [rip + .LCPI2_13] */ LONG $0xeb0f4566; BYTE $0xeb // por xmm13, xmm11 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] LONG $0x1a74b60f; BYTE $0x19 // movzx esi, byte [rdx + rbx + 25] LONG $0xce6e0f66 // movd xmm1, esi LONG $0x740f4566; BYTE $0xe1 // pcmpeqb xmm12, xmm9 @@ -13184,8 +13838,8 @@ LBB2_189: LONG $0xeb0f4566; BYTE $0xe5 // por xmm12, xmm13 LONG $0x1a74b60f; BYTE $0x1a // movzx esi, byte [rdx + rbx + 26] LONG $0x6e0f4466; BYTE $0xde // movd xmm11, esi - QUAD $0x00010024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 256] - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x010e0264203a0f66 // pinsrb xmm4, byte [rdx + rax + 14], 1 QUAD $0x0e2264203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rdx + r12 + 14], 2 QUAD $0x0e1264203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rdx + r10 + 14], 3 @@ -13204,14 +13858,14 @@ LBB2_189: QUAD $0x0e3a64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rdx + r15 + 14], 10 LONG $0x24748b4c; BYTE $0x50 // mov r14, qword [rsp + 80] QUAD $0x0e3264203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rdx + r14 + 14], 11 - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] QUAD $0x0e3a64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rdx + r15 + 14], 12 QUAD $0x0e2a64203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rdx + r13 + 14], 13 LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] QUAD $0x0e2a64203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rdx + r13 + 14], 14 LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0f0e3264203a0f66 // pinsrb xmm4, byte [rdx + rsi + 14], 15 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x10 
// mov rsi, qword [rsp + 16] QUAD $0x0f3274203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rdx + rsi + 15], 1 QUAD $0x0f2274203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rdx + r12 + 15], 2 QUAD $0x0f1274203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rdx + r10 + 15], 3 @@ -13224,12 +13878,12 @@ LBB2_189: QUAD $0x0f1a74203a0f4666; BYTE $0x0a // pinsrb xmm14, byte [rdx + r11 + 15], 10 QUAD $0x0f3274203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rdx + r14 + 15], 11 QUAD $0x0f3a74203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rdx + r15 + 15], 12 - LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0f3274203a0f4466; BYTE $0x0d // pinsrb xmm14, byte [rdx + rsi + 15], 13 QUAD $0x0f2a74203a0f4666; BYTE $0x0e // pinsrb xmm14, byte [rdx + r13 + 15], 14 LONG $0x24748b48; BYTE $0x28 // mov rsi, qword [rsp + 40] QUAD $0x0f3274203a0f4466; BYTE $0x0f // pinsrb xmm14, byte [rdx + rsi + 15], 15 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] QUAD $0x10327c203a0f4466; BYTE $0x01 // pinsrb xmm15, byte [rdx + rsi + 16], 1 QUAD $0x10227c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rdx + r12 + 16], 2 QUAD $0x10127c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rdx + r10 + 16], 3 @@ -13242,10 +13896,10 @@ LBB2_189: QUAD $0x101a7c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rdx + r11 + 16], 10 QUAD $0x10327c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rdx + r14 + 16], 11 QUAD $0x103a7c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rdx + r15 + 16], 12 - LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x10327c203a0f4466; BYTE $0x0d // pinsrb xmm15, byte [rdx + rsi + 16], 13 QUAD $0x102a7c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rdx + r13 + 16], 14 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] QUAD $0x01113244203a0f66 // pinsrb xmm0, byte [rdx + rsi + 17], 1 QUAD $0x112244203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rdx + r12 + 17], 2 QUAD $0x111244203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rdx + r10 + 17], 3 @@ -13260,12 +13914,12 @@ LBB2_189: QUAD $0x111a44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rdx + r11 + 17], 10 QUAD $0x113244203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rdx + r14 + 17], 11 QUAD $0x113a44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rdx + r15 + 17], 12 - LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0d113244203a0f66 // pinsrb xmm0, byte [rdx + rsi + 17], 13 LONG $0x24748b48; BYTE $0x18 // mov rsi, qword [rsp + 24] QUAD $0x0e113244203a0f66 // pinsrb xmm0, byte [rdx + rsi + 17], 14 QUAD $0x00a024a4eb0f4466; WORD $0x0000 // por xmm12, oword [rsp + 160] - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] + LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] LONG $0x74b60f42; WORD $0x1b22 // movzx esi, byte [rdx + r12 + 27] LONG $0x6e0f4466; BYTE $0xce // movd xmm9, esi QUAD $0x00b024ac6f0f4466; WORD $0x0000 // movdqa xmm13, oword [rsp + 176] @@ -13291,7 +13945,7 @@ LBB2_189: QUAD $0x10027c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rdx + r8 + 16], 15 QUAD $0x0000b024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 176] LONG $0x740f4466; BYTE $0xf8 // pcmpeqb xmm15, xmm0 - LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] QUAD 
$0x12226c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rdx + r12 + 18], 1 LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] QUAD $0x0212326c203a0f66 // pinsrb xmm5, byte [rdx + rsi + 18], 2 @@ -13305,7 +13959,7 @@ LBB2_189: QUAD $0x121a6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rdx + r11 + 18], 10 QUAD $0x12326c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rdx + r14 + 18], 11 QUAD $0x123a6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rdx + r15 + 18], 12 - LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0d12326c203a0f66 // pinsrb xmm5, byte [rdx + rsi + 18], 13 LONG $0x24748b48; BYTE $0x18 // mov rsi, qword [rsp + 24] QUAD $0x0e12326c203a0f66 // pinsrb xmm5, byte [rdx + rsi + 18], 14 @@ -13314,7 +13968,7 @@ LBB2_189: LONG $0xe8740f66 // pcmpeqb xmm5, xmm0 QUAD $0x000000b0addb0f66 // pand xmm5, oword 176[rbp] /* [rip + .LCPI2_11] */ LONG $0xeb0f4166; BYTE $0xef // por xmm5, xmm15 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] LONG $0x0274b60f; BYTE $0x1e // movzx esi, byte [rdx + rax + 30] LONG $0x6e0f4466; BYTE $0xe6 // movd xmm12, esi QUAD $0x13227c203a0f4266; BYTE $0x01 // pinsrb xmm7, byte [rdx + r12 + 19], 1 @@ -13357,7 +14011,7 @@ LBB2_189: QUAD $0x131a7c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rdx + r11 + 19], 10 QUAD $0x13327c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rdx + r14 + 19], 11 QUAD $0x133a7c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rdx + r15 + 19], 12 - LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] QUAD $0x0d13327c203a0f66 // pinsrb xmm7, byte [rdx + rsi + 19], 13 LONG $0x24648b4c; BYTE $0x18 // mov r12, qword [rsp + 24] QUAD $0x13227c203a0f4266; BYTE $0x0e // pinsrb xmm7, byte [rdx + r12 + 19], 14 @@ -13588,28 +14242,27 @@ LBB2_189: LONG $0x1c7f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm3 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x000000e8248c3b48 // cmp rcx, qword [rsp + 232] - JNE LBB2_189 - QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] - QUAD $0x000000e824bc3b4c // cmp r15, qword [rsp + 232] + QUAD $0x000000e0248c3b48 // cmp rcx, qword [rsp + 224] + JNE LBB2_186 + QUAD $0x000000e824bc8b4c // mov r15, qword [rsp + 232] + QUAD $0x000000e024bc3b4c // cmp r15, qword [rsp + 224] LONG $0x24748a44; BYTE $0x08 // mov r14b, byte [rsp + 8] - QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] JNE LBB2_69 JMP LBB2_135 -LBB2_191: - LONG $0xf8e68349 // and r14, -8 - WORD $0x894c; BYTE $0xf0 // mov rax, r14 +LBB2_188: + LONG $0xf8e78349 // and r15, -8 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x06e0c148 // shl rax, 6 WORD $0x0148; BYTE $0xd0 // add rax, rdx LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax LONG $0x24048b48 // mov rax, qword [rsp] - LONG $0x2474894c; BYTE $0x20 // mov qword [rsp + 32], r14 - LONG $0xb0048d4a // lea rax, [rax + 4*r14] - LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax - LONG $0x246c8944; BYTE $0x38 // mov dword [rsp + 56], r13d - LONG $0x6e0f4166; BYTE $0xc5 // movd xmm0, r13d + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0xb8048d4a // lea rax, [rax + 4*r15] + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax + LONG $0x6e0f4166; BYTE $0xc6 // movd xmm0, r14d LONG $0xc0700ff2; BYTE $0xe0 // 
pshuflw xmm0, xmm0, 224 LONG $0xc0700f66; BYTE $0x00 // pshufd xmm0, xmm0, 0 WORD $0x3145; BYTE $0xff // xor r15d, r15d @@ -13621,7 +14274,7 @@ LBB2_191: LONG $0x6f0f4466; WORD $0x506d // movdqa xmm13, oword 80[rbp] /* [rip + .LCPI2_5] */ LONG $0x6f0f4466; WORD $0x6075 // movdqa xmm14, oword 96[rbp] /* [rip + .LCPI2_6] */ -LBB2_192: +LBB2_189: LONG $0x247c894c; BYTE $0x30 // mov qword [rsp + 48], r15 LONG $0x06e7c149 // shl r15, 6 WORD $0x894d; BYTE $0xf9 // mov r9, r15 @@ -13630,9 +14283,9 @@ LBB2_192: WORD $0x894c; BYTE $0xf9 // mov rcx, r15 WORD $0x894c; BYTE $0xff // mov rdi, r15 WORD $0x894c; BYTE $0xfb // mov rbx, r15 - LONG $0x04b70f42; BYTE $0x3a // movzx eax, word [rdx + r15] - LONG $0x54b70f46; WORD $0x023a // movzx r10d, word [rdx + r15 + 2] - LONG $0x74b70f46; WORD $0x043a // movzx r14d, word [rdx + r15 + 4] + LONG $0x34b70f46; BYTE $0x3a // movzx r14d, word [rdx + r15] + LONG $0x44b70f42; WORD $0x023a // movzx eax, word [rdx + r15 + 2] + LONG $0x54b70f46; WORD $0x043a // movzx r10d, word [rdx + r15 + 4] LONG $0x74b70f42; WORD $0x063a // movzx esi, word [rdx + r15 + 6] LONG $0x5cb70f46; WORD $0x083a // movzx r11d, word [rdx + r15 + 8] WORD $0x894d; BYTE $0xf8 // mov r8, r15 @@ -13643,7 +14296,7 @@ LBB2_192: LONG $0x40c98148; WORD $0x0001; BYTE $0x00 // or rcx, 320 LONG $0x80cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 384 LONG $0xc0cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 448 - LONG $0xe06e0f66 // movd xmm4, eax + LONG $0x6e0f4166; BYTE $0xe6 // movd xmm4, r14d LONG $0xc40f4266; WORD $0x0224; BYTE $0x01 // pinsrw xmm4, word [rdx + r8], 1 LONG $0xc40f4266; WORD $0x0a24; BYTE $0x02 // pinsrw xmm4, word [rdx + r9], 2 LONG $0xc40f4266; WORD $0x2224; BYTE $0x03 // pinsrw xmm4, word [rdx + r12], 3 @@ -13651,31 +14304,31 @@ LBB2_192: LONG $0x24c40f66; WORD $0x050a // pinsrw xmm4, word [rdx + rcx], 5 LONG $0x24c40f66; WORD $0x063a // pinsrw xmm4, word [rdx + rdi], 6 LONG $0x24c40f66; WORD $0x071a // pinsrw xmm4, word [rdx + rbx], 7 - LONG $0x44b70f42; WORD $0x0a3a // movzx eax, word [rdx + r15 + 10] - LONG $0x18244489 // mov dword [rsp + 24], eax - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d + LONG $0x74b70f46; WORD $0x0a3a // movzx r14d, word [rdx + r15 + 10] + LONG $0xf06e0f66 // movd xmm6, eax QUAD $0x01020274c40f4266 // pinsrw xmm6, word [rdx + r8 + 2], 1 QUAD $0x02020a74c40f4266 // pinsrw xmm6, word [rdx + r9 + 2], 2 QUAD $0x03022274c40f4266 // pinsrw xmm6, word [rdx + r12 + 2], 3 LONG $0x44b70f42; WORD $0x0c3a // movzx eax, word [rdx + r15 + 12] - LONG $0x10244489 // mov dword [rsp + 16], eax + LONG $0x28244489 // mov dword [rsp + 40], eax QUAD $0x04022a74c40f4266 // pinsrw xmm6, word [rdx + r13 + 2], 4 - LONG $0x6e0f4166; BYTE $0xd6 // movd xmm2, r14d - LONG $0x74b70f46; WORD $0x0e3a // movzx r14d, word [rdx + r15 + 14] + LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d + LONG $0x44b70f42; WORD $0x0e3a // movzx eax, word [rdx + r15 + 14] + LONG $0x18244489 // mov dword [rsp + 24], eax LONG $0x74c40f66; WORD $0x020a; BYTE $0x05 // pinsrw xmm6, word [rdx + rcx + 2], 5 LONG $0xee6e0f66 // movd xmm5, esi LONG $0x74b70f42; WORD $0x103a // movzx esi, word [rdx + r15 + 16] LONG $0x74c40f66; WORD $0x023a; BYTE $0x06 // pinsrw xmm6, word [rdx + rdi + 2], 6 LONG $0x6e0f4166; BYTE $0xdb // movd xmm3, r11d LONG $0x44b70f42; WORD $0x123a // movzx eax, word [rdx + r15 + 18] - LONG $0x28244489 // mov dword [rsp + 40], eax + LONG $0x38244489 // mov dword [rsp + 56], eax LONG $0x74c40f66; WORD $0x021a; BYTE $0x07 // pinsrw xmm6, word [rdx + rbx + 2], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 
LONG $0xf6630f66 // packsswb xmm6, xmm6 LONG $0xce6f0f66 // movdqa xmm1, xmm6 LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 LONG $0xcef80f66 // psubb xmm1, xmm6 - LONG $0x746e0f66; WORD $0x1824 // movd xmm6, dword [rsp + 24] + LONG $0x6e0f4166; BYTE $0xf6 // movd xmm6, r14d LONG $0x54b70f46; WORD $0x143a // movzx r10d, word [rdx + r15 + 20] LONG $0xe0750f66 // pcmpeqw xmm4, xmm0 LONG $0xe4630f66 // packsswb xmm4, xmm4 @@ -13702,7 +14355,7 @@ LBB2_192: LONG $0x5cc40f66; WORD $0x083a; BYTE $0x06 // pinsrw xmm3, word [rdx + rdi + 8], 6 LONG $0x5cc40f66; WORD $0x081a; BYTE $0x07 // pinsrw xmm3, word [rdx + rbx + 8], 7 LONG $0xcceb0f66 // por xmm1, xmm4 - LONG $0x7c6e0f66; WORD $0x1024 // movd xmm7, dword [rsp + 16] + LONG $0x7c6e0f66; WORD $0x2824 // movd xmm7, dword [rsp + 40] LONG $0x44b70f42; WORD $0x163a // movzx eax, word [rdx + r15 + 22] LONG $0xd0750f66 // pcmpeqw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -13710,7 +14363,7 @@ LBB2_192: LONG $0xf2710f66; BYTE $0x02 // psllw xmm2, 2 LONG $0xdb0f4166; BYTE $0xd1 // pand xmm2, xmm9 LONG $0xd1eb0f66 // por xmm2, xmm1 - LONG $0x6e0f4166; BYTE $0xe6 // movd xmm4, r14d + LONG $0x646e0f66; WORD $0x1824 // movd xmm4, dword [rsp + 24] LONG $0x5cb70f46; WORD $0x183a // movzx r11d, word [rdx + r15 + 24] LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 LONG $0xed630f66 // packsswb xmm5, xmm5 @@ -13740,7 +14393,7 @@ LBB2_192: LONG $0x7cc40f66; WORD $0x0c3a; BYTE $0x06 // pinsrw xmm7, word [rdx + rdi + 12], 6 LONG $0x7cc40f66; WORD $0x0c1a; BYTE $0x07 // pinsrw xmm7, word [rdx + rbx + 12], 7 LONG $0xdaeb0f66 // por xmm3, xmm2 - LONG $0x6e0f4466; WORD $0x2444; BYTE $0x28 // movd xmm8, dword [rsp + 40] + LONG $0x6e0f4466; WORD $0x2444; BYTE $0x38 // movd xmm8, dword [rsp + 56] LONG $0x74b70f46; WORD $0x1c3a // movzx r14d, word [rdx + r15 + 28] LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -13797,7 +14450,7 @@ LBB2_192: LONG $0xf9eb0f66 // por xmm7, xmm1 LONG $0xf66e0f66 // movd xmm6, esi LONG $0x74b70f42; WORD $0x243a // movzx esi, word [rdx + r15 + 36] - LONG $0x28247489 // mov dword [rsp + 40], esi + LONG $0x38247489 // mov dword [rsp + 56], esi QUAD $0x0114026cc40f4266 // pinsrw xmm5, word [rdx + r8 + 20], 1 QUAD $0x02140a6cc40f4266 // pinsrw xmm5, word [rdx + r9 + 20], 2 QUAD $0x0314226cc40f4266 // pinsrw xmm5, word [rdx + r12 + 20], 3 @@ -13813,7 +14466,7 @@ LBB2_192: LONG $0xefeb0f66 // por xmm5, xmm7 LONG $0x6e0f4166; BYTE $0xfe // movd xmm7, r14d LONG $0x74b70f42; WORD $0x263a // movzx esi, word [rdx + r15 + 38] - LONG $0x10247489 // mov dword [rsp + 16], esi + LONG $0x18247489 // mov dword [rsp + 24], esi QUAD $0x01160254c40f4266 // pinsrw xmm2, word [rdx + r8 + 22], 1 QUAD $0x02160a54c40f4266 // pinsrw xmm2, word [rdx + r9 + 22], 2 QUAD $0x03162254c40f4266 // pinsrw xmm2, word [rdx + r12 + 22], 3 @@ -13844,7 +14497,7 @@ LBB2_192: LONG $0xddeb0f66 // por xmm3, xmm5 LONG $0xe86e0f66 // movd xmm5, eax LONG $0x44b70f42; WORD $0x2a3a // movzx eax, word [rdx + r15 + 42] - LONG $0x18244489 // mov dword [rsp + 24], eax + LONG $0x28244489 // mov dword [rsp + 40], eax QUAD $0x011a0274c40f4266 // pinsrw xmm6, word [rdx + r8 + 26], 1 QUAD $0x021a0a74c40f4266 // pinsrw xmm6, word [rdx + r9 + 26], 2 QUAD $0x031a2274c40f4266 // pinsrw xmm6, word [rdx + r12 + 26], 3 @@ -13884,7 +14537,7 @@ LBB2_192: LONG $0xf2710f66; BYTE $0x07 // psllw xmm2, 7 LONG $0xdb0f4166; BYTE $0xd6 // pand xmm2, xmm14 LONG $0xd7eb0f66 // por xmm2, xmm7 - LONG $0x746e0f66; WORD $0x2824 // movd xmm6, dword [rsp + 40] + LONG $0x746e0f66; WORD 
$0x3824 // movd xmm6, dword [rsp + 56] LONG $0x74b70f42; WORD $0x2e3a // movzx esi, word [rdx + r15 + 46] QUAD $0x0120026cc40f4266 // pinsrw xmm5, word [rdx + r8 + 32], 1 QUAD $0x02200a6cc40f4266 // pinsrw xmm5, word [rdx + r9 + 32], 2 @@ -13905,7 +14558,7 @@ LBB2_192: LONG $0xf96f0f66 // movdqa xmm7, xmm1 LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 LONG $0xf9f80f66 // psubb xmm7, xmm1 - LONG $0x5c6e0f66; WORD $0x1024 // movd xmm3, dword [rsp + 16] + LONG $0x5c6e0f66; WORD $0x1824 // movd xmm3, dword [rsp + 24] LONG $0x5cb70f46; WORD $0x303a // movzx r11d, word [rdx + r15 + 48] LONG $0x6cc40f66; WORD $0x201a; BYTE $0x07 // pinsrw xmm5, word [rdx + rbx + 32], 7 LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 @@ -13940,7 +14593,7 @@ LBB2_192: LONG $0xf6710f66; BYTE $0x02 // psllw xmm6, 2 LONG $0xdb0f4166; BYTE $0xf1 // pand xmm6, xmm9 LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] + LONG $0x4c6e0f66; WORD $0x2824 // movd xmm1, dword [rsp + 40] LONG $0x74b70f46; WORD $0x343a // movzx r14d, word [rdx + r15 + 52] LONG $0x6cc40f66; WORD $0x281a; BYTE $0x07 // pinsrw xmm5, word [rdx + rbx + 40], 7 LONG $0xd8750f66 // pcmpeqw xmm3, xmm0 @@ -14127,16 +14780,16 @@ LBB2_192: LONG $0x08c18348 // add rcx, 8 WORD $0x8949; BYTE $0xcf // mov r15, rcx LONG $0x244c3b48; BYTE $0x20 // cmp rcx, qword [rsp + 32] - JNE LBB2_192 - QUAD $0x0000009824b48b4c // mov r14, qword [rsp + 152] - LONG $0x24743b4c; BYTE $0x20 // cmp r14, qword [rsp + 32] + JNE LBB2_189 + QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] + LONG $0x247c3b4c; BYTE $0x20 // cmp r15, qword [rsp + 32] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - LONG $0x246c8b44; BYTE $0x38 // mov r13d, dword [rsp + 56] + LONG $0x24748b44; BYTE $0x08 // mov r14d, dword [rsp + 8] LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] JNE LBB2_92 JMP LBB2_139 -LBB2_194: +LBB2_191: LONG $0xf8e78349 // and r15, -8 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x06e0c148 // shl rax, 6 @@ -14145,9 +14798,8 @@ LBB2_194: LONG $0x24048b48 // mov rax, qword [rsp] LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LONG $0xb8048d4a // lea rax, [rax + 4*r15] - LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax - LONG $0x246c8944; BYTE $0x38 // mov dword [rsp + 56], r13d - LONG $0x6e0f4166; BYTE $0xc5 // movd xmm0, r13d + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax + LONG $0x6e0f4166; BYTE $0xc6 // movd xmm0, r14d LONG $0xc0700ff2; BYTE $0xe0 // pshuflw xmm0, xmm0, 224 LONG $0xc0700f66; BYTE $0x00 // pshufd xmm0, xmm0, 0 WORD $0x3145; BYTE $0xff // xor r15d, r15d @@ -14159,7 +14811,7 @@ LBB2_194: LONG $0x6f0f4466; WORD $0x506d // movdqa xmm13, oword 80[rbp] /* [rip + .LCPI2_5] */ LONG $0x6f0f4466; WORD $0x6075 // movdqa xmm14, oword 96[rbp] /* [rip + .LCPI2_6] */ -LBB2_195: +LBB2_192: LONG $0x247c894c; BYTE $0x30 // mov qword [rsp + 48], r15 LONG $0x06e7c149 // shl r15, 6 WORD $0x894d; BYTE $0xf9 // mov r9, r15 @@ -14168,9 +14820,9 @@ LBB2_195: WORD $0x894c; BYTE $0xf9 // mov rcx, r15 WORD $0x894c; BYTE $0xff // mov rdi, r15 WORD $0x894c; BYTE $0xfb // mov rbx, r15 - LONG $0x04b70f42; BYTE $0x3a // movzx eax, word [rdx + r15] - LONG $0x54b70f46; WORD $0x023a // movzx r10d, word [rdx + r15 + 2] - LONG $0x74b70f46; WORD $0x043a // movzx r14d, word [rdx + r15 + 4] + LONG $0x34b70f46; BYTE $0x3a // movzx r14d, word [rdx + r15] + LONG $0x44b70f42; WORD $0x023a // movzx eax, word [rdx + r15 + 2] + LONG $0x54b70f46; WORD $0x043a // movzx r10d, word [rdx + r15 + 4] LONG $0x74b70f42; WORD 
$0x063a // movzx esi, word [rdx + r15 + 6] LONG $0x5cb70f46; WORD $0x083a // movzx r11d, word [rdx + r15 + 8] WORD $0x894d; BYTE $0xf8 // mov r8, r15 @@ -14181,7 +14833,7 @@ LBB2_195: LONG $0x40c98148; WORD $0x0001; BYTE $0x00 // or rcx, 320 LONG $0x80cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 384 LONG $0xc0cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 448 - LONG $0xe06e0f66 // movd xmm4, eax + LONG $0x6e0f4166; BYTE $0xe6 // movd xmm4, r14d LONG $0xc40f4266; WORD $0x0224; BYTE $0x01 // pinsrw xmm4, word [rdx + r8], 1 LONG $0xc40f4266; WORD $0x0a24; BYTE $0x02 // pinsrw xmm4, word [rdx + r9], 2 LONG $0xc40f4266; WORD $0x2224; BYTE $0x03 // pinsrw xmm4, word [rdx + r12], 3 @@ -14189,31 +14841,31 @@ LBB2_195: LONG $0x24c40f66; WORD $0x050a // pinsrw xmm4, word [rdx + rcx], 5 LONG $0x24c40f66; WORD $0x063a // pinsrw xmm4, word [rdx + rdi], 6 LONG $0x24c40f66; WORD $0x071a // pinsrw xmm4, word [rdx + rbx], 7 - LONG $0x44b70f42; WORD $0x0a3a // movzx eax, word [rdx + r15 + 10] - LONG $0x18244489 // mov dword [rsp + 24], eax - LONG $0x6e0f4166; BYTE $0xf2 // movd xmm6, r10d + LONG $0x74b70f46; WORD $0x0a3a // movzx r14d, word [rdx + r15 + 10] + LONG $0xf06e0f66 // movd xmm6, eax QUAD $0x01020274c40f4266 // pinsrw xmm6, word [rdx + r8 + 2], 1 QUAD $0x02020a74c40f4266 // pinsrw xmm6, word [rdx + r9 + 2], 2 QUAD $0x03022274c40f4266 // pinsrw xmm6, word [rdx + r12 + 2], 3 LONG $0x44b70f42; WORD $0x0c3a // movzx eax, word [rdx + r15 + 12] - LONG $0x10244489 // mov dword [rsp + 16], eax + LONG $0x28244489 // mov dword [rsp + 40], eax QUAD $0x04022a74c40f4266 // pinsrw xmm6, word [rdx + r13 + 2], 4 - LONG $0x6e0f4166; BYTE $0xd6 // movd xmm2, r14d - LONG $0x74b70f46; WORD $0x0e3a // movzx r14d, word [rdx + r15 + 14] + LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d + LONG $0x44b70f42; WORD $0x0e3a // movzx eax, word [rdx + r15 + 14] + LONG $0x18244489 // mov dword [rsp + 24], eax LONG $0x74c40f66; WORD $0x020a; BYTE $0x05 // pinsrw xmm6, word [rdx + rcx + 2], 5 LONG $0xee6e0f66 // movd xmm5, esi LONG $0x74b70f42; WORD $0x103a // movzx esi, word [rdx + r15 + 16] LONG $0x74c40f66; WORD $0x023a; BYTE $0x06 // pinsrw xmm6, word [rdx + rdi + 2], 6 LONG $0x6e0f4166; BYTE $0xdb // movd xmm3, r11d LONG $0x44b70f42; WORD $0x123a // movzx eax, word [rdx + r15 + 18] - LONG $0x28244489 // mov dword [rsp + 40], eax + LONG $0x38244489 // mov dword [rsp + 56], eax LONG $0x74c40f66; WORD $0x021a; BYTE $0x07 // pinsrw xmm6, word [rdx + rbx + 2], 7 LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 LONG $0xce6f0f66 // movdqa xmm1, xmm6 LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 LONG $0xcef80f66 // psubb xmm1, xmm6 - LONG $0x746e0f66; WORD $0x1824 // movd xmm6, dword [rsp + 24] + LONG $0x6e0f4166; BYTE $0xf6 // movd xmm6, r14d LONG $0x54b70f46; WORD $0x143a // movzx r10d, word [rdx + r15 + 20] LONG $0xe0750f66 // pcmpeqw xmm4, xmm0 LONG $0xe4630f66 // packsswb xmm4, xmm4 @@ -14240,7 +14892,7 @@ LBB2_195: LONG $0x5cc40f66; WORD $0x083a; BYTE $0x06 // pinsrw xmm3, word [rdx + rdi + 8], 6 LONG $0x5cc40f66; WORD $0x081a; BYTE $0x07 // pinsrw xmm3, word [rdx + rbx + 8], 7 LONG $0xcceb0f66 // por xmm1, xmm4 - LONG $0x7c6e0f66; WORD $0x1024 // movd xmm7, dword [rsp + 16] + LONG $0x7c6e0f66; WORD $0x2824 // movd xmm7, dword [rsp + 40] LONG $0x44b70f42; WORD $0x163a // movzx eax, word [rdx + r15 + 22] LONG $0xd0750f66 // pcmpeqw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -14248,7 +14900,7 @@ LBB2_195: LONG $0xf2710f66; BYTE $0x02 // psllw xmm2, 2 LONG $0xdb0f4166; BYTE $0xd1 // pand 
xmm2, xmm9 LONG $0xd1eb0f66 // por xmm2, xmm1 - LONG $0x6e0f4166; BYTE $0xe6 // movd xmm4, r14d + LONG $0x646e0f66; WORD $0x1824 // movd xmm4, dword [rsp + 24] LONG $0x5cb70f46; WORD $0x183a // movzx r11d, word [rdx + r15 + 24] LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 LONG $0xed630f66 // packsswb xmm5, xmm5 @@ -14278,7 +14930,7 @@ LBB2_195: LONG $0x7cc40f66; WORD $0x0c3a; BYTE $0x06 // pinsrw xmm7, word [rdx + rdi + 12], 6 LONG $0x7cc40f66; WORD $0x0c1a; BYTE $0x07 // pinsrw xmm7, word [rdx + rbx + 12], 7 LONG $0xdaeb0f66 // por xmm3, xmm2 - LONG $0x6e0f4466; WORD $0x2444; BYTE $0x28 // movd xmm8, dword [rsp + 40] + LONG $0x6e0f4466; WORD $0x2444; BYTE $0x38 // movd xmm8, dword [rsp + 56] LONG $0x74b70f46; WORD $0x1c3a // movzx r14d, word [rdx + r15 + 28] LONG $0xf0750f66 // pcmpeqw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -14335,7 +14987,7 @@ LBB2_195: LONG $0xf9eb0f66 // por xmm7, xmm1 LONG $0xf66e0f66 // movd xmm6, esi LONG $0x74b70f42; WORD $0x243a // movzx esi, word [rdx + r15 + 36] - LONG $0x28247489 // mov dword [rsp + 40], esi + LONG $0x38247489 // mov dword [rsp + 56], esi QUAD $0x0114026cc40f4266 // pinsrw xmm5, word [rdx + r8 + 20], 1 QUAD $0x02140a6cc40f4266 // pinsrw xmm5, word [rdx + r9 + 20], 2 QUAD $0x0314226cc40f4266 // pinsrw xmm5, word [rdx + r12 + 20], 3 @@ -14351,7 +15003,7 @@ LBB2_195: LONG $0xefeb0f66 // por xmm5, xmm7 LONG $0x6e0f4166; BYTE $0xfe // movd xmm7, r14d LONG $0x74b70f42; WORD $0x263a // movzx esi, word [rdx + r15 + 38] - LONG $0x10247489 // mov dword [rsp + 16], esi + LONG $0x18247489 // mov dword [rsp + 24], esi QUAD $0x01160254c40f4266 // pinsrw xmm2, word [rdx + r8 + 22], 1 QUAD $0x02160a54c40f4266 // pinsrw xmm2, word [rdx + r9 + 22], 2 QUAD $0x03162254c40f4266 // pinsrw xmm2, word [rdx + r12 + 22], 3 @@ -14382,7 +15034,7 @@ LBB2_195: LONG $0xddeb0f66 // por xmm3, xmm5 LONG $0xe86e0f66 // movd xmm5, eax LONG $0x44b70f42; WORD $0x2a3a // movzx eax, word [rdx + r15 + 42] - LONG $0x18244489 // mov dword [rsp + 24], eax + LONG $0x28244489 // mov dword [rsp + 40], eax QUAD $0x011a0274c40f4266 // pinsrw xmm6, word [rdx + r8 + 26], 1 QUAD $0x021a0a74c40f4266 // pinsrw xmm6, word [rdx + r9 + 26], 2 QUAD $0x031a2274c40f4266 // pinsrw xmm6, word [rdx + r12 + 26], 3 @@ -14422,7 +15074,7 @@ LBB2_195: LONG $0xf2710f66; BYTE $0x07 // psllw xmm2, 7 LONG $0xdb0f4166; BYTE $0xd6 // pand xmm2, xmm14 LONG $0xd7eb0f66 // por xmm2, xmm7 - LONG $0x746e0f66; WORD $0x2824 // movd xmm6, dword [rsp + 40] + LONG $0x746e0f66; WORD $0x3824 // movd xmm6, dword [rsp + 56] LONG $0x74b70f42; WORD $0x2e3a // movzx esi, word [rdx + r15 + 46] QUAD $0x0120026cc40f4266 // pinsrw xmm5, word [rdx + r8 + 32], 1 QUAD $0x02200a6cc40f4266 // pinsrw xmm5, word [rdx + r9 + 32], 2 @@ -14443,7 +15095,7 @@ LBB2_195: LONG $0xf96f0f66 // movdqa xmm7, xmm1 LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 LONG $0xf9f80f66 // psubb xmm7, xmm1 - LONG $0x5c6e0f66; WORD $0x1024 // movd xmm3, dword [rsp + 16] + LONG $0x5c6e0f66; WORD $0x1824 // movd xmm3, dword [rsp + 24] LONG $0x5cb70f46; WORD $0x303a // movzx r11d, word [rdx + r15 + 48] LONG $0x6cc40f66; WORD $0x201a; BYTE $0x07 // pinsrw xmm5, word [rdx + rbx + 32], 7 LONG $0xe8750f66 // pcmpeqw xmm5, xmm0 @@ -14478,7 +15130,7 @@ LBB2_195: LONG $0xf6710f66; BYTE $0x02 // psllw xmm6, 2 LONG $0xdb0f4166; BYTE $0xf1 // pand xmm6, xmm9 LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] + LONG $0x4c6e0f66; WORD $0x2824 // movd xmm1, dword [rsp + 40] LONG $0x74b70f46; WORD $0x343a // movzx r14d, word 
[rdx + r15 + 52] LONG $0x6cc40f66; WORD $0x281a; BYTE $0x07 // pinsrw xmm5, word [rdx + rbx + 40], 7 LONG $0xd8750f66 // pcmpeqw xmm3, xmm0 @@ -14665,24 +15317,23 @@ LBB2_195: LONG $0x08c18348 // add rcx, 8 WORD $0x8949; BYTE $0xcf // mov r15, rcx LONG $0x244c3b48; BYTE $0x20 // cmp rcx, qword [rsp + 32] - JNE LBB2_195 + JNE LBB2_192 QUAD $0x0000009824bc8b4c // mov r15, qword [rsp + 152] LONG $0x247c3b4c; BYTE $0x20 // cmp r15, qword [rsp + 32] QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] - LONG $0x246c8b44; BYTE $0x38 // mov r13d, dword [rsp + 56] - LONG $0x24748b4c; BYTE $0x08 // mov r14, qword [rsp + 8] + LONG $0x24748b44; BYTE $0x08 // mov r14d, dword [rsp + 8] LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] JNE LBB2_104 - JMP LBB2_143 + JMP LBB2_144 -LBB2_197: +LBB2_194: WORD $0x894d; BYTE $0xf0 // mov r8, r14 LONG $0xfce08349 // and r8, -4 WORD $0x894c; BYTE $0xc3 // mov rbx, r8 LONG $0x07e3c148 // shl rbx, 7 WORD $0x0148; BYTE $0xd3 // add rbx, rdx LONG $0x24048b48 // mov rax, qword [rsp] - LONG $0x801c8d4e // lea r11, [rax + 4*r8] + LONG $0x803c8d4e // lea r15, [rax + 4*r8] WORD $0x280f; BYTE $0xc8 // movaps xmm1, xmm0 LONG $0x00c8c60f // shufps xmm1, xmm0, 0 LONG $0xfcc28148; WORD $0x0001; BYTE $0x00 // add rdx, 508 @@ -14697,7 +15348,7 @@ LBB2_197: LONG $0x6f0f4466; WORD $0x704d // movdqa xmm9, oword 112[rbp] /* [rip + .LCPI2_7] */ LONG $0x24048b48 // mov rax, qword [rsp] -LBB2_198: +LBB2_195: QUAD $0xfffffe04b2100ff3 // movss xmm6, dword [rdx - 508] QUAD $0xfffffe08ba100ff3 // movss xmm7, dword [rdx - 504] QUAD $0xfffffe0caa100ff3 // movss xmm5, dword [rdx - 500] @@ -15042,10 +15693,10 @@ LBB2_198: LONG $0x04c18348 // add rcx, 4 LONG $0x00c28148; WORD $0x0002; BYTE $0x00 // add rdx, 512 WORD $0x3949; BYTE $0xc8 // cmp r8, rcx - JNE LBB2_198 + JNE LBB2_195 WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB2_127 - JMP LBB2_147 + JMP LBB2_148 TEXT ·_comparison_not_equal_arr_arr_sse4(SB), $80-48 @@ -15057,8 +15708,9 @@ TEXT ·_comparison_not_equal_arr_arr_sse4(SB), $80-48 MOVQ offset+40(FP), R9 ADDQ $8, SP - WORD $0x894d; BYTE $0xc3 // mov r11, r8 - WORD $0x8949; BYTE $0xce // mov r14, rcx + WORD $0x8944; BYTE $0xc8 // mov eax, r9d + WORD $0x894d; BYTE $0xc6 // mov r14, r8 + WORD $0x8949; BYTE $0xcc // mov r12, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 JG LBB3_29 WORD $0xff83; BYTE $0x03 // cmp edi, 3 @@ -15069,16 +15721,16 @@ TEXT ·_comparison_not_equal_arr_arr_sse4(SB), $80-48 JE LBB3_79 WORD $0xff83; BYTE $0x06 // cmp edi, 6 JNE LBB3_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_22 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_20: WORD $0x0e8b // mov ecx, dword [rsi] @@ -15091,7 +15743,7 @@ LBB3_20: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + 
rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -15100,49 +15752,49 @@ LBB3_20: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_20 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_22: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_26 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_24: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x068b // mov eax, dword [rsi] WORD $0x4e8b; BYTE $0x04 // mov ecx, dword [rsi + 4] WORD $0x023b // cmp eax, dword [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] WORD $0x4a3b; BYTE $0x04 // cmp ecx, dword [rdx + 4] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] WORD $0x468b; BYTE $0x08 // mov eax, dword [rsi + 8] WORD $0x423b; BYTE $0x08 // cmp eax, dword [rdx + 8] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] WORD $0x468b; BYTE $0x0c // mov eax, dword [rsi + 12] WORD $0x423b; BYTE $0x0c // cmp eax, dword [rdx + 12] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] WORD $0x468b; BYTE $0x10 // mov eax, dword [rsi + 16] WORD $0x423b; BYTE $0x10 // cmp eax, dword [rdx + 16] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] WORD $0x468b; BYTE $0x14 // mov eax, dword [rsi + 20] WORD $0x423b; BYTE $0x14 // cmp eax, dword [rdx + 20] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] WORD $0x468b; BYTE $0x18 // mov eax, dword [rsi + 24] WORD $0x423b; BYTE $0x18 // cmp eax, dword [rdx + 24] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] WORD $0x468b; BYTE $0x1c // mov eax, dword [rsi + 28] WORD $0x423b; BYTE $0x1c // cmp eax, dword [rdx + 28] LONG $0xd5950f41 // setne r13b WORD $0x468b; BYTE $0x20 // mov eax, dword [rsi + 32] WORD $0x423b; BYTE $0x20 // cmp eax, dword [rdx + 32] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] WORD $0x468b; BYTE $0x24 // mov eax, dword [rsi + 36] WORD $0x423b; BYTE $0x24 // cmp eax, dword [rdx + 36] LONG $0xd0950f41 // setne r8b @@ -15154,165 +15806,165 @@ LBB3_24: LONG $0xd7950f41 // setne r15b WORD $0x468b; BYTE $0x30 // mov eax, dword [rsi + 48] WORD $0x423b; BYTE $0x30 // cmp eax, dword [rdx + 48] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] WORD $0x468b; BYTE $0x34 // mov eax, dword [rsi + 52] WORD $0x423b; BYTE $0x34 // cmp eax, dword [rdx + 52] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] WORD 
$0x468b; BYTE $0x38 // mov eax, dword [rsi + 56] WORD $0x423b; BYTE $0x38 // cmp eax, dword [rdx + 56] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] WORD $0x468b; BYTE $0x3c // mov eax, dword [rsi + 60] WORD $0x423b; BYTE $0x3c // cmp eax, dword [rdx + 60] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil WORD $0x468b; BYTE $0x40 // mov eax, dword [rsi + 64] - WORD $0x4e8b; BYTE $0x44 // mov ecx, dword [rsi + 68] + WORD $0x5e8b; BYTE $0x44 // mov ebx, dword [rsi + 68] WORD $0x423b; BYTE $0x40 // cmp eax, dword [rdx + 64] WORD $0x468b; BYTE $0x48 // mov eax, dword [rsi + 72] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - WORD $0x4a3b; BYTE $0x44 // cmp ecx, dword [rdx + 68] - WORD $0x4e8b; BYTE $0x4c // mov ecx, dword [rsi + 76] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + WORD $0x5a3b; BYTE $0x44 // cmp ebx, dword [rdx + 68] + WORD $0x5e8b; BYTE $0x4c // mov ebx, dword [rsi + 76] LONG $0xd2950f41 // setne r10b WORD $0x423b; BYTE $0x48 // cmp eax, dword [rdx + 72] WORD $0x468b; BYTE $0x50 // mov eax, dword [rsi + 80] LONG $0xd6950f41 // setne r14b - WORD $0x4a3b; BYTE $0x4c // cmp ecx, dword [rdx + 76] - WORD $0x4e8b; BYTE $0x54 // mov ecx, dword [rsi + 84] + WORD $0x5a3b; BYTE $0x4c // cmp ebx, dword [rdx + 76] + WORD $0x5e8b; BYTE $0x54 // mov ebx, dword [rsi + 84] LONG $0xd4950f41 // setne r12b WORD $0x423b; BYTE $0x50 // cmp eax, dword [rdx + 80] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - WORD $0x4a3b; BYTE $0x54 // cmp ecx, dword [rdx + 84] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + WORD $0x5a3b; BYTE $0x54 // cmp ebx, dword [rdx + 84] WORD $0x468b; BYTE $0x58 // mov eax, dword [rsi + 88] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] WORD $0x423b; BYTE $0x58 // cmp eax, dword [rdx + 88] WORD $0x468b; BYTE $0x5c // mov eax, dword [rsi + 92] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] WORD $0x423b; BYTE $0x5c // cmp eax, dword [rdx + 92] WORD $0x468b; BYTE $0x60 // mov eax, dword [rsi + 96] LONG $0xd1950f41 // setne r9b WORD $0x423b; BYTE $0x60 // cmp eax, dword [rdx + 96] WORD $0x468b; BYTE $0x64 // mov eax, dword [rsi + 100] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] WORD $0x423b; BYTE $0x64 // cmp eax, dword [rdx + 100] WORD $0x468b; BYTE $0x68 // mov eax, dword [rsi + 104] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] WORD $0x423b; BYTE $0x68 // cmp eax, dword [rdx + 104] WORD $0x468b; BYTE $0x6c // mov eax, dword [rsi + 108] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] WORD $0x423b; BYTE $0x6c // cmp eax, dword [rdx + 108] WORD $0x468b; BYTE $0x70 // mov eax, dword [rsi + 112] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] WORD $0x423b; BYTE $0x70 // cmp eax, dword [rdx + 112] WORD $0x468b; BYTE $0x74 // mov eax, dword [rsi + 116] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] WORD $0x423b; BYTE $0x74 // cmp eax, dword [rdx + 116] WORD $0x468b; BYTE $0x78 // mov eax, dword [rsi + 120] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 
19] WORD $0x423b; BYTE $0x78 // cmp eax, dword [rdx + 120] WORD $0x468b; BYTE $0x7c // mov eax, dword [rsi + 124] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 WORD $0x423b; BYTE $0x7c // cmp eax, dword [rdx + 124] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 
WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_24 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE 
$0x38 // mov r15, qword [rsp + 56] LBB3_26: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_28: @@ -15323,16 +15975,16 @@ LBB3_28: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_28 JMP LBB3_123 @@ -15345,266 +15997,361 @@ LBB3_29: JE LBB3_112 WORD $0xff83; BYTE $0x0c // cmp edi, 12 JNE LBB3_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_50 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x634c; BYTE $0xd0 // movsxd r10, eax LBB3_48: LONG $0x06100ff2 // movsd xmm0, qword [rsi] LONG $0x08c68348 // add rsi, 8 LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] LONG $0x08528d48 // lea rdx, [rdx + 8] - LONG $0xd2950f41 // setne r10b - WORD $0xf641; BYTE $0xda // neg r10b - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xcb08 // or bl, cl + WORD $0xdbf6 // neg bl + LONG $0x077a8d49 // lea rdi, [r10 + 7] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f49 // cmovns rdi, r10 LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] - WORD $0x3045; BYTE $0xc2 // xor r10b, r8b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc3 // xor bl, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] - WORD $0xc189 // mov ecx, eax + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0x2944; BYTE $0xc9 // sub ecx, r9d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0x2044; BYTE $0xd3 // and bl, r10b - WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x000001b8; BYTE $0x00 // mov eax, 1 + WORD $0xe0d3 // shl eax, cl + WORD $0xd820 // and al, bl + WORD $0x3044; BYTE $0xc0 // xor al, r8b + LONG $0x3c048841 // mov byte [r12 + rdi], al + LONG $0x01c28349 // add r10, 1 + LONG $0x08fa8349 // cmp r10, 8 JNE LBB3_48 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_50: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 
32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_54 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 + LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LBB3_52: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 LONG $0x06100ff2 // movsd xmm0, qword [rsi] - LONG $0x4e100ff2; BYTE $0x08 // movsd xmm1, qword [rsi + 8] LONG $0x022e0f66 // ucomisd xmm0, qword [rdx] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] - LONG $0x4a2e0f66; BYTE $0x08 // ucomisd xmm1, qword [rdx + 8] - WORD $0x950f; BYTE $0xd0 // setne al + LONG $0x46100ff2; BYTE $0x08 // movsd xmm0, qword [rsi + 8] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x15244c88 // mov byte [rsp + 21], cl + LONG $0x422e0f66; BYTE $0x08 // ucomisd xmm0, qword [rdx + 8] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0d244c88 // mov byte [rsp + 13], cl LONG $0x46100ff2; BYTE $0x10 // movsd xmm0, qword [rsi + 16] LONG $0x422e0f66; BYTE $0x10 // ucomisd xmm0, qword [rdx + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x14244c88 // mov byte [rsp + 20], cl LONG $0x46100ff2; BYTE $0x18 // movsd xmm0, qword [rsi + 24] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x422e0f66; BYTE $0x18 // ucomisd xmm0, qword [rdx + 24] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x17244c88 // mov byte [rsp + 23], cl LONG $0x46100ff2; BYTE $0x20 // movsd xmm0, qword [rsi + 32] LONG $0x422e0f66; BYTE $0x20 // ucomisd xmm0, qword [rdx + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x16244c88 // mov byte [rsp + 22], cl LONG $0x46100ff2; BYTE $0x28 // movsd xmm0, qword [rsi + 40] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x422e0f66; BYTE $0x28 // ucomisd xmm0, qword [rdx + 40] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al LONG $0x46100ff2; BYTE $0x30 // movsd xmm0, qword [rsi + 48] LONG $0x422e0f66; BYTE $0x30 // ucomisd xmm0, qword [rdx + 48] - LONG $0x46100ff2; BYTE $0x38 // movsd xmm0, qword [rsi + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x46100ff2; BYTE $0x38 // movsd xmm0, qword [rsi + 56] LONG $0x422e0f66; BYTE $0x38 // ucomisd xmm0, qword [rdx + 56] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x03244c88 // mov byte [rsp + 3], cl LONG $0x46100ff2; BYTE $0x40 // movsd xmm0, qword [rsi + 64] LONG $0x422e0f66; BYTE $0x40 // ucomisd xmm0, qword [rdx + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x13244c88 // mov byte [rsp + 19], cl LONG $0x46100ff2; BYTE $0x48 // movsd xmm0, qword [rsi + 72] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 
8] LONG $0x422e0f66; BYTE $0x48 // ucomisd xmm0, qword [rdx + 72] - WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al LONG $0x46100ff2; BYTE $0x50 // movsd xmm0, qword [rsi + 80] LONG $0x422e0f66; BYTE $0x50 // ucomisd xmm0, qword [rdx + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x12244c88 // mov byte [rsp + 18], cl LONG $0x46100ff2; BYTE $0x58 // movsd xmm0, qword [rsi + 88] - LONG $0xd1950f41 // setne r9b LONG $0x422e0f66; BYTE $0x58 // ucomisd xmm0, qword [rdx + 88] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x11244c88 // mov byte [rsp + 17], cl LONG $0x46100ff2; BYTE $0x60 // movsd xmm0, qword [rsi + 96] LONG $0x422e0f66; BYTE $0x60 // ucomisd xmm0, qword [rdx + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x46100ff2; BYTE $0x68 // movsd xmm0, qword [rsi + 104] - LONG $0xd2950f41 // setne r10b LONG $0x422e0f66; BYTE $0x68 // ucomisd xmm0, qword [rdx + 104] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0f244c88 // mov byte [rsp + 15], cl LONG $0x46100ff2; BYTE $0x70 // movsd xmm0, qword [rsi + 112] LONG $0x422e0f66; BYTE $0x70 // ucomisd xmm0, qword [rdx + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0c244c88 // mov byte [rsp + 12], cl LONG $0x46100ff2; BYTE $0x78 // movsd xmm0, qword [rsi + 120] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x422e0f66; BYTE $0x78 // ucomisd xmm0, qword [rdx + 120] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0b244c88 // mov byte [rsp + 11], cl QUAD $0x0000008086100ff2 // movsd xmm0, qword [rsi + 128] QUAD $0x00000080822e0f66 // ucomisd xmm0, qword [rdx + 128] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0e244c88 // mov byte [rsp + 14], cl QUAD $0x0000008886100ff2 // movsd xmm0, qword [rsi + 136] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] QUAD $0x00000088822e0f66 // ucomisd xmm0, qword [rdx + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0a244c88 // mov byte [rsp + 10], cl QUAD $0x0000009086100ff2 // movsd xmm0, qword [rsi + 144] - LONG $0xd6950f41 // setne r14b QUAD $0x00000090822e0f66 // ucomisd xmm0, qword [rdx + 144] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x09244c88 // mov byte [rsp + 9], cl QUAD $0x0000009886100ff2 // movsd xmm0, qword [rsi + 152] - LONG $0xd4950f41 // setne r12b QUAD $0x00000098822e0f66 // ucomisd xmm0, qword [rdx + 152] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al QUAD $0x000000a086100ff2 // movsd xmm0, qword [rsi + 160] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] QUAD $0x000000a0822e0f66 // ucomisd xmm0, qword [rdx + 160] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + 
LONG $0x08244c88 // mov byte [rsp + 8], cl QUAD $0x000000a886100ff2 // movsd xmm0, qword [rsi + 168] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] QUAD $0x000000a8822e0f66 // ucomisd xmm0, qword [rdx + 168] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x07244c88 // mov byte [rsp + 7], cl QUAD $0x000000b086100ff2 // movsd xmm0, qword [rsi + 176] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] QUAD $0x000000b0822e0f66 // ucomisd xmm0, qword [rdx + 176] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x05244c88 // mov byte [rsp + 5], cl QUAD $0x000000b886100ff2 // movsd xmm0, qword [rsi + 184] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] QUAD $0x000000b8822e0f66 // ucomisd xmm0, qword [rdx + 184] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al QUAD $0x000000c086100ff2 // movsd xmm0, qword [rsi + 192] - LONG $0xd0950f41 // setne r8b QUAD $0x000000c0822e0f66 // ucomisd xmm0, qword [rdx + 192] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x06244c88 // mov byte [rsp + 6], cl QUAD $0x000000c886100ff2 // movsd xmm0, qword [rsi + 200] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] QUAD $0x000000c8822e0f66 // ucomisd xmm0, qword [rdx + 200] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl QUAD $0x000000d086100ff2 // movsd xmm0, qword [rsi + 208] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] QUAD $0x000000d0822e0f66 // ucomisd xmm0, qword [rdx + 208] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al QUAD $0x000000d886100ff2 // movsd xmm0, qword [rsi + 216] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] QUAD $0x000000d8822e0f66 // ucomisd xmm0, qword [rdx + 216] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al QUAD $0x000000e086100ff2 // movsd xmm0, qword [rsi + 224] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] QUAD $0x000000e0822e0f66 // ucomisd xmm0, qword [rdx + 224] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl QUAD $0x000000e886100ff2 // movsd xmm0, qword [rsi + 232] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] QUAD $0x000000e8822e0f66 // ucomisd xmm0, qword [rdx + 232] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl QUAD $0x000000f086100ff2 // movsd xmm0, qword [rsi + 240] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] QUAD $0x000000f0822e0f66 // ucomisd xmm0, qword [rdx + 240] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al QUAD $0x000000f886100ff2 // movsd xmm0, qword [rsi + 248] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 QUAD $0x000000f8822e0f66 // ucomisd xmm0, qword [rdx + 248] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD 
$0xc000 // add al, al - LONG $0x04244402 // add al, byte [rsp + 4] + LONG $0x15244402 // add al, byte [rsp + 21] + LONG $0x05e7c040 // shl dil, 5 LONG $0x06e5c041 // shl r13b, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0845; BYTE $0xef // or r15b, r13b - LONG $0x6cb60f44; WORD $0x0524 // movzx r13d, byte [rsp + 5] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0841; BYTE $0xc5 // or r13b, al - WORD $0x8944; BYTE $0xe8 // mov eax, r13d - WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD $0x8944; BYTE $0xef // mov edi, r13d + LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xc108 // or cl, al + WORD $0xd889 // mov eax, ebx + WORD $0xd800 // add al, bl + LONG $0x13244402 // add al, byte [rsp + 19] + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x03 // movzx eax, byte [rsp + 3] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x03244488 // mov byte [rsp + 3], al + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x17 // movzx eax, byte [rsp + 23] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + LONG $0x245cb60f; BYTE $0x11 // movzx ebx, byte [rsp + 17] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] - LONG $0x03e5c041 // shl r13b, 3 + LONG $0x04e5c041 // shl r13b, 4 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - WORD $0x8941; BYTE $0xcd // mov r13d, ecx - LONG $0x03e3c041 // shl r11b, 3 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xda // or r10b, r11b - LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd0 // or al, r10b - LONG $0x4cb60f44; WORD $0x0624 // movzx r9d, byte [rsp + 6] - LONG $0x06e1c041 // shl r9b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xcb // or bl, r9b - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al - WORD $0x0045; BYTE $0xf6 // add r14b, r14b - LONG $0x24740244; BYTE $0x0e // add r14b, byte [rsp + 14] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x0f // movzx ebx, byte [rsp + 15] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xd808 // or al, bl + 
WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x0a // movzx ebx, byte [rsp + 10] + WORD $0xdb00 // add bl, bl + LONG $0x0e245c02 // add bl, byte [rsp + 14] + LONG $0x244cb60f; BYTE $0x09 // movzx ecx, byte [rsp + 9] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0841; BYTE $0xce // or r14b, cl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xf3 // or bl, r14b + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x74b60f44; WORD $0x0324 // movzx r14d, byte [rsp + 3] + WORD $0x0845; BYTE $0xee // or r14b, r13b + LONG $0x6cb60f44; WORD $0x0724 // movzx r13d, byte [rsp + 7] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x05 // movzx ecx, byte [rsp + 5] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x07e7c041 // shl r15b, 7 + WORD $0x0841; BYTE $0xcf // or r15b, cl + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xc900 // add cl, cl + LONG $0x06244c02 // add cl, byte [rsp + 6] + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0841; BYTE $0xcb // or r11b, cl + LONG $0x03e2c041 // shl r10b, 3 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xd1 // or cl, r10b + LONG $0x24348845 // mov byte [r12], r14b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448841; BYTE $0x01 // mov byte [r12 + 1], al LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x14244402 // add al, byte [rsp + 20] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x13 // movzx ecx, byte [rsp + 19] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xc7 // or dil, al - WORD $0x0840; BYTE $0xcf // or dil, cl - LONG $0x02468845 // mov byte [r14 + 2], r8b - LONG $0x037e8841 // mov byte [r14 + 3], dil + LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b + LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG 
$0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 JNE LBB3_52 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] LBB3_54: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_56: + LONG $0x01498d4c // lea r9, [rcx + 1] LONG $0x04100ff2; BYTE $0xce // movsd xmm0, qword [rsi + 8*rcx] LONG $0x042e0f66; BYTE $0xca // ucomisd xmm0, qword [rdx + 8*rcx] - LONG $0x01418d4c // lea r8, [rcx + 1] - WORD $0x950f; BYTE $0xd3 // setne bl - WORD $0xdbf6 // neg bl + WORD $0x9a0f; BYTE $0xd3 // setp bl + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd808 // or al, bl + WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc0 // xor al, r8b WORD $0xe180; BYTE $0x07 // and cl, 7 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd820 // and al, bl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xc3 // xor bl, r8b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + WORD $0x894c; BYTE $0xc9 // mov rcx, r9 + WORD $0x394d; BYTE $0xce // cmp r14, r9 JNE LBB3_56 JMP LBB3_123 @@ -15613,16 +16360,16 @@ LBB3_2: JE LBB3_57 WORD $0xff83; BYTE $0x03 // cmp edi, 3 JNE LBB3_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_8 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_6: WORD $0xb60f; BYTE $0x0e // movzx ecx, byte [rsi] @@ -15635,7 +16382,7 @@ LBB3_6: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -15644,49 +16391,49 @@ LBB3_6: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_6 - LONG $0x01c68349 // add r14, 1 + LONG 
$0x01c48349 // add r12, 1 LBB3_8: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_12 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LBB3_10: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb60f; BYTE $0x06 // movzx eax, byte [rsi] LONG $0x014eb60f // movzx ecx, byte [rsi + 1] WORD $0x023a // cmp al, byte [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] WORD $0x4a3a; BYTE $0x01 // cmp cl, byte [rdx + 1] - WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x0246b60f // movzx eax, byte [rsi + 2] WORD $0x423a; BYTE $0x02 // cmp al, byte [rdx + 2] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x0346b60f // movzx eax, byte [rsi + 3] WORD $0x423a; BYTE $0x03 // cmp al, byte [rdx + 3] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x0446b60f // movzx eax, byte [rsi + 4] WORD $0x423a; BYTE $0x04 // cmp al, byte [rdx + 4] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x0546b60f // movzx eax, byte [rsi + 5] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x0646b60f // movzx eax, byte [rsi + 6] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0746b60f // movzx eax, byte [rsi + 7] WORD $0x423a; BYTE $0x07 // cmp al, byte [rdx + 7] LONG $0xd7950f41 // setne r15b LONG $0x0846b60f // movzx eax, byte [rsi + 8] WORD $0x423a; BYTE $0x08 // cmp al, byte [rdx + 8] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x0946b60f // movzx eax, byte [rsi + 9] WORD $0x423a; BYTE $0x09 // cmp al, byte [rdx + 9] LONG $0xd7950f40 // setne dil @@ -15701,16 +16448,16 @@ LBB3_10: LONG $0xd6950f41 // setne r14b LONG $0x0d46b60f // movzx eax, byte [rsi + 13] WORD $0x423a; BYTE $0x0d // cmp al, byte [rdx + 13] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x0e46b60f // movzx eax, byte [rsi + 14] WORD $0x423a; BYTE $0x0e // cmp al, byte [rdx + 14] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x0f46b60f // movzx eax, byte [rsi + 15] WORD $0x423a; BYTE $0x0f // cmp al, byte [rdx + 15] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd0950f41 // setne r8b LONG $0x1046b60f // movzx eax, byte [rsi + 16] WORD $0x423a; BYTE $0x10 // cmp al, byte [rdx + 16] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x1146b60f // movzx eax, byte [rsi + 17] WORD $0x423a; BYTE $0x11 // cmp al, byte [rdx + 17] LONG $0xd4950f41 // setne r12b @@ -15719,144 +16466,144 @@ LBB3_10: LONG $0xd5950f41 // setne r13b LONG $0x1346b60f // movzx eax, byte [rsi + 
19] WORD $0x423a; BYTE $0x13 // cmp al, byte [rdx + 19] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] LONG $0x1446b60f // movzx eax, byte [rsi + 20] WORD $0x423a; BYTE $0x14 // cmp al, byte [rdx + 20] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x1546b60f // movzx eax, byte [rsi + 21] WORD $0x423a; BYTE $0x15 // cmp al, byte [rdx + 21] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] LONG $0x1646b60f // movzx eax, byte [rsi + 22] WORD $0x423a; BYTE $0x16 // cmp al, byte [rdx + 22] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x1746b60f // movzx eax, byte [rsi + 23] WORD $0x423a; BYTE $0x17 // cmp al, byte [rdx + 23] LONG $0xd1950f41 // setne r9b LONG $0x1846b60f // movzx eax, byte [rsi + 24] WORD $0x423a; BYTE $0x18 // cmp al, byte [rdx + 24] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x1946b60f // movzx eax, byte [rsi + 25] WORD $0x423a; BYTE $0x19 // cmp al, byte [rdx + 25] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x1a46b60f // movzx eax, byte [rsi + 26] WORD $0x423a; BYTE $0x1a // cmp al, byte [rdx + 26] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x1b46b60f // movzx eax, byte [rsi + 27] WORD $0x423a; BYTE $0x1b // cmp al, byte [rdx + 27] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x1c46b60f // movzx eax, byte [rsi + 28] WORD $0x423a; BYTE $0x1c // cmp al, byte [rdx + 28] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x1d46b60f // movzx eax, byte [rsi + 29] WORD $0x423a; BYTE $0x1d // cmp al, byte [rdx + 29] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x1e46b60f // movzx eax, byte [rsi + 30] WORD $0x423a; BYTE $0x1e // cmp al, byte [rdx + 30] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x1f46b60f // movzx eax, byte [rsi + 31] LONG $0x20c68348 // add rsi, 32 WORD $0x423a; BYTE $0x1f // cmp al, byte [rdx + 31] - LONG $0xd0950f41 // setne r8b - WORD $0xc900 // add cl, cl - LONG $0x28244c02 // add cl, byte [rsp + 40] - WORD $0xc889 // mov eax, ecx - LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xdb00 // add bl, bl + LONG $0x04245c02 // add bl, byte [rsp + 4] + WORD $0xd889 // mov eax, ebx + LONG $0x245cb60f; BYTE $0x05 // movzx ebx, byte [rsp + 5] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xcf // or r15b, cl - LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x245cb60f; BYTE $0x15 // movzx ebx, byte [rsp + 21] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x07 // add dil, byte [rsp + 7] - 
LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x247c0240; BYTE $0x08 // add dil, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e2c041 // shl r10b, 2 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0xcf89 // mov edi, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx LONG $0x03e3c041 // shl r11b, 3 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xde // or r14b, r11b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x247cb60f; BYTE $0x06 // movzx edi, byte [rsp + 6] + LONG $0x247cb60f; BYTE $0x07 // movzx edi, byte [rsp + 7] LONG $0x06e7c040 // shl dil, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x0841; BYTE $0xc0 // or r8b, al WORD $0x0045; BYTE $0xe4 // add r12b, r12b - LONG $0x24640244; BYTE $0x0d // add r12b, byte [rsp + 13] + LONG $0x24640244; BYTE $0x0e // add r12b, byte [rsp + 14] LONG $0x02e5c041 // shl r13b, 2 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0b // movzx ecx, byte [rsp + 11] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x243c8845 // mov byte [r12], r15b + LONG $0x245cb60f; BYTE $0x0c // movzx ebx, byte [rsp + 12] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 
12] + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x12 // movzx ecx, byte [rsp + 18] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x13 // movzx ebx, byte [rsp + 19] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x20c28348 // add rdx, 32 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 JNE LBB3_10 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] LBB3_12: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_14: @@ -15867,16 +16614,16 @@ LBB3_14: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_14 JMP LBB3_123 @@ -15885,16 +16632,16 @@ LBB3_30: JE LBB3_90 WORD $0xff83; BYTE $0x08 // cmp edi, 8 JNE LBB3_123 - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD 
$0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_36 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_34: WORD $0x8b48; BYTE $0x0e // mov rcx, qword [rsi] @@ -15907,7 +16654,7 @@ LBB3_34: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -15916,49 +16663,49 @@ LBB3_34: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_34 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_36: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_40 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_38: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x8b48; BYTE $0x06 // mov rax, qword [rsi] LONG $0x084e8b48 // mov rcx, qword [rsi + 8] WORD $0x3b48; BYTE $0x02 // cmp rax, qword [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] LONG $0x084a3b48 // cmp rcx, qword [rdx + 8] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x10468b48 // mov rax, qword [rsi + 16] LONG $0x10423b48 // cmp rax, qword [rdx + 16] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x18468b48 // mov rax, qword [rsi + 24] LONG $0x18423b48 // cmp rax, qword [rdx + 24] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x20468b48 // mov rax, qword [rsi + 32] LONG $0x20423b48 // cmp rax, qword [rdx + 32] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x28468b48 // mov rax, qword [rsi + 40] LONG $0x28423b48 // cmp rax, qword [rdx + 40] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x30468b48 // mov rax, qword [rsi + 48] LONG $0x30423b48 // cmp rax, qword [rdx + 48] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x38468b48 // mov rax, qword [rsi + 56] LONG $0x38423b48 // cmp rax, qword [rdx + 56] LONG $0xd5950f41 // setne r13b LONG $0x40468b48 // mov rax, qword [rsi + 64] LONG $0x40423b48 // cmp rax, 
qword [rdx + 64] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x48468b48 // mov rax, qword [rsi + 72] LONG $0x48423b48 // cmp rax, qword [rdx + 72] LONG $0xd0950f41 // setne r8b @@ -15970,165 +16717,165 @@ LBB3_38: LONG $0xd7950f41 // setne r15b LONG $0x60468b48 // mov rax, qword [rsi + 96] LONG $0x60423b48 // cmp rax, qword [rdx + 96] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x68468b48 // mov rax, qword [rsi + 104] LONG $0x68423b48 // cmp rax, qword [rdx + 104] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x70468b48 // mov rax, qword [rsi + 112] LONG $0x70423b48 // cmp rax, qword [rdx + 112] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x78468b48 // mov rax, qword [rsi + 120] LONG $0x78423b48 // cmp rax, qword [rdx + 120] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil LONG $0x80868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 128] - LONG $0x888e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 136] + LONG $0x889e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 136] LONG $0x80823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 128] LONG $0x90868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 144] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - LONG $0x888a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 136] - LONG $0x988e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 152] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x889a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 136] + LONG $0x989e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 152] LONG $0xd2950f41 // setne r10b LONG $0x90823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 144] LONG $0xa0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 160] LONG $0xd6950f41 // setne r14b - LONG $0x988a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 152] - LONG $0xa88e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 168] + LONG $0x989a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 152] + LONG $0xa89e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 168] LONG $0xd4950f41 // setne r12b LONG $0xa0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 160] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - LONG $0xa88a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 168] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0xa89a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 168] LONG $0xb0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 176] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0xb0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 176] LONG $0xb8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 184] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0xb8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 184] LONG $0xc0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 192] LONG $0xd1950f41 // setne r9b LONG $0xc0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 192] LONG $0xc8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 200] - LONG $0x2454950f; BYTE $0x13 // setne 
byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0xc8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 200] LONG $0xd0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 208] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0xd0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 208] LONG $0xd8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 216] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0xd8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 216] LONG $0xe0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 224] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0xe0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 224] LONG $0xe8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 232] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0xe8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 232] LONG $0xf0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 240] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0xf0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 240] LONG $0xf8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 248] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0xf8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 248] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG 
$0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; 
BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_38 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_40: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_42: @@ -16139,30 +16886,30 @@ LBB3_42: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_42 JMP LBB3_123 LBB3_68: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_72 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_70: WORD $0xb70f; BYTE $0x0e // movzx ecx, word [rsi] @@ -16175,7 +16922,7 @@ LBB3_70: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD 
$0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -16184,49 +16931,49 @@ LBB3_70: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_70 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_72: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_76 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_74: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb70f; BYTE $0x06 // movzx eax, word [rsi] LONG $0x024eb70f // movzx ecx, word [rsi + 2] WORD $0x3b66; BYTE $0x02 // cmp ax, word [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] LONG $0x024a3b66 // cmp cx, word [rdx + 2] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x0446b70f // movzx eax, word [rsi + 4] LONG $0x04423b66 // cmp ax, word [rdx + 4] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x0646b70f // movzx eax, word [rsi + 6] LONG $0x06423b66 // cmp ax, word [rdx + 6] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x0846b70f // movzx eax, word [rsi + 8] LONG $0x08423b66 // cmp ax, word [rdx + 8] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x0a46b70f // movzx eax, word [rsi + 10] LONG $0x0a423b66 // cmp ax, word [rdx + 10] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x0c46b70f // movzx eax, word [rsi + 12] LONG $0x0c423b66 // cmp ax, word [rdx + 12] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0e46b70f // movzx eax, word [rsi + 14] LONG $0x0e423b66 // cmp ax, word [rdx + 14] LONG $0xd5950f41 // setne r13b LONG $0x1046b70f // movzx eax, word [rsi + 16] LONG $0x10423b66 // cmp ax, word [rdx + 16] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x1246b70f // movzx eax, word [rsi + 18] LONG $0x12423b66 // cmp ax, word [rdx + 18] LONG $0xd0950f41 // setne r8b @@ -16238,165 +16985,165 @@ LBB3_74: LONG $0xd7950f41 // setne r15b LONG $0x1846b70f // movzx eax, word [rsi + 24] LONG $0x18423b66 // cmp ax, word [rdx + 24] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x1a46b70f // movzx eax, word [rsi + 26] LONG $0x1a423b66 // cmp ax, word [rdx + 26] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x1c46b70f // movzx eax, word [rsi + 28] LONG $0x1c423b66 // cmp ax, word [rdx + 28] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x1e46b70f // movzx eax, 
word [rsi + 30] LONG $0x1e423b66 // cmp ax, word [rdx + 30] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil LONG $0x2046b70f // movzx eax, word [rsi + 32] - LONG $0x224eb70f // movzx ecx, word [rsi + 34] + LONG $0x225eb70f // movzx ebx, word [rsi + 34] LONG $0x20423b66 // cmp ax, word [rdx + 32] LONG $0x2446b70f // movzx eax, word [rsi + 36] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - LONG $0x224a3b66 // cmp cx, word [rdx + 34] - LONG $0x264eb70f // movzx ecx, word [rsi + 38] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x225a3b66 // cmp bx, word [rdx + 34] + LONG $0x265eb70f // movzx ebx, word [rsi + 38] LONG $0xd2950f41 // setne r10b LONG $0x24423b66 // cmp ax, word [rdx + 36] LONG $0x2846b70f // movzx eax, word [rsi + 40] LONG $0xd6950f41 // setne r14b - LONG $0x264a3b66 // cmp cx, word [rdx + 38] - LONG $0x2a4eb70f // movzx ecx, word [rsi + 42] + LONG $0x265a3b66 // cmp bx, word [rdx + 38] + LONG $0x2a5eb70f // movzx ebx, word [rsi + 42] LONG $0xd4950f41 // setne r12b LONG $0x28423b66 // cmp ax, word [rdx + 40] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - LONG $0x2a4a3b66 // cmp cx, word [rdx + 42] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2a5a3b66 // cmp bx, word [rdx + 42] LONG $0x2c46b70f // movzx eax, word [rsi + 44] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x2c423b66 // cmp ax, word [rdx + 44] LONG $0x2e46b70f // movzx eax, word [rsi + 46] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x2e423b66 // cmp ax, word [rdx + 46] LONG $0x3046b70f // movzx eax, word [rsi + 48] LONG $0xd1950f41 // setne r9b LONG $0x30423b66 // cmp ax, word [rdx + 48] LONG $0x3246b70f // movzx eax, word [rsi + 50] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x32423b66 // cmp ax, word [rdx + 50] LONG $0x3446b70f // movzx eax, word [rsi + 52] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x34423b66 // cmp ax, word [rdx + 52] LONG $0x3646b70f // movzx eax, word [rsi + 54] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x36423b66 // cmp ax, word [rdx + 54] LONG $0x3846b70f // movzx eax, word [rsi + 56] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x38423b66 // cmp ax, word [rdx + 56] LONG $0x3a46b70f // movzx eax, word [rsi + 58] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x3a423b66 // cmp ax, word [rdx + 58] LONG $0x3c46b70f // movzx eax, word [rsi + 60] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x3c423b66 // cmp ax, word [rdx + 60] LONG $0x3e46b70f // movzx eax, word [rsi + 62] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x40c68348 // add rsi, 64 LONG $0x3e423b66 // cmp ax, word [rdx + 62] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // 
mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 
// or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x40c28348 // add rdx, 64 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_74 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_76: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_78: @@ -16407,30 +17154,30 @@ LBB3_78: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and 
cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_78 JMP LBB3_123 LBB3_79: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_83 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_81: WORD $0xb70f; BYTE $0x0e // movzx ecx, word [rsi] @@ -16443,7 +17190,7 @@ LBB3_81: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -16452,49 +17199,49 @@ LBB3_81: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_81 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_83: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_87 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_85: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb70f; BYTE $0x06 // movzx eax, word [rsi] LONG $0x024eb70f // movzx ecx, word [rsi + 2] WORD $0x3b66; BYTE $0x02 // cmp ax, word [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] LONG $0x024a3b66 // cmp cx, word [rdx + 2] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x0446b70f // movzx eax, word [rsi + 4] LONG $0x04423b66 // cmp ax, word [rdx + 4] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x0646b70f // movzx eax, word [rsi + 6] LONG $0x06423b66 // cmp ax, word [rdx + 6] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x0846b70f // movzx eax, word [rsi + 8] LONG $0x08423b66 // cmp ax, word [rdx + 8] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x0a46b70f // movzx eax, word [rsi + 10] LONG $0x0a423b66 
// cmp ax, word [rdx + 10] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x0c46b70f // movzx eax, word [rsi + 12] LONG $0x0c423b66 // cmp ax, word [rdx + 12] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0e46b70f // movzx eax, word [rsi + 14] LONG $0x0e423b66 // cmp ax, word [rdx + 14] LONG $0xd5950f41 // setne r13b LONG $0x1046b70f // movzx eax, word [rsi + 16] LONG $0x10423b66 // cmp ax, word [rdx + 16] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x1246b70f // movzx eax, word [rsi + 18] LONG $0x12423b66 // cmp ax, word [rdx + 18] LONG $0xd0950f41 // setne r8b @@ -16506,165 +17253,165 @@ LBB3_85: LONG $0xd7950f41 // setne r15b LONG $0x1846b70f // movzx eax, word [rsi + 24] LONG $0x18423b66 // cmp ax, word [rdx + 24] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x1a46b70f // movzx eax, word [rsi + 26] LONG $0x1a423b66 // cmp ax, word [rdx + 26] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x1c46b70f // movzx eax, word [rsi + 28] LONG $0x1c423b66 // cmp ax, word [rdx + 28] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x1e46b70f // movzx eax, word [rsi + 30] LONG $0x1e423b66 // cmp ax, word [rdx + 30] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil LONG $0x2046b70f // movzx eax, word [rsi + 32] - LONG $0x224eb70f // movzx ecx, word [rsi + 34] + LONG $0x225eb70f // movzx ebx, word [rsi + 34] LONG $0x20423b66 // cmp ax, word [rdx + 32] LONG $0x2446b70f // movzx eax, word [rsi + 36] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - LONG $0x224a3b66 // cmp cx, word [rdx + 34] - LONG $0x264eb70f // movzx ecx, word [rsi + 38] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x225a3b66 // cmp bx, word [rdx + 34] + LONG $0x265eb70f // movzx ebx, word [rsi + 38] LONG $0xd2950f41 // setne r10b LONG $0x24423b66 // cmp ax, word [rdx + 36] LONG $0x2846b70f // movzx eax, word [rsi + 40] LONG $0xd6950f41 // setne r14b - LONG $0x264a3b66 // cmp cx, word [rdx + 38] - LONG $0x2a4eb70f // movzx ecx, word [rsi + 42] + LONG $0x265a3b66 // cmp bx, word [rdx + 38] + LONG $0x2a5eb70f // movzx ebx, word [rsi + 42] LONG $0xd4950f41 // setne r12b LONG $0x28423b66 // cmp ax, word [rdx + 40] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - LONG $0x2a4a3b66 // cmp cx, word [rdx + 42] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2a5a3b66 // cmp bx, word [rdx + 42] LONG $0x2c46b70f // movzx eax, word [rsi + 44] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x2c423b66 // cmp ax, word [rdx + 44] LONG $0x2e46b70f // movzx eax, word [rsi + 46] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x2e423b66 // cmp ax, word [rdx + 46] LONG $0x3046b70f // movzx eax, word [rsi + 48] LONG $0xd1950f41 // setne r9b LONG $0x30423b66 // cmp ax, word [rdx + 48] LONG $0x3246b70f // movzx eax, word [rsi + 50] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x32423b66 // cmp ax, word [rdx + 50] LONG $0x3446b70f // movzx eax, word [rsi + 52] - 
LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x34423b66 // cmp ax, word [rdx + 52] LONG $0x3646b70f // movzx eax, word [rsi + 54] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x36423b66 // cmp ax, word [rdx + 54] LONG $0x3846b70f // movzx eax, word [rsi + 56] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x38423b66 // cmp ax, word [rdx + 56] LONG $0x3a46b70f // movzx eax, word [rsi + 58] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x3a423b66 // cmp ax, word [rdx + 58] LONG $0x3c46b70f // movzx eax, word [rsi + 60] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x3c423b66 // cmp ax, word [rdx + 60] LONG $0x3e46b70f // movzx eax, word [rsi + 62] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x40c68348 // add rsi, 64 LONG $0x3e423b66 // cmp ax, word [rdx + 62] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx 
eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG 
$0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x40c28348 // add rdx, 64 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_85 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_87: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_89: @@ -16675,30 +17422,30 @@ LBB3_89: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_89 JMP LBB3_123 LBB3_101: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_105 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_103: WORD $0x8b48; BYTE $0x0e // mov rcx, qword [rsi] @@ -16711,7 +17458,7 @@ LBB3_103: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -16720,49 +17467,49 @@ LBB3_103: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_103 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_105: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_109 - LONG 
$0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_107: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x8b48; BYTE $0x06 // mov rax, qword [rsi] LONG $0x084e8b48 // mov rcx, qword [rsi + 8] WORD $0x3b48; BYTE $0x02 // cmp rax, qword [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] LONG $0x084a3b48 // cmp rcx, qword [rdx + 8] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x10468b48 // mov rax, qword [rsi + 16] LONG $0x10423b48 // cmp rax, qword [rdx + 16] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x18468b48 // mov rax, qword [rsi + 24] LONG $0x18423b48 // cmp rax, qword [rdx + 24] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x20468b48 // mov rax, qword [rsi + 32] LONG $0x20423b48 // cmp rax, qword [rdx + 32] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x28468b48 // mov rax, qword [rsi + 40] LONG $0x28423b48 // cmp rax, qword [rdx + 40] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x30468b48 // mov rax, qword [rsi + 48] LONG $0x30423b48 // cmp rax, qword [rdx + 48] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x38468b48 // mov rax, qword [rsi + 56] LONG $0x38423b48 // cmp rax, qword [rdx + 56] LONG $0xd5950f41 // setne r13b LONG $0x40468b48 // mov rax, qword [rsi + 64] LONG $0x40423b48 // cmp rax, qword [rdx + 64] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x48468b48 // mov rax, qword [rsi + 72] LONG $0x48423b48 // cmp rax, qword [rdx + 72] LONG $0xd0950f41 // setne r8b @@ -16774,165 +17521,165 @@ LBB3_107: LONG $0xd7950f41 // setne r15b LONG $0x60468b48 // mov rax, qword [rsi + 96] LONG $0x60423b48 // cmp rax, qword [rdx + 96] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x68468b48 // mov rax, qword [rsi + 104] LONG $0x68423b48 // cmp rax, qword [rdx + 104] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x70468b48 // mov rax, qword [rsi + 112] LONG $0x70423b48 // cmp rax, qword [rdx + 112] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x78468b48 // mov rax, qword [rsi + 120] LONG $0x78423b48 // cmp rax, qword [rdx + 120] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil LONG $0x80868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 128] - LONG $0x888e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 136] + LONG $0x889e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 136] LONG $0x80823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 128] LONG $0x90868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 144] - LONG $0x2454950f; BYTE $0x0a // setne byte 
[rsp + 10] - LONG $0x888a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 136] - LONG $0x988e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 152] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x889a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 136] + LONG $0x989e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 152] LONG $0xd2950f41 // setne r10b LONG $0x90823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 144] LONG $0xa0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 160] LONG $0xd6950f41 // setne r14b - LONG $0x988a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 152] - LONG $0xa88e8b48; WORD $0x0000; BYTE $0x00 // mov rcx, qword [rsi + 168] + LONG $0x989a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 152] + LONG $0xa89e8b48; WORD $0x0000; BYTE $0x00 // mov rbx, qword [rsi + 168] LONG $0xd4950f41 // setne r12b LONG $0xa0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 160] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - LONG $0xa88a3b48; WORD $0x0000; BYTE $0x00 // cmp rcx, qword [rdx + 168] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0xa89a3b48; WORD $0x0000; BYTE $0x00 // cmp rbx, qword [rdx + 168] LONG $0xb0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 176] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0xb0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 176] LONG $0xb8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 184] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0xb8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 184] LONG $0xc0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 192] LONG $0xd1950f41 // setne r9b LONG $0xc0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 192] LONG $0xc8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 200] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0xc8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 200] LONG $0xd0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 208] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0xd0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 208] LONG $0xd8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 216] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0xd8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 216] LONG $0xe0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 224] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0xe0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 224] LONG $0xe8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 232] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0xe8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 232] LONG $0xf0868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 240] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0xf0823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 240] LONG $0xf8868b48; WORD $0x0000; BYTE $0x00 // mov rax, qword [rsi + 248] - 
LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0xf8823b48; WORD $0x0000; BYTE $0x00 // cmp rax, qword [rdx + 248] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 
// movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x00c28148; WORD $0x0001; BYTE $0x00 // add rdx, 256 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_107 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_109: LONG $0x05e7c149 // shl r15, 
5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_111: @@ -16943,294 +17690,389 @@ LBB3_111: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_111 JMP LBB3_123 LBB3_112: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_116 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x634c; BYTE $0xd0 // movsxd r10, eax LBB3_114: LONG $0x06100ff3 // movss xmm0, dword [rsi] LONG $0x04c68348 // add rsi, 4 WORD $0x2e0f; BYTE $0x02 // ucomiss xmm0, dword [rdx] LONG $0x04528d48 // lea rdx, [rdx + 4] - LONG $0xd2950f41 // setne r10b - WORD $0xf641; BYTE $0xda // neg r10b - LONG $0x07788d48 // lea rdi, [rax + 7] - WORD $0x8548; BYTE $0xc0 // test rax, rax - LONG $0xf8490f48 // cmovns rdi, rax + WORD $0x9a0f; BYTE $0xd1 // setp cl + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xcb08 // or bl, cl + WORD $0xdbf6 // neg bl + LONG $0x077a8d49 // lea rdi, [r10 + 7] + WORD $0x854d; BYTE $0xd2 // test r10, r10 + LONG $0xfa490f49 // cmovns rdi, r10 LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] - WORD $0x3045; BYTE $0xc2 // xor r10b, r8b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc3 // xor bl, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] - WORD $0xc189 // mov ecx, eax + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0x2944; BYTE $0xc9 // sub ecx, r9d - LONG $0x000001bb; BYTE $0x00 // mov ebx, 1 - WORD $0xe3d3 // shl ebx, cl - WORD $0x2044; BYTE $0xd3 // and bl, r10b - WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x01c08348 // add rax, 1 - LONG $0x08f88348 // cmp rax, 8 + LONG $0x000001b8; BYTE $0x00 // mov eax, 1 + WORD $0xe0d3 // shl eax, cl + WORD $0xd820 // and al, bl + WORD $0x3044; BYTE $0xc0 // xor al, r8b + LONG $0x3c048841 // mov byte [r12 + rdi], al + LONG $0x01c28349 // add r10, 1 + LONG $0x08fa8349 // cmp r10, 8 JNE LBB3_114 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_116: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_120 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x20 // mov 
qword [rsp + 32], r15 - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 + LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 LBB3_118: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 LONG $0x06100ff3 // movss xmm0, dword [rsi] - LONG $0x4e100ff3; BYTE $0x04 // movss xmm1, dword [rsi + 4] WORD $0x2e0f; BYTE $0x02 // ucomiss xmm0, dword [rdx] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] - LONG $0x044a2e0f // ucomiss xmm1, dword [rdx + 4] - WORD $0x950f; BYTE $0xd0 // setne al + LONG $0x46100ff3; BYTE $0x04 // movss xmm0, dword [rsi + 4] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x15244c88 // mov byte [rsp + 21], cl + LONG $0x04422e0f // ucomiss xmm0, dword [rdx + 4] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0d244c88 // mov byte [rsp + 13], cl LONG $0x46100ff3; BYTE $0x08 // movss xmm0, dword [rsi + 8] LONG $0x08422e0f // ucomiss xmm0, dword [rdx + 8] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x14244c88 // mov byte [rsp + 20], cl LONG $0x46100ff3; BYTE $0x0c // movss xmm0, dword [rsi + 12] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0c422e0f // ucomiss xmm0, dword [rdx + 12] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x17244c88 // mov byte [rsp + 23], cl LONG $0x46100ff3; BYTE $0x10 // movss xmm0, dword [rsi + 16] LONG $0x10422e0f // ucomiss xmm0, dword [rdx + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x16244c88 // mov byte [rsp + 22], cl LONG $0x46100ff3; BYTE $0x14 // movss xmm0, dword [rsi + 20] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x14422e0f // ucomiss xmm0, dword [rdx + 20] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al LONG $0x46100ff3; BYTE $0x18 // movss xmm0, dword [rsi + 24] LONG $0x18422e0f // ucomiss xmm0, dword [rdx + 24] - LONG $0x46100ff3; BYTE $0x1c // movss xmm0, dword [rsi + 28] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x46100ff3; BYTE $0x1c // movss xmm0, dword [rsi + 28] LONG $0x1c422e0f // ucomiss xmm0, dword [rdx + 28] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x03244c88 // mov byte [rsp + 3], cl LONG $0x46100ff3; BYTE $0x20 // movss xmm0, dword [rsi + 32] LONG $0x20422e0f // ucomiss xmm0, dword [rdx + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x13244c88 // mov byte [rsp + 19], cl LONG $0x46100ff3; BYTE $0x24 // movss xmm0, dword [rsi + 36] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x24422e0f // ucomiss xmm0, dword [rdx + 36] - WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al LONG $0x46100ff3; BYTE $0x28 // movss xmm0, dword 
[rsi + 40] LONG $0x28422e0f // ucomiss xmm0, dword [rdx + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x12244c88 // mov byte [rsp + 18], cl LONG $0x46100ff3; BYTE $0x2c // movss xmm0, dword [rsi + 44] - LONG $0xd1950f41 // setne r9b LONG $0x2c422e0f // ucomiss xmm0, dword [rdx + 44] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x11244c88 // mov byte [rsp + 17], cl LONG $0x46100ff3; BYTE $0x30 // movss xmm0, dword [rsi + 48] LONG $0x30422e0f // ucomiss xmm0, dword [rdx + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x46100ff3; BYTE $0x34 // movss xmm0, dword [rsi + 52] - LONG $0xd2950f41 // setne r10b LONG $0x34422e0f // ucomiss xmm0, dword [rdx + 52] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0f244c88 // mov byte [rsp + 15], cl LONG $0x46100ff3; BYTE $0x38 // movss xmm0, dword [rsi + 56] LONG $0x38422e0f // ucomiss xmm0, dword [rdx + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0c244c88 // mov byte [rsp + 12], cl LONG $0x46100ff3; BYTE $0x3c // movss xmm0, dword [rsi + 60] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x3c422e0f // ucomiss xmm0, dword [rdx + 60] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0b244c88 // mov byte [rsp + 11], cl LONG $0x46100ff3; BYTE $0x40 // movss xmm0, dword [rsi + 64] LONG $0x40422e0f // ucomiss xmm0, dword [rdx + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0e244c88 // mov byte [rsp + 14], cl LONG $0x46100ff3; BYTE $0x44 // movss xmm0, dword [rsi + 68] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x44422e0f // ucomiss xmm0, dword [rdx + 68] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x0a244c88 // mov byte [rsp + 10], cl LONG $0x46100ff3; BYTE $0x48 // movss xmm0, dword [rsi + 72] - LONG $0xd6950f41 // setne r14b LONG $0x48422e0f // ucomiss xmm0, dword [rdx + 72] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x09244c88 // mov byte [rsp + 9], cl LONG $0x46100ff3; BYTE $0x4c // movss xmm0, dword [rsi + 76] - LONG $0xd4950f41 // setne r12b LONG $0x4c422e0f // ucomiss xmm0, dword [rdx + 76] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al LONG $0x46100ff3; BYTE $0x50 // movss xmm0, dword [rsi + 80] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] LONG $0x50422e0f // ucomiss xmm0, dword [rdx + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x46100ff3; BYTE $0x54 // movss xmm0, dword [rsi + 84] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x54422e0f // ucomiss xmm0, dword [rdx + 84] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x07244c88 // mov byte [rsp + 7], cl LONG 
$0x46100ff3; BYTE $0x58 // movss xmm0, dword [rsi + 88] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] LONG $0x58422e0f // ucomiss xmm0, dword [rdx + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x05244c88 // mov byte [rsp + 5], cl LONG $0x46100ff3; BYTE $0x5c // movss xmm0, dword [rsi + 92] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x5c422e0f // ucomiss xmm0, dword [rdx + 92] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al LONG $0x46100ff3; BYTE $0x60 // movss xmm0, dword [rsi + 96] - LONG $0xd0950f41 // setne r8b LONG $0x60422e0f // ucomiss xmm0, dword [rdx + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x06244c88 // mov byte [rsp + 6], cl LONG $0x46100ff3; BYTE $0x64 // movss xmm0, dword [rsi + 100] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x64422e0f // ucomiss xmm0, dword [rdx + 100] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x46100ff3; BYTE $0x68 // movss xmm0, dword [rsi + 104] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x68422e0f // ucomiss xmm0, dword [rdx + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al LONG $0x46100ff3; BYTE $0x6c // movss xmm0, dword [rsi + 108] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x6c422e0f // ucomiss xmm0, dword [rdx + 108] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al LONG $0x46100ff3; BYTE $0x70 // movss xmm0, dword [rsi + 112] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x70422e0f // ucomiss xmm0, dword [rdx + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x04244c88 // mov byte [rsp + 4], cl LONG $0x46100ff3; BYTE $0x74 // movss xmm0, dword [rsi + 116] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x74422e0f // ucomiss xmm0, dword [rdx + 116] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x46100ff3; BYTE $0x78 // movss xmm0, dword [rsi + 120] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x78422e0f // ucomiss xmm0, dword [rdx + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x46100ff3; BYTE $0x7c // movss xmm0, dword [rsi + 124] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 LONG $0x7c422e0f // ucomiss xmm0, dword [rdx + 124] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x04244402 // add al, byte [rsp + 4] + LONG $0x15244402 // add al, byte [rsp + 21] + LONG $0x05e7c040 // shl dil, 5 LONG $0x06e5c041 // shl r13b, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0845; BYTE $0xef // or r15b, r13b - LONG $0x6cb60f44; WORD $0x0524 // movzx r13d, byte [rsp + 5] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0841; BYTE $0xc5 // or r13b, al - WORD $0x8944; BYTE $0xe8 // mov eax, r13d - 
WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD $0x8944; BYTE $0xef // mov edi, r13d + LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xc108 // or cl, al + WORD $0xd889 // mov eax, ebx + WORD $0xd800 // add al, bl + LONG $0x13244402 // add al, byte [rsp + 19] + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x03 // movzx eax, byte [rsp + 3] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x03244488 // mov byte [rsp + 3], al + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x17 // movzx eax, byte [rsp + 23] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + LONG $0x245cb60f; BYTE $0x11 // movzx ebx, byte [rsp + 17] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x6cb60f44; WORD $0x1624 // movzx r13d, byte [rsp + 22] - LONG $0x03e5c041 // shl r13b, 3 + LONG $0x04e5c041 // shl r13b, 4 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - WORD $0x8941; BYTE $0xcd // mov r13d, ecx - LONG $0x03e3c041 // shl r11b, 3 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xe9 // or cl, r13b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xda // or r10b, r11b - LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0x0844; BYTE $0xd0 // or al, r10b - LONG $0x4cb60f44; WORD $0x0624 // movzx r9d, byte [rsp + 6] - LONG $0x06e1c041 // shl r9b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xcb // or bl, r9b - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al - WORD $0x0045; BYTE $0xf6 // add r14b, r14b - LONG $0x24740244; BYTE $0x0e // add r14b, byte [rsp + 14] - LONG $0x02e4c041 // shl r12b, 2 - WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xd808 // or al, bl + WORD $0xc789 // mov edi, eax + LONG $0x245cb60f; BYTE $0x0f // movzx ebx, byte [rsp + 15] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x0a // movzx ebx, byte [rsp + 10] + WORD $0xdb00 // add bl, bl + LONG 
$0x0e245c02 // add bl, byte [rsp + 14] + LONG $0x244cb60f; BYTE $0x09 // movzx ecx, byte [rsp + 9] + WORD $0xe1c0; BYTE $0x02 // shl cl, 2 + WORD $0xd908 // or cl, bl + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0841; BYTE $0xcb // or r11b, cl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x5cb60f44; WORD $0x0324 // movzx r11d, byte [rsp + 3] + WORD $0x0845; BYTE $0xeb // or r11b, r13b + LONG $0x6cb60f44; WORD $0x0724 // movzx r13d, byte [rsp + 7] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x05 // movzx ecx, byte [rsp + 5] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x07e2c041 // shl r10b, 7 + WORD $0x0841; BYTE $0xca // or r10b, cl + WORD $0x0841; BYTE $0xda // or r10b, bl + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + WORD $0xc900 // add cl, cl + LONG $0x06244c02 // add cl, byte [rsp + 6] + LONG $0x02e7c041 // shl r15b, 2 + WORD $0x0841; BYTE $0xcf // or r15b, cl + LONG $0x03e6c041 // shl r14b, 3 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xf1 // or cl, r14b + LONG $0x241c8845 // mov byte [r12], r11b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e1c041 // shl r9b, 6 + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448841; BYTE $0x01 // mov byte [r12 + 1], al LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xc8 // or r8b, r9b WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x14244402 // add al, byte [rsp + 20] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x13 // movzx ecx, byte [rsp + 19] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xc108 // or cl, al - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xc7 // or dil, al - WORD $0x0840; BYTE $0xcf // or dil, cl - LONG $0x02468845 // mov byte [r14 + 2], r8b - LONG $0x037e8841 // mov byte [r14 + 3], dil + LONG $0x24548845; BYTE $0x02 // mov byte [r12 + 2], r10b + LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 JNE LBB3_118 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] LBB3_120: LONG $0x05e7c149 // 
shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_122: + LONG $0x01498d4c // lea r9, [rcx + 1] LONG $0x04100ff3; BYTE $0x8e // movss xmm0, dword [rsi + 4*rcx] LONG $0x8a042e0f // ucomiss xmm0, dword [rdx + 4*rcx] - LONG $0x01418d4c // lea r8, [rcx + 1] - WORD $0x950f; BYTE $0xd3 // setne bl - WORD $0xdbf6 // neg bl + WORD $0x9a0f; BYTE $0xd3 // setp bl + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd808 // or al, bl + WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] + WORD $0x3044; BYTE $0xc0 // xor al, r8b WORD $0xe180; BYTE $0x07 // and cl, 7 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd820 // and al, bl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xc3 // xor bl, r8b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + WORD $0x894c; BYTE $0xc9 // mov rcx, r9 + WORD $0x394d; BYTE $0xce // cmp r14, r9 JNE LBB3_122 JMP LBB3_123 LBB3_57: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_61 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_59: WORD $0xb60f; BYTE $0x0e // movzx ecx, byte [rsi] @@ -17243,7 +18085,7 @@ LBB3_59: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -17252,49 +18094,49 @@ LBB3_59: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_59 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_61: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_65 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 LBB3_63: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG 
$0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0xb60f; BYTE $0x06 // movzx eax, byte [rsi] LONG $0x014eb60f // movzx ecx, byte [rsi + 1] WORD $0x023a // cmp al, byte [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] WORD $0x4a3a; BYTE $0x01 // cmp cl, byte [rdx + 1] - WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x0246b60f // movzx eax, byte [rsi + 2] WORD $0x423a; BYTE $0x02 // cmp al, byte [rdx + 2] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] LONG $0x0346b60f // movzx eax, byte [rsi + 3] WORD $0x423a; BYTE $0x03 // cmp al, byte [rdx + 3] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] LONG $0x0446b60f // movzx eax, byte [rsi + 4] WORD $0x423a; BYTE $0x04 // cmp al, byte [rdx + 4] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] LONG $0x0546b60f // movzx eax, byte [rsi + 5] WORD $0x423a; BYTE $0x05 // cmp al, byte [rdx + 5] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] LONG $0x0646b60f // movzx eax, byte [rsi + 6] WORD $0x423a; BYTE $0x06 // cmp al, byte [rdx + 6] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] LONG $0x0746b60f // movzx eax, byte [rsi + 7] WORD $0x423a; BYTE $0x07 // cmp al, byte [rdx + 7] LONG $0xd7950f41 // setne r15b LONG $0x0846b60f // movzx eax, byte [rsi + 8] WORD $0x423a; BYTE $0x08 // cmp al, byte [rdx + 8] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x0946b60f // movzx eax, byte [rsi + 9] WORD $0x423a; BYTE $0x09 // cmp al, byte [rdx + 9] LONG $0xd7950f40 // setne dil @@ -17309,16 +18151,16 @@ LBB3_63: LONG $0xd6950f41 // setne r14b LONG $0x0d46b60f // movzx eax, byte [rsi + 13] WORD $0x423a; BYTE $0x0d // cmp al, byte [rdx + 13] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] LONG $0x0e46b60f // movzx eax, byte [rsi + 14] WORD $0x423a; BYTE $0x0e // cmp al, byte [rdx + 14] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] LONG $0x0f46b60f // movzx eax, byte [rsi + 15] WORD $0x423a; BYTE $0x0f // cmp al, byte [rdx + 15] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd0950f41 // setne r8b LONG $0x1046b60f // movzx eax, byte [rsi + 16] WORD $0x423a; BYTE $0x10 // cmp al, byte [rdx + 16] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] LONG $0x1146b60f // movzx eax, byte [rsi + 17] WORD $0x423a; BYTE $0x11 // cmp al, byte [rdx + 17] LONG $0xd4950f41 // setne r12b @@ -17327,144 +18169,144 @@ LBB3_63: LONG $0xd5950f41 // setne r13b LONG $0x1346b60f // movzx eax, byte [rsi + 19] WORD $0x423a; BYTE $0x13 // cmp al, byte [rdx + 19] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] LONG $0x1446b60f // movzx eax, byte [rsi + 20] WORD $0x423a; BYTE $0x14 // cmp al, byte [rdx + 20] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] LONG $0x1546b60f // movzx eax, byte [rsi + 21] WORD $0x423a; BYTE $0x15 // cmp al, byte [rdx + 21] - LONG $0x2454950f; BYTE $0x0a // setne byte 
[rsp + 10] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] LONG $0x1646b60f // movzx eax, byte [rsi + 22] WORD $0x423a; BYTE $0x16 // cmp al, byte [rdx + 22] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] LONG $0x1746b60f // movzx eax, byte [rsi + 23] WORD $0x423a; BYTE $0x17 // cmp al, byte [rdx + 23] LONG $0xd1950f41 // setne r9b LONG $0x1846b60f // movzx eax, byte [rsi + 24] WORD $0x423a; BYTE $0x18 // cmp al, byte [rdx + 24] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] LONG $0x1946b60f // movzx eax, byte [rsi + 25] WORD $0x423a; BYTE $0x19 // cmp al, byte [rdx + 25] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] LONG $0x1a46b60f // movzx eax, byte [rsi + 26] WORD $0x423a; BYTE $0x1a // cmp al, byte [rdx + 26] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] LONG $0x1b46b60f // movzx eax, byte [rsi + 27] WORD $0x423a; BYTE $0x1b // cmp al, byte [rdx + 27] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x1c46b60f // movzx eax, byte [rsi + 28] WORD $0x423a; BYTE $0x1c // cmp al, byte [rdx + 28] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] LONG $0x1d46b60f // movzx eax, byte [rsi + 29] WORD $0x423a; BYTE $0x1d // cmp al, byte [rdx + 29] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x1e46b60f // movzx eax, byte [rsi + 30] WORD $0x423a; BYTE $0x1e // cmp al, byte [rdx + 30] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] LONG $0x1f46b60f // movzx eax, byte [rsi + 31] LONG $0x20c68348 // add rsi, 32 WORD $0x423a; BYTE $0x1f // cmp al, byte [rdx + 31] - LONG $0xd0950f41 // setne r8b - WORD $0xc900 // add cl, cl - LONG $0x28244c02 // add cl, byte [rsp + 40] - WORD $0xc889 // mov eax, ecx - LONG $0x244cb60f; BYTE $0x04 // movzx ecx, byte [rsp + 4] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xdb00 // add bl, bl + LONG $0x04245c02 // add bl, byte [rsp + 4] + WORD $0xd889 // mov eax, ebx + LONG $0x245cb60f; BYTE $0x05 // movzx ebx, byte [rsp + 5] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xcf // or r15b, cl - LONG $0x244cb60f; BYTE $0x14 // movzx ecx, byte [rsp + 20] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x245cb60f; BYTE $0x15 // movzx ebx, byte [rsp + 21] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x07 // add dil, byte [rsp + 7] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x247c0240; BYTE $0x08 // add dil, byte [rsp + 8] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e2c041 // shl r10b, 2 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD 
$0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0xcf89 // mov edi, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx LONG $0x03e3c041 // shl r11b, 3 WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0840; BYTE $0xf9 // or cl, dil + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xde // or r14b, r11b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x247cb60f; BYTE $0x06 // movzx edi, byte [rsp + 6] + LONG $0x247cb60f; BYTE $0x07 // movzx edi, byte [rsp + 7] LONG $0x06e7c040 // shl dil, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x0841; BYTE $0xcf // or r15b, cl - WORD $0xc308 // or bl, al + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0x0841; BYTE $0xc0 // or r8b, al WORD $0x0045; BYTE $0xe4 // add r12b, r12b - LONG $0x24640244; BYTE $0x0d // add r12b, byte [rsp + 13] + LONG $0x24640244; BYTE $0x0e // add r12b, byte [rsp + 14] LONG $0x02e5c041 // shl r13b, 2 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0a // movzx eax, byte [rsp + 10] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x3e // mov byte [r14], r15b - LONG $0x244cb60f; BYTE $0x0b // movzx ecx, byte [rsp + 11] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x243c8845 // mov byte [r12], r15b + LONG $0x245cb60f; BYTE $0x0c // movzx ebx, byte [rsp + 12] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] + LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD 
$0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x12 // movzx ecx, byte [rsp + 18] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc8 // or r8b, cl - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x13 // movzx ebx, byte [rsp + 19] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x20c28348 // add rdx, 32 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 + LONG $0x04c48349 // add r12, 4 + LONG $0x24448348; WORD $0xff28 // add qword [rsp + 40], -1 JNE LBB3_63 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] LBB3_65: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_67: @@ -17475,30 +18317,30 @@ LBB3_67: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_67 JMP LBB3_123 LBB3_90: - LONG $0x1f7b8d4d // lea r15, [r11 + 31] - WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xfb490f4d // cmovns r15, r11 - LONG $0x07418d41 // lea eax, [r9 + 7] - WORD $0x8545; BYTE $0xc9 // test r9d, r9d - LONG $0xc1490f41 // cmovns eax, r9d - WORD $0xe083; BYTE $0xf8 // and eax, -8 - WORD $0x2941; BYTE $0xc1 // sub r9d, eax + LONG $0x1f7e8d4d // lea r15, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xfe490f4d // cmovns r15, r14 + WORD $0x488d; BYTE $0x07 // lea ecx, [rax + 7] + WORD $0xc085 // test eax, eax + WORD $0x490f; BYTE $0xc8 // cmovns ecx, eax + WORD $0xe183; BYTE $0xf8 // and ecx, -8 + WORD $0xc829 // sub eax, ecx JE LBB3_94 - WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + WORD $0x9848 // cdqe LBB3_92: WORD $0x0e8b // mov ecx, dword 
[rsi] @@ -17511,7 +18353,7 @@ LBB3_92: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x04b60f45; BYTE $0x3e // movzx r8d, byte [r14 + rdi] + LONG $0x04b60f45; BYTE $0x3c // movzx r8d, byte [r12 + rdi] WORD $0x3045; BYTE $0xc2 // xor r10b, r8b QUAD $0x00000000fd0c8d44 // lea r9d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -17520,49 +18362,49 @@ LBB3_92: WORD $0xe3d3 // shl ebx, cl WORD $0x2044; BYTE $0xd3 // and bl, r10b WORD $0x3044; BYTE $0xc3 // xor bl, r8b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB3_92 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB3_94: LONG $0x05ffc149 // sar r15, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB3_98 - LONG $0x245c894c; BYTE $0x18 // mov qword [rsp + 24], r11 - LONG $0x247c894c; BYTE $0x40 // mov qword [rsp + 64], r15 + LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 LONG $0x247c894c; BYTE $0x38 // mov qword [rsp + 56], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LBB3_96: - LONG $0x2474894c; BYTE $0x30 // mov qword [rsp + 48], r14 + LONG $0x2464894c; BYTE $0x30 // mov qword [rsp + 48], r12 WORD $0x068b // mov eax, dword [rsi] WORD $0x4e8b; BYTE $0x04 // mov ecx, dword [rsi + 4] WORD $0x023b // cmp eax, dword [rdx] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] WORD $0x4a3b; BYTE $0x04 // cmp ecx, dword [rdx + 4] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] WORD $0x468b; BYTE $0x08 // mov eax, dword [rsi + 8] WORD $0x423b; BYTE $0x08 // cmp eax, dword [rdx + 8] - LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] + LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] WORD $0x468b; BYTE $0x0c // mov eax, dword [rsi + 12] WORD $0x423b; BYTE $0x0c // cmp eax, dword [rdx + 12] - LONG $0x2454950f; BYTE $0x15 // setne byte [rsp + 21] + LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] WORD $0x468b; BYTE $0x10 // mov eax, dword [rsi + 16] WORD $0x423b; BYTE $0x10 // cmp eax, dword [rdx + 16] - LONG $0x2454950f; BYTE $0x16 // setne byte [rsp + 22] + LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] WORD $0x468b; BYTE $0x14 // mov eax, dword [rsi + 20] WORD $0x423b; BYTE $0x14 // cmp eax, dword [rdx + 20] - LONG $0x2454950f; BYTE $0x17 // setne byte [rsp + 23] + LONG $0x2454950f; BYTE $0x03 // setne byte [rsp + 3] WORD $0x468b; BYTE $0x18 // mov eax, dword [rsi + 24] WORD $0x423b; BYTE $0x18 // cmp eax, dword [rdx + 24] - LONG $0x2454950f; BYTE $0x04 // setne byte [rsp + 4] + LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] WORD $0x468b; BYTE $0x1c // mov eax, dword [rsi + 28] WORD $0x423b; BYTE $0x1c // cmp eax, dword [rdx + 28] LONG $0xd5950f41 // setne r13b WORD $0x468b; BYTE $0x20 // mov eax, dword [rsi + 32] WORD $0x423b; BYTE $0x20 // cmp eax, dword [rdx + 32] - LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] WORD $0x468b; BYTE $0x24 // mov eax, dword [rsi + 36] WORD $0x423b; BYTE $0x24 // cmp eax, dword [rdx + 36] LONG $0xd0950f41 // setne r8b @@ -17574,165 +18416,165 @@ LBB3_96: LONG $0xd7950f41 // setne r15b WORD $0x468b; BYTE $0x30 // mov eax, dword [rsi + 48] WORD $0x423b; BYTE $0x30 // cmp eax, dword [rdx + 48] - LONG $0x2454950f; BYTE $0x05 // setne byte [rsp + 5] + LONG 
$0x2454950f; BYTE $0x06 // setne byte [rsp + 6] WORD $0x468b; BYTE $0x34 // mov eax, dword [rsi + 52] WORD $0x423b; BYTE $0x34 // cmp eax, dword [rdx + 52] - LONG $0x2454950f; BYTE $0x06 // setne byte [rsp + 6] + LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] WORD $0x468b; BYTE $0x38 // mov eax, dword [rsi + 56] WORD $0x423b; BYTE $0x38 // cmp eax, dword [rdx + 56] - LONG $0x2454950f; BYTE $0x07 // setne byte [rsp + 7] + LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] WORD $0x468b; BYTE $0x3c // mov eax, dword [rsi + 60] WORD $0x423b; BYTE $0x3c // cmp eax, dword [rdx + 60] - WORD $0x950f; BYTE $0xd3 // setne bl + LONG $0xd7950f40 // setne dil WORD $0x468b; BYTE $0x40 // mov eax, dword [rsi + 64] - WORD $0x4e8b; BYTE $0x44 // mov ecx, dword [rsi + 68] + WORD $0x5e8b; BYTE $0x44 // mov ebx, dword [rsi + 68] WORD $0x423b; BYTE $0x40 // cmp eax, dword [rdx + 64] WORD $0x468b; BYTE $0x48 // mov eax, dword [rsi + 72] - LONG $0x2454950f; BYTE $0x0a // setne byte [rsp + 10] - WORD $0x4a3b; BYTE $0x44 // cmp ecx, dword [rdx + 68] - WORD $0x4e8b; BYTE $0x4c // mov ecx, dword [rsi + 76] + LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + WORD $0x5a3b; BYTE $0x44 // cmp ebx, dword [rdx + 68] + WORD $0x5e8b; BYTE $0x4c // mov ebx, dword [rsi + 76] LONG $0xd2950f41 // setne r10b WORD $0x423b; BYTE $0x48 // cmp eax, dword [rdx + 72] WORD $0x468b; BYTE $0x50 // mov eax, dword [rsi + 80] LONG $0xd6950f41 // setne r14b - WORD $0x4a3b; BYTE $0x4c // cmp ecx, dword [rdx + 76] - WORD $0x4e8b; BYTE $0x54 // mov ecx, dword [rsi + 84] + WORD $0x5a3b; BYTE $0x4c // cmp ebx, dword [rdx + 76] + WORD $0x5e8b; BYTE $0x54 // mov ebx, dword [rsi + 84] LONG $0xd4950f41 // setne r12b WORD $0x423b; BYTE $0x50 // cmp eax, dword [rdx + 80] - LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] - WORD $0x4a3b; BYTE $0x54 // cmp ecx, dword [rdx + 84] + LONG $0x2454950f; BYTE $0x09 // setne byte [rsp + 9] + WORD $0x5a3b; BYTE $0x54 // cmp ebx, dword [rdx + 84] WORD $0x468b; BYTE $0x58 // mov eax, dword [rsi + 88] - LONG $0x2454950f; BYTE $0x0b // setne byte [rsp + 11] + LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] WORD $0x423b; BYTE $0x58 // cmp eax, dword [rdx + 88] WORD $0x468b; BYTE $0x5c // mov eax, dword [rsi + 92] - LONG $0x2454950f; BYTE $0x0c // setne byte [rsp + 12] + LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] WORD $0x423b; BYTE $0x5c // cmp eax, dword [rdx + 92] WORD $0x468b; BYTE $0x60 // mov eax, dword [rsi + 96] LONG $0xd1950f41 // setne r9b WORD $0x423b; BYTE $0x60 // cmp eax, dword [rdx + 96] WORD $0x468b; BYTE $0x64 // mov eax, dword [rsi + 100] - LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] + LONG $0x2454950f; BYTE $0x14 // setne byte [rsp + 20] WORD $0x423b; BYTE $0x64 // cmp eax, dword [rdx + 100] WORD $0x468b; BYTE $0x68 // mov eax, dword [rsi + 104] - LONG $0x2454950f; BYTE $0x0d // setne byte [rsp + 13] + LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] WORD $0x423b; BYTE $0x68 // cmp eax, dword [rdx + 104] WORD $0x468b; BYTE $0x6c // mov eax, dword [rsi + 108] - LONG $0x2454950f; BYTE $0x0e // setne byte [rsp + 14] + LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] WORD $0x423b; BYTE $0x6c // cmp eax, dword [rdx + 108] WORD $0x468b; BYTE $0x70 // mov eax, dword [rsi + 112] - LONG $0x2454950f; BYTE $0x0f // setne byte [rsp + 15] + LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] WORD $0x423b; BYTE $0x70 // cmp eax, dword [rdx + 112] WORD $0x468b; BYTE $0x74 // mov eax, dword [rsi + 116] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + 
LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] WORD $0x423b; BYTE $0x74 // cmp eax, dword [rdx + 116] WORD $0x468b; BYTE $0x78 // mov eax, dword [rsi + 120] - LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] + LONG $0x2454950f; BYTE $0x13 // setne byte [rsp + 19] WORD $0x423b; BYTE $0x78 // cmp eax, dword [rdx + 120] WORD $0x468b; BYTE $0x7c // mov eax, dword [rsi + 124] - LONG $0x2454950f; BYTE $0x11 // setne byte [rsp + 17] + LONG $0x2454950f; BYTE $0x12 // setne byte [rsp + 18] LONG $0x80ee8348 // sub rsi, -128 WORD $0x423b; BYTE $0x7c // cmp eax, dword [rdx + 124] - LONG $0xd7950f40 // setne dil - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x950f; BYTE $0xd1 // setne cl + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x28244402 // add al, byte [rsp + 40] - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x04 // movzx eax, byte [rsp + 4] + LONG $0x04244402 // add al, byte [rsp + 4] + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e5c041 // shl r13b, 7 WORD $0x0841; BYTE $0xc5 // or r13b, al - LONG $0x2444b60f; BYTE $0x14 // movzx eax, byte [rsp + 20] + LONG $0x2444b60f; BYTE $0x15 // movzx eax, byte [rsp + 21] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl + WORD $0xd808 // or al, bl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - LONG $0x24440244; BYTE $0x09 // add r8b, byte [rsp + 9] - LONG $0x244cb60f; BYTE $0x15 // movzx ecx, byte [rsp + 21] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xc108 // or cl, al - WORD $0xc889 // mov eax, ecx + LONG $0x24440244; BYTE $0x0a // add r8b, byte [rsp + 10] + LONG $0x245cb60f; BYTE $0x16 // movzx ebx, byte [rsp + 22] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xc308 // or bl, al + WORD $0xd889 // mov eax, ebx LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xc3 // or r11b, r8b - LONG $0x244cb60f; BYTE $0x16 // movzx ecx, byte [rsp + 22] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xc108 // or cl, al - WORD $0x8941; BYTE $0xc8 // mov r8d, ecx + LONG $0x245cb60f; BYTE $0x17 // movzx ebx, byte [rsp + 23] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0xc308 // or bl, al + WORD $0x8941; BYTE $0xd8 // mov r8d, ebx LONG $0x03e7c041 // shl r15b, 3 WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x244cb60f; BYTE $0x17 // movzx ecx, byte [rsp + 23] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0x0844; BYTE $0xc1 // or cl, r8b - LONG $0x2444b60f; BYTE $0x05 // movzx eax, byte [rsp + 5] + LONG $0x245cb60f; BYTE $0x03 // movzx ebx, byte [rsp + 3] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xc3 // or bl, r8b + LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x8941; BYTE $0xc0 // mov r8d, eax - LONG $0x2444b60f; BYTE $0x06 // movzx eax, byte [rsp + 6] + LONG $0x2444b60f; BYTE $0x07 // movzx eax, byte [rsp + 7] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xc0 // or al, r8b - LONG $0x44b60f44; WORD $0x0724 // movzx r8d, byte [rsp + 7] + LONG $0x44b60f44; WORD $0x0824 // movzx r8d, byte [rsp + 8] LONG $0x06e0c041 // shl r8b, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0x0844; BYTE $0xc3 // or bl, r8b - WORD $0x0841; BYTE $0xcd // or r13b, cl - WORD $0xc308 // or bl, al + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0844; BYTE $0xc7 // or dil, r8b + WORD $0x0841; BYTE $0xdd // or r13b, bl + WORD $0x0840; BYTE $0xc7 // or dil, 
al WORD $0x0045; BYTE $0xd2 // add r10b, r10b - LONG $0x24540244; BYTE $0x0a // add r10b, byte [rsp + 10] + LONG $0x24540244; BYTE $0x0b // add r10b, byte [rsp + 11] LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xf4 // or r12b, r14b - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x09 // movzx eax, byte [rsp + 9] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b - WORD $0xc189 // mov ecx, eax - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - LONG $0x2444b60f; BYTE $0x0b // movzx eax, byte [rsp + 11] + WORD $0xc389 // mov ebx, eax + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + LONG $0x2444b60f; BYTE $0x0c // movzx eax, byte [rsp + 12] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - WORD $0x8845; BYTE $0x2e // mov byte [r14], r13b - LONG $0x244cb60f; BYTE $0x0c // movzx ecx, byte [rsp + 12] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xd808 // or al, bl + LONG $0x242c8845 // mov byte [r12], r13b + LONG $0x245cb60f; BYTE $0x0d // movzx ebx, byte [rsp + 13] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xc9 // or r9b, cl - LONG $0x015e8841 // mov byte [r14 + 1], bl + WORD $0x0841; BYTE $0xd9 // or r9b, bl + LONG $0x247c8841; BYTE $0x01 // mov byte [r12 + 1], dil WORD $0x0841; BYTE $0xc1 // or r9b, al - LONG $0x2444b60f; BYTE $0x0d // movzx eax, byte [rsp + 13] - WORD $0xc000 // add al, al - LONG $0x13244402 // add al, byte [rsp + 19] - WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x0e // movzx eax, byte [rsp + 14] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xc000 // add al, al + LONG $0x14244402 // add al, byte [rsp + 20] + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x0f // movzx eax, byte [rsp + 15] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x11 // movzx eax, byte [rsp + 17] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xc808 // or al, cl - WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x12 // movzx eax, byte [rsp + 18] + WORD $0xd808 // or al, bl + WORD $0xc389 // mov ebx, eax + LONG $0x2444b60f; BYTE $0x13 // movzx eax, byte [rsp + 19] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xc808 // or al, cl - LONG $0x244cb60f; BYTE $0x11 // movzx ecx, byte [rsp + 17] - WORD $0xe1c0; BYTE $0x06 // shl cl, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xcf // or dil, cl - WORD $0x0840; BYTE $0xc7 // or dil, al - LONG $0x024e8845 // mov byte [r14 + 2], r9b - LONG $0x037e8841 // mov byte [r14 + 3], dil + WORD $0xd808 // or al, bl + LONG $0x245cb60f; BYTE $0x12 // movzx ebx, byte [rsp + 18] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + WORD $0xc108 // or cl, al + LONG $0x244c8845; BYTE $0x02 // mov byte [r12 + 2], r9b + LONG $0x244c8841; BYTE $0x03 // mov byte [r12 + 3], cl LONG $0x80c28148; WORD $0x0000; BYTE $0x00 // add rdx, 128 - LONG $0x04c68349 // add r14, 4 - LONG $0x24448348; WORD $0xff38 // add qword [rsp + 56], -1 + LONG $0x04c48349 // add r12, 4 + LONG 
$0x24448348; WORD $0xff20 // add qword [rsp + 32], -1 JNE LBB3_96 - LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + LONG $0x247c8b4c; BYTE $0x38 // mov r15, qword [rsp + 56] LBB3_98: LONG $0x05e7c149 // shl r15, 5 - WORD $0x394d; BYTE $0xdf // cmp r15, r11 + WORD $0x394d; BYTE $0xf7 // cmp r15, r14 JGE LBB3_123 - WORD $0x294d; BYTE $0xfb // sub r11, r15 + WORD $0x294d; BYTE $0xfe // sub r14, r15 WORD $0xc931 // xor ecx, ecx LBB3_100: @@ -17743,16 +18585,16 @@ LBB3_100: WORD $0xdbf6 // neg bl WORD $0x8948; BYTE $0xcf // mov rdi, rcx LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xe180; BYTE $0x07 // and cl, 7 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd820 // and al, bl WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 + WORD $0x394d; BYTE $0xc6 // cmp r14, r8 JNE LBB3_100 LBB3_123: @@ -17807,7 +18649,7 @@ DATA LCDATA3<>+0x160(SB)/8, $0xffffffffffffffff DATA LCDATA3<>+0x168(SB)/8, $0xffffffffffffffff GLOBL LCDATA3<>(SB), 8, $368 -TEXT ·_comparison_not_equal_arr_scalar_sse4(SB), $328-48 +TEXT ·_comparison_not_equal_arr_scalar_sse4(SB), $344-48 MOVQ typ+0(FP), DI MOVQ left+8(FP), SI @@ -17818,11 +18660,11 @@ TEXT ·_comparison_not_equal_arr_scalar_sse4(SB), $328-48 MOVQ SP, BP ADDQ $16, SP ANDQ $-16, SP - MOVQ BP, 304(SP) + MOVQ BP, 320(SP) LEAQ LCDATA3<>(SB), BP - WORD $0x894d; BYTE $0xc7 // mov r15, r8 - WORD $0x8949; BYTE $0xce // mov r14, rcx + WORD $0x894d; BYTE $0xc6 // mov r14, r8 + WORD $0x8949; BYTE $0xcc // mov r12, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 JG LBB4_17 WORD $0xff83; BYTE $0x03 // cmp edi, 3 @@ -17830,13 +18672,13 @@ TEXT ·_comparison_not_equal_arr_scalar_sse4(SB), $328-48 WORD $0xff83; BYTE $0x04 // cmp edi, 4 JE LBB4_83 WORD $0xff83; BYTE $0x05 // cmp edi, 5 - JE LBB4_95 + JE LBB4_99 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB4_179 + JNE LBB4_174 WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -17854,7 +18696,8 @@ LBB4_7: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 - LONG $0x04b60f45; BYTE $0x1e // movzx r8d, byte [r14 + rbx] + WORD $0x894d; BYTE $0xe1 // mov r9, r12 + LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -17863,26 +18706,26 @@ LBB4_7: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xd7 // and dil, dl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1e3c8841 // mov byte [r14 + rbx], dil + LONG $0x1c3c8841 // mov byte [r12 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB4_7 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB4_9: LONG $0x05fac149 // sar r10, 5 - LONG 
$0x20ff8349 // cmp r15, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB4_13 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 + QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 LBB4_11: - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7950f40 // setne dil + LONG $0xd2950f41 // setne r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6950f41 // setne r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d @@ -17896,13 +18739,13 @@ LBB4_11: LONG $0x1c6e3944 // cmp dword [rsi + 28], r13d WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x206e3944 // cmp dword [rsi + 32], r13d - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000e02494950f // setne byte [rsp + 224] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x950f; BYTE $0xd2 // setne dl + LONG $0xd7950f40 // setne dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1950f41 // setne r9b + LONG $0xd0950f41 // setne r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2950f41 // setne r10b + LONG $0xd1950f41 // setne r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3950f41 // setne r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d @@ -17928,122 +18771,123 @@ LBB4_11: LONG $0x5c6e3944 // cmp dword [rsi + 92], r13d LONG $0xd7950f41 // setne r15b LONG $0x606e3944 // cmp dword [rsi + 96], r13d - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x646e3944 // cmp dword [rsi + 100], r13d LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x686e3944 // cmp dword [rsi + 104], r13d - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x706e3944 // cmp dword [rsi + 112], r13d LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x746e3944 // cmp dword [rsi + 116], r13d - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xc0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 192] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000e024bc0240 // add dil, byte [rsp + 224] QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or 
r9b, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x000000a024bcb60f // movzx edi, byte [rsp + 160] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] - QUAD $0x000000b02494b60f // movzx edx, byte [rsp + 176] - WORD $0xd200 // add dl, dl - LONG $0x60245402 // add dl, byte [rsp + 96] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x8841; BYTE $0x1e // mov byte [r14], bl + QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xc000 // add al, al + LONG $0x60244402 // add al, byte [rsp + 96] + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x241c8841 // mov byte [r12], bl LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] - WORD $0xc900 // add cl, 
cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xc000 // add al, al + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al + LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b + LONG $0x24548841; BYTE $0x03 // mov byte [r12 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c68349 // add r14, 4 - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 + LONG $0x04c48349 // add r12, 4 + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 JNE LBB4_11 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] LBB4_13: LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 + WORD $0x014d; BYTE $0xf2 // add r10, r14 JE LBB4_82 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 @@ -18055,7 +18899,8 @@ LBB4_16: WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x894d; BYTE $0xe6 // mov r14, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD 
$0x01b3 // mov bl, 1 @@ -18063,7 +18908,7 @@ LBB4_16: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x046e3944 // cmp dword [rsi + 4], r13d LONG $0x08768d48 // lea rsi, [rsi + 8] @@ -18075,23 +18920,23 @@ LBB4_16: WORD $0xe2d2 // shl dl, cl WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl - LONG $0x3e148841 // mov byte [r14 + rdi], dl + LONG $0x3c148841 // mov byte [r12 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 JNE LBB4_16 - JMP LBB4_153 + JMP LBB4_151 LBB4_17: WORD $0xff83; BYTE $0x08 // cmp edi, 8 JLE LBB4_46 WORD $0xff83; BYTE $0x09 // cmp edi, 9 - JE LBB4_107 + JE LBB4_114 WORD $0xff83; BYTE $0x0b // cmp edi, 11 - JE LBB4_118 + JE LBB4_125 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB4_179 - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + JNE LBB4_174 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -18104,13 +18949,16 @@ LBB4_17: LBB4_23: LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] LONG $0x08768d48 // lea rsi, [rsi + 8] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xca08 // or dl, cl WORD $0xdaf6 // neg dl LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x894d; BYTE $0xe3 // mov r11, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -18119,198 +18967,287 @@ LBB4_23: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB4_23 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB4_25: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB4_29 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 - QUAD $0x000000982494894c // mov qword [rsp + 152], r10 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000000d82494894c // mov qword [rsp + 216], r10 + QUAD $0x000001102494894c // mov qword [rsp + 272], r10 LBB4_27: - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - QUAD $0x000000882494950f // setne byte [rsp + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x462e0f66; BYTE $0x08 // ucomisd xmm0, qword [rsi + 8] - LONG $0xd1950f41 // setne r9b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al LONG $0x462e0f66; BYTE $0x10 // ucomisd xmm0, qword [rsi + 16] - LONG $0xd6950f41 // setne r14b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne 
cl + WORD $0xc108 // or cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x462e0f66; BYTE $0x18 // ucomisd xmm0, qword [rsi + 24] - LONG $0xd5950f41 // setne r13b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x462e0f66; BYTE $0x20 // ucomisd xmm0, qword [rsi + 32] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x462e0f66; BYTE $0x28 // ucomisd xmm0, qword [rsi + 40] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd4950f41 // setne r12b + WORD $0x0841; BYTE $0xc4 // or r12b, al LONG $0x462e0f66; BYTE $0x30 // ucomisd xmm0, qword [rsi + 48] - WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x462e0f66; BYTE $0x38 // ucomisd xmm0, qword [rsi + 56] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x462e0f66; BYTE $0x40 // ucomisd xmm0, qword [rsi + 64] - QUAD $0x000000a02494950f // setne byte [rsp + 160] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x462e0f66; BYTE $0x48 // ucomisd xmm0, qword [rsi + 72] - WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x462e0f66; BYTE $0x50 // ucomisd xmm0, qword [rsi + 80] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x462e0f66; BYTE $0x58 // ucomisd xmm0, qword [rsi + 88] - LONG $0xd2950f41 // setne r10b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al LONG $0x462e0f66; BYTE $0x60 // ucomisd xmm0, qword [rsi + 96] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x18244c88 // mov byte [rsp + 24], cl LONG $0x462e0f66; BYTE $0x68 // ucomisd xmm0, qword [rsi + 104] - LONG $0xd4950f41 // setne r12b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x462e0f66; BYTE $0x70 // ucomisd xmm0, qword [rsi + 112] - QUAD $0x000000b02494950f // setne byte [rsp + 176] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x462e0f66; BYTE $0x78 // ucomisd xmm0, qword [rsi + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl QUAD $0x00000080862e0f66 // ucomisd xmm0, qword [rsi + 128] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl QUAD 
$0x00000088862e0f66 // ucomisd xmm0, qword [rsi + 136] - QUAD $0x000000c02494950f // setne byte [rsp + 192] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl QUAD $0x00000090862e0f66 // ucomisd xmm0, qword [rsi + 144] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl QUAD $0x00000098862e0f66 // ucomisd xmm0, qword [rsi + 152] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl QUAD $0x000000a0862e0f66 // ucomisd xmm0, qword [rsi + 160] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xe0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 224], cl QUAD $0x000000a8862e0f66 // ucomisd xmm0, qword [rsi + 168] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x98248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], cl QUAD $0x000000b0862e0f66 // ucomisd xmm0, qword [rsi + 176] - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] - QUAD $0x000000b8862e0f66 // ucomisd xmm0, qword [rsi + 184] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al + QUAD $0x000000b8862e0f66 // ucomisd xmm0, qword [rsi + 184] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al QUAD $0x000000c0862e0f66 // ucomisd xmm0, qword [rsi + 192] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl QUAD $0x000000c8862e0f66 // ucomisd xmm0, qword [rsi + 200] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al QUAD $0x000000d0862e0f66 // ucomisd xmm0, qword [rsi + 208] - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al QUAD $0x000000d8862e0f66 // ucomisd xmm0, qword [rsi + 216] - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al QUAD $0x000000e0862e0f66 // ucomisd xmm0, qword [rsi + 224] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xc0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 192], cl QUAD $0x000000e8862e0f66 // ucomisd xmm0, qword [rsi + 232] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xf0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 240], cl QUAD $0x000000f0862e0f66 // ucomisd xmm0, qword [rsi + 240] - LONG $0x2454950f; BYTE 
$0x08 // setne byte [rsp + 8] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al QUAD $0x000000f8862e0f66 // ucomisd xmm0, qword [rsi + 248] - LONG $0xd0950f41 // setne r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x00000088248c0244 // add r9b, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x000000b024bcb60f // movzx edi, byte [rsp + 176] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] - WORD $0xc000 // add al, al - LONG $0x60244402 // add al, byte [rsp + 96] - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x8841; BYTE $0x1e // mov byte [r14], bl - LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + WORD $0x0045; BYTE $0xed // add r13b, r13b + LONG $0x246c0244; BYTE $0x20 // add r13b, byte [rsp + 32] + WORD $0x8944; BYTE $0xe8 // mov eax, r13d + LONG $0x05e4c041 // shl r12b, 5 + LONG $0x6cb60f44; WORD $0x3824 // movzx r13d, byte [rsp + 56] + LONG $0x06e5c041 // shl r13b, 6 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + LONG $0x64b60f44; WORD $0x3024 // movzx r12d, byte [rsp + 48] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x244cb60f; BYTE $0x68 // movzx ecx, byte [rsp + 104] WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 
24] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + LONG $0x40244c02 // add cl, byte [rsp + 64] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x08244488 // mov byte [rsp + 8], al + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0xcb08 // or bl, cl + LONG $0x6cb60f44; WORD $0x2824 // movzx r13d, byte [rsp + 40] + LONG $0x04e5c041 // shl r13b, 4 + WORD $0x0841; BYTE $0xc5 // or r13b, al + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd808 // or al, bl + LONG $0x18244488 // mov byte [rsp + 24], al + LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x74b60f44; WORD $0x7024 // movzx r14d, byte [rsp + 112] + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xc6 // or r14b, al + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + WORD $0xc900 // add cl, cl + LONG $0x50244c02 // add cl, byte [rsp + 80] + QUAD $0x000000b0249cb60f // movzx ebx, byte [rsp + 176] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xcb08 // or bl, cl + QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] + QUAD $0x000000a0248cb60f // movzx ecx, byte [rsp + 160] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + QUAD $0x000000e0248cb60f // movzx ecx, byte [rsp + 224] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 + WORD $0xd908 // or cl, bl + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x0844; BYTE $0xe8 // or al, r13b + QUAD $0x00000098249cb60f // movzx ebx, byte [rsp + 152] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c041 // shl r15b, 6 + WORD $0x0841; BYTE $0xdf // or r15b, bl + LONG $0x24740a44; BYTE $0x18 // or r14b, byte [rsp + 24] LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl + WORD $0x0845; BYTE $0xf8 // or r8b, r15b WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x00000088249c0244 // add r11b, byte [rsp + 136] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + QUAD $0x000000c0248cb60f // movzx ecx, byte [rsp + 192] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xc9 // or cl, r9b + WORD $0xcb89 // mov ebx, ecx + LONG $0x24048841 // mov byte [r12], al + QUAD 
$0x000000f0248cb60f // movzx ecx, byte [rsp + 240] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x24748845; BYTE $0x01 // mov byte [r12 + 1], r14b + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0xda08 // or dl, bl + LONG $0x24448845; BYTE $0x02 // mov byte [r12 + 2], r8b + LONG $0x24548841; BYTE $0x03 // mov byte [r12 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c68349 // add r14, 4 - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 + LONG $0x04c48349 // add r12, 4 + QUAD $0x0000011024848348; BYTE $0xff // add qword [rsp + 272], -1 JNE LBB4_27 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x000000d824948b4c // mov r10, qword [rsp + 216] LBB4_29: LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB4_162 + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JNE LBB4_160 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB4_164 + JMP LBB4_162 LBB4_32: WORD $0xff83; BYTE $0x02 // cmp edi, 2 JE LBB4_60 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB4_179 + JNE LBB4_174 WORD $0x8a44; BYTE $0x1a // mov r11b, byte [rdx] - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -18328,7 +19265,8 @@ LBB4_36: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x894d; BYTE $0xe7 // mov r15, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -18337,43 +19275,43 @@ LBB4_36: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB4_36 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB4_38: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 - JL LBB4_130 + LONG $0x20fe8349 // cmp r14, 32 + JL LBB4_137 LONG $0x10fa8349 // cmp r10, 16 LONG $0x245c8844; BYTE $0x08 // mov byte [rsp + 8], r11b - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000001082494894c // mov qword [rsp + 264], r10 JB LBB4_42 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB4_180 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] + WORD $0x3949; BYTE $0xc4 // cmp r12, rax + JAE LBB4_179 + LONG $0x94048d4b // lea rax, 
[r12 + 4*r10] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB4_180 + JAE LBB4_179 LBB4_42: WORD $0xc031 // xor eax, eax - QUAD $0x000000f024848948 // mov qword [rsp + 240], rax - LONG $0x2474894c; BYTE $0x68 // mov qword [rsp + 104], r14 + QUAD $0x000000d824848948 // mov qword [rsp + 216], rax + LONG $0x2464894c; BYTE $0x68 // mov qword [rsp + 104], r12 LBB4_43: - QUAD $0x000000f024942b4c // sub r10, qword [rsp + 240] - QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 + QUAD $0x000000d824942b4c // sub r10, qword [rsp + 216] + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 LBB4_44: WORD $0x8948; BYTE $0xf1 // mov rcx, rsi WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b - QUAD $0x000000e02494950f // setne byte [rsp + 224] + QUAD $0x000000f02494950f // setne byte [rsp + 240] LONG $0x015e3844 // cmp byte [rsi + 1], r11b LONG $0xd6950f40 // setne sil LONG $0x02593844 // cmp byte [rcx + 2], r11b @@ -18395,7 +19333,7 @@ LBB4_44: LONG $0xd1950f41 // setne r9b LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x08 // cmp byte [rcx + 8], al - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000e02494950f // setne byte [rsp + 224] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x09 // cmp byte [rcx + 9], al WORD $0x950f; BYTE $0xd2 // setne dl @@ -18446,19 +19384,19 @@ LBB4_44: LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x19 // cmp byte [rcx + 25], bl - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1a // cmp byte [rcx + 26], bl LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1b // cmp byte [rcx + 27], bl - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1c // cmp byte [rcx + 28], bl LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1d // cmp byte [rcx + 29], bl - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1e // cmp byte [rcx + 30], bl QUAD $0x000000802494950f // setne byte [rsp + 128] @@ -18466,7 +19404,7 @@ LBB4_44: WORD $0x5938; BYTE $0x1f // cmp byte [rcx + 31], bl WORD $0x950f; BYTE $0xd3 // setne bl WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000e024b40240 // add sil, byte [rsp + 224] + QUAD $0x000000f024b40240 // add sil, byte [rsp + 240] QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e1c041 // shl r9b, 7 @@ -18474,7 +19412,7 @@ LBB4_44: LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xd200 // add dl, dl - LONG $0xc0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 192] + LONG $0xe0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 224] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x7cb60f44; WORD $0x0824 // movzx r15d, byte [rsp + 8] @@ -18526,7 +19464,7 @@ LBB4_44: WORD $0x0841; BYTE $0xfb // or r11b, dil LONG $0x01428844 // mov byte [rdx + 1], r8b WORD $0x0841; BYTE $0xf3 // or 
r11b, sil - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xc000 // add al, al LONG $0x40244402 // add al, byte [rsp + 64] WORD $0xc689 // mov esi, eax @@ -18534,7 +19472,7 @@ LBB4_44: WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax @@ -18542,7 +19480,7 @@ LBB4_44: WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf0 // or al, sil QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] @@ -18556,21 +19494,21 @@ LBB4_44: LONG $0x20718d48 // lea rsi, [rcx + 32] LONG $0x04c28348 // add rdx, 4 LONG $0x24548948; BYTE $0x68 // mov qword [rsp + 104], rdx - QUAD $0x000000d024848348; BYTE $0xff // add qword [rsp + 208], -1 + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 JNE LBB4_44 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - JMP LBB4_131 + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + JMP LBB4_138 LBB4_46: WORD $0xff83; BYTE $0x07 // cmp edi, 7 JE LBB4_72 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB4_179 + JNE LBB4_174 WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -18588,7 +19526,8 @@ LBB4_50: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 - LONG $0x04b60f45; BYTE $0x1e // movzx r8d, byte [r14 + rbx] + WORD $0x894d; BYTE $0xe1 // mov r9, r12 + LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -18597,26 +19536,26 @@ LBB4_50: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xd7 // and dil, dl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1e3c8841 // mov byte [r14 + rbx], dil + LONG $0x1c3c8841 // mov byte [r12 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB4_50 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB4_52: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB4_56 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 + QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 LBB4_54: - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], 
r13 QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7950f40 // setne dil + LONG $0xd2950f41 // setne r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6950f41 // setne r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 @@ -18630,13 +19569,13 @@ LBB4_54: LONG $0x386e394c // cmp qword [rsi + 56], r13 WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x406e394c // cmp qword [rsi + 64], r13 - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000e02494950f // setne byte [rsp + 224] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x950f; BYTE $0xd2 // setne dl + LONG $0xd7950f40 // setne dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1950f41 // setne r9b + LONG $0xd0950f41 // setne r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2950f41 // setne r10b + LONG $0xd1950f41 // setne r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3950f41 // setne r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 @@ -18662,123 +19601,124 @@ LBB4_54: LONG $0xb8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 184], r13 LONG $0xd7950f41 // setne r15b LONG $0xc0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 192], r13 - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0xc8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 200], r13 LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0xd0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 208], r13 - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0xe0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 224], r13 LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0xe8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 232], r13 - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xc0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 192] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000e024bc0240 // add dil, byte [rsp + 224] QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; 
BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x000000a024bcb60f // movzx edi, byte [rsp + 160] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] - QUAD $0x000000b02494b60f // movzx edx, byte [rsp + 176] - WORD $0xd200 // add dl, dl - LONG $0x60245402 // add dl, byte [rsp + 96] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x8841; BYTE $0x1e // mov byte [r14], bl + QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xc000 // add al, al + LONG $0x60244402 // add al, byte [rsp + 96] + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x241c8841 // mov byte [r12], bl LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or 
cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xc000 // add al, al + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al + LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b + LONG $0x24548841; BYTE $0x03 // mov byte [r12 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c68349 // add r14, 4 - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 + LONG $0x04c48349 // add r12, 4 + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 JNE LBB4_54 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] LBB4_56: LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JE LBB4_117 + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JE LBB4_124 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d @@ -18789,7 +19729,8 @@ LBB4_59: WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x894d; BYTE $0xe6 // mov r14, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -18797,7 +19738,7 @@ LBB4_59: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x086e394c // 
cmp qword [rsi + 8], r13 LONG $0x10768d48 // lea rsi, [rsi + 16] @@ -18809,16 +19750,16 @@ LBB4_59: WORD $0xe2d2 // shl dl, cl WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl - LONG $0x3e148841 // mov byte [r14 + rdi], dl + LONG $0x3c148841 // mov byte [r12 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 JNE LBB4_59 - JMP LBB4_168 + JMP LBB4_170 LBB4_60: WORD $0x8a44; BYTE $0x1a // mov r11b, byte [rdx] - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -18836,7 +19777,8 @@ LBB4_62: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x894d; BYTE $0xe7 // mov r15, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -18845,43 +19787,43 @@ LBB4_62: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB4_62 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB4_64: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 - JL LBB4_134 + LONG $0x20fe8349 // cmp r14, 32 + JL LBB4_141 LONG $0x10fa8349 // cmp r10, 16 LONG $0x245c8844; BYTE $0x08 // mov byte [rsp + 8], r11b - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000001002494894c // mov qword [rsp + 256], r10 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000001202494894c // mov qword [rsp + 288], r10 JB LBB4_68 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB4_183 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] + WORD $0x3949; BYTE $0xc4 // cmp r12, rax + JAE LBB4_182 + LONG $0x94048d4b // lea rax, [r12 + 4*r10] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB4_183 + JAE LBB4_182 LBB4_68: WORD $0xc031 // xor eax, eax - QUAD $0x000000f024848948 // mov qword [rsp + 240], rax - LONG $0x2474894c; BYTE $0x68 // mov qword [rsp + 104], r14 + QUAD $0x000000d824848948 // mov qword [rsp + 216], rax + LONG $0x2464894c; BYTE $0x68 // mov qword [rsp + 104], r12 LBB4_69: - QUAD $0x000000f024942b4c // sub r10, qword [rsp + 240] - QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 + QUAD $0x000000d824942b4c // sub r10, qword [rsp + 216] + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 LBB4_70: WORD $0x8948; BYTE $0xf1 // mov rcx, rsi WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b - QUAD $0x000000e02494950f // setne byte [rsp + 224] + QUAD $0x000000f02494950f // setne byte [rsp + 240] LONG $0x015e3844 // cmp byte [rsi + 1], r11b LONG $0xd6950f40 // setne sil LONG $0x02593844 // cmp byte [rcx + 2], r11b @@ -18903,7 +19845,7 @@ LBB4_70: LONG $0xd1950f41 // setne r9b LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x08 // cmp byte [rcx + 8], al - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000e02494950f // setne byte [rsp + 224] LONG $0x2444b60f; 
BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x09 // cmp byte [rcx + 9], al WORD $0x950f; BYTE $0xd2 // setne dl @@ -18954,19 +19896,19 @@ LBB4_70: LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x19 // cmp byte [rcx + 25], bl - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1a // cmp byte [rcx + 26], bl LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1b // cmp byte [rcx + 27], bl - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1c // cmp byte [rcx + 28], bl LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1d // cmp byte [rcx + 29], bl - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1e // cmp byte [rcx + 30], bl QUAD $0x000000802494950f // setne byte [rsp + 128] @@ -18974,7 +19916,7 @@ LBB4_70: WORD $0x5938; BYTE $0x1f // cmp byte [rcx + 31], bl WORD $0x950f; BYTE $0xd3 // setne bl WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000e024b40240 // add sil, byte [rsp + 224] + QUAD $0x000000f024b40240 // add sil, byte [rsp + 240] QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e1c041 // shl r9b, 7 @@ -18982,7 +19924,7 @@ LBB4_70: LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xd200 // add dl, dl - LONG $0xc0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 192] + LONG $0xe0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 224] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x7cb60f44; WORD $0x0824 // movzx r15d, byte [rsp + 8] @@ -19034,7 +19976,7 @@ LBB4_70: WORD $0x0841; BYTE $0xfb // or r11b, dil LONG $0x01428844 // mov byte [rdx + 1], r8b WORD $0x0841; BYTE $0xf3 // or r11b, sil - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xc000 // add al, al LONG $0x40244402 // add al, byte [rsp + 64] WORD $0xc689 // mov esi, eax @@ -19042,7 +19984,7 @@ LBB4_70: WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax @@ -19050,7 +19992,7 @@ LBB4_70: WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf0 // or al, sil QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] @@ -19064,17 +20006,17 @@ LBB4_70: LONG $0x20718d48 // lea rsi, [rcx + 32] LONG $0x04c28348 // add rdx, 4 LONG $0x24548948; BYTE $0x68 // mov qword [rsp + 104], rdx - QUAD $0x000000d024848348; BYTE $0xff // add qword [rsp + 
208], -1 + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 JNE LBB4_70 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x0000010024948b4c // mov r10, qword [rsp + 256] - JMP LBB4_135 + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x0000012024948b4c // mov r10, qword [rsp + 288] + JMP LBB4_142 LBB4_72: WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -19092,7 +20034,8 @@ LBB4_74: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 - LONG $0x04b60f45; BYTE $0x1e // movzx r8d, byte [r14 + rbx] + WORD $0x894d; BYTE $0xe1 // mov r9, r12 + LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -19101,26 +20044,26 @@ LBB4_74: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xd7 // and dil, dl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1e3c8841 // mov byte [r14 + rbx], dil + LONG $0x1c3c8841 // mov byte [r12 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB4_74 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB4_76: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 + LONG $0x20fe8349 // cmp r14, 32 JL LBB4_80 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 + QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 LBB4_78: - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7950f40 // setne dil + LONG $0xd2950f41 // setne r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6950f41 // setne r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d @@ -19134,13 +20077,13 @@ LBB4_78: LONG $0x1c6e3944 // cmp dword [rsi + 28], r13d WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x206e3944 // cmp dword [rsi + 32], r13d - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000e02494950f // setne byte [rsp + 224] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x950f; BYTE $0xd2 // setne dl + LONG $0xd7950f40 // setne dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1950f41 // setne r9b + LONG $0xd0950f41 // setne r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2950f41 // setne r10b + LONG $0xd1950f41 // setne r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3950f41 // setne r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d @@ -19166,133 +20109,134 @@ LBB4_78: LONG $0x5c6e3944 // cmp dword [rsi + 92], r13d LONG $0xd7950f41 // setne r15b LONG $0x606e3944 // cmp dword [rsi + 96], r13d - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x646e3944 // cmp dword 
[rsi + 100], r13d LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x686e3944 // cmp dword [rsi + 104], r13d - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x706e3944 // cmp dword [rsi + 112], r13d LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x746e3944 // cmp dword [rsi + 116], r13d - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xc0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 192] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000e024bc0240 // add dil, byte [rsp + 224] QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x000000a024bcb60f // movzx edi, byte [rsp + 160] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] - QUAD $0x000000b02494b60f // movzx edx, byte [rsp + 176] - WORD $0xd200 // add dl, dl - LONG $0x60245402 // add dl, byte [rsp + 96] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, 
edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x8841; BYTE $0x1e // mov byte [r14], bl + QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xc000 // add al, al + LONG $0x60244402 // add al, byte [rsp + 96] + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x241c8841 // mov byte [r12], bl LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xc000 // add al, al + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, 
byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al + LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b + LONG $0x24548841; BYTE $0x03 // mov byte [r12 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c68349 // add r14, 4 - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 + LONG $0x04c48349 // add r12, 4 + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 JNE LBB4_78 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] LBB4_80: LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB4_151 + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JNE LBB4_149 LBB4_82: WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB4_153 + JMP LBB4_151 LBB4_83: LONG $0x2ab70f44 // movzx r13d, word [rdx] - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -19310,7 +20254,8 @@ LBB4_85: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x894d; BYTE $0xe7 // mov r15, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -19319,38 +20264,37 @@ LBB4_85: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB4_85 - LONG $0x01c68349 // add r14, 1 + LONG $0x01c48349 // add r12, 1 LBB4_87: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 - JL LBB4_138 + LONG $0x20fe8349 // cmp r14, 32 + JL LBB4_95 LONG $0x08fa8349 // cmp r10, 8 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 JB LBB4_91 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x06e0c148 // shl rax, 6 WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB4_186 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] + WORD $0x3949; BYTE $0xc4 // cmp r12, rax + JAE LBB4_185 + LONG $0x94048d4b // lea rax, [r12 + 4*r10] WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB4_186 + JBE LBB4_185 LBB4_91: WORD $0xc031 // xor eax, eax - LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax - WORD $0x894d; BYTE $0xf4 // mov r12, r14 + LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax LBB4_92: LONG $0x2464894c; BYTE $0x08 // mov qword [rsp + 8], r12 - LONG 
$0x24542b4c; BYTE $0x18 // sub r10, qword [rsp + 24] - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + LONG $0x24542b4c; BYTE $0x20 // sub r10, qword [rsp + 32] + QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 LBB4_93: WORD $0x8949; BYTE $0xf3 // mov r11, rsi @@ -19371,7 +20315,7 @@ LBB4_93: LONG $0x6b394566; BYTE $0x0e // cmp word [r11 + 14], r13w WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x6b394566; BYTE $0x10 // cmp word [r11 + 16], r13w - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000e02494950f // setne byte [rsp + 224] LONG $0x6b394566; BYTE $0x12 // cmp word [r11 + 18], r13w WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x6b394566; BYTE $0x14 // cmp word [r11 + 20], r13w @@ -19407,13 +20351,13 @@ LBB4_93: LONG $0x6b394566; BYTE $0x32 // cmp word [r11 + 50], r13w LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x6b394566; BYTE $0x34 // cmp word [r11 + 52], r13w - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] - LONG $0x6b394566; BYTE $0x36 // cmp word [r11 + 54], r13w LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x6b394566; BYTE $0x36 // cmp word [r11 + 54], r13w + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x6b394566; BYTE $0x38 // cmp word [r11 + 56], r13w LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x6b394566; BYTE $0x3a // cmp word [r11 + 58], r13w - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] LONG $0x6b394566; BYTE $0x3c // cmp word [r11 + 60], r13w QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x6b394566; BYTE $0x3e // cmp word [r11 + 62], r13w @@ -19426,7 +20370,7 @@ LBB4_93: LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xc900 // add cl, cl - LONG $0xc0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 192] + LONG $0xe0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 224] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x02e0c041 // shl r8b, 2 @@ -19483,11 +20427,11 @@ LBB4_93: WORD $0xc000 // add al, al LONG $0x30244402 // add al, byte [rsp + 48] WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax @@ -19495,7 +20439,7 @@ LBB4_93: WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xd808 // or al, bl QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] @@ -19508,27 +20452,71 @@ LBB4_93: LONG $0x40738d49 // lea rsi, [r11 + 64] LONG $0x04c18348 // add rcx, 4 LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 JNE LBB4_93 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] LONG $0x24648b4c; BYTE $0x08 // mov 
r12, qword [rsp + 8] - JMP LBB4_139 LBB4_95: + LONG $0x05e2c149 // shl r10, 5 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 + WORD $0x294d; BYTE $0xd0 // sub r8, r10 + WORD $0xf749; BYTE $0xd2 // not r10 + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JE LBB4_113 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0x3145; BYTE $0xf6 // xor r14d, r14d + +LBB4_98: + WORD $0x8948; BYTE $0xf0 // mov rax, rsi + LONG $0x2e394466 // cmp word [rsi], r13w + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xdaf6 // neg dl + WORD $0x894c; BYTE $0xf7 // mov rdi, r14 + LONG $0x03efc148 // shr rdi, 3 + LONG $0x14b60f45; BYTE $0x3c // movzx r10d, byte [r12 + rdi] + WORD $0x8944; BYTE $0xf1 // mov ecx, r14d + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd2 // xor dl, r10b + WORD $0xd320 // and bl, dl + WORD $0x3044; BYTE $0xd3 // xor bl, r10b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + LONG $0x02c68349 // add r14, 2 + LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w + LONG $0x04768d48 // lea rsi, [rsi + 4] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xdaf6 // neg dl + WORD $0xda30 // xor dl, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0xd020 // and al, dl + WORD $0xd830 // xor al, bl + LONG $0x3c048841 // mov byte [r12 + rdi], al + WORD $0x394d; BYTE $0xf1 // cmp r9, r14 + JNE LBB4_98 + JMP LBB4_166 + +LBB4_99: LONG $0x2ab70f44 // movzx r13d, word [rdx] - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB4_99 + JE LBB4_103 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB4_97: +LBB4_101: LONG $0x2e394466 // cmp word [rsi], r13w LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x950f; BYTE $0xd2 // setne dl @@ -19537,7 +20525,8 @@ LBB4_97: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x894d; BYTE $0xe7 // mov r15, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -19546,40 +20535,39 @@ LBB4_97: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB4_97 - LONG $0x01c68349 // add r14, 1 + JNE LBB4_101 + LONG $0x01c48349 // add r12, 1 -LBB4_99: +LBB4_103: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 - JL LBB4_143 + LONG $0x20fe8349 // cmp r14, 32 + JL LBB4_111 LONG $0x08fa8349 // cmp r10, 8 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 - JB LBB4_103 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 + JB LBB4_107 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x06e0c148 // shl rax, 
6 WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB4_189 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] + WORD $0x3949; BYTE $0xc4 // cmp r12, rax + JAE LBB4_191 + LONG $0x94048d4b // lea rax, [r12 + 4*r10] WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB4_189 + JBE LBB4_191 -LBB4_103: +LBB4_107: WORD $0xc031 // xor eax, eax - LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax - WORD $0x894d; BYTE $0xf4 // mov r12, r14 + LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax -LBB4_104: +LBB4_108: LONG $0x2464894c; BYTE $0x08 // mov qword [rsp + 8], r12 - LONG $0x24542b4c; BYTE $0x18 // sub r10, qword [rsp + 24] - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + LONG $0x24542b4c; BYTE $0x20 // sub r10, qword [rsp + 32] + QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 -LBB4_105: +LBB4_109: WORD $0x8949; BYTE $0xf3 // mov r11, rsi LONG $0x2e394466 // cmp word [rsi], r13w QUAD $0x000000982494950f // setne byte [rsp + 152] @@ -19598,7 +20586,7 @@ LBB4_105: LONG $0x6b394566; BYTE $0x0e // cmp word [r11 + 14], r13w WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x6b394566; BYTE $0x10 // cmp word [r11 + 16], r13w - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000e02494950f // setne byte [rsp + 224] LONG $0x6b394566; BYTE $0x12 // cmp word [r11 + 18], r13w WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x6b394566; BYTE $0x14 // cmp word [r11 + 20], r13w @@ -19634,13 +20622,13 @@ LBB4_105: LONG $0x6b394566; BYTE $0x32 // cmp word [r11 + 50], r13w LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x6b394566; BYTE $0x34 // cmp word [r11 + 52], r13w - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] - LONG $0x6b394566; BYTE $0x36 // cmp word [r11 + 54], r13w LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x6b394566; BYTE $0x36 // cmp word [r11 + 54], r13w + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x6b394566; BYTE $0x38 // cmp word [r11 + 56], r13w LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x6b394566; BYTE $0x3a // cmp word [r11 + 58], r13w - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] LONG $0x6b394566; BYTE $0x3c // cmp word [r11 + 60], r13w QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x6b394566; BYTE $0x3e // cmp word [r11 + 62], r13w @@ -19653,7 +20641,7 @@ LBB4_105: LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xc900 // add cl, cl - LONG $0xc0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 192] + LONG $0xe0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 224] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x02e0c041 // shl r8b, 2 @@ -19710,11 +20698,11 @@ LBB4_105: WORD $0xc000 // add al, al LONG $0x30244402 // add al, byte [rsp + 48] WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax @@ -19722,7 +20710,7 @@ LBB4_105: WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE 
$0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xd808 // or al, bl QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] @@ -19735,27 +20723,40 @@ LBB4_105: LONG $0x40738d49 // lea rsi, [r11 + 64] LONG $0x04c18348 // add rcx, 4 LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 - JNE LBB4_105 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 + JNE LBB4_109 + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] - JMP LBB4_144 -LBB4_107: +LBB4_111: + LONG $0x05e2c149 // shl r10, 5 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 + WORD $0x294d; BYTE $0xd0 // sub r8, r10 + WORD $0xf749; BYTE $0xd2 // not r10 + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JNE LBB4_164 + +LBB4_113: + WORD $0x3145; BYTE $0xf6 // xor r14d, r14d + JMP LBB4_166 + +LBB4_114: WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB4_111 + JE LBB4_118 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB4_109: +LBB4_116: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] WORD $0x950f; BYTE $0xd2 // setne dl @@ -19764,7 +20765,8 @@ LBB4_109: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 - LONG $0x04b60f45; BYTE $0x1e // movzx r8d, byte [r14 + rbx] + WORD $0x894d; BYTE $0xe1 // mov r9, r12 + LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -19773,26 +20775,26 @@ LBB4_109: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xd7 // and dil, dl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1e3c8841 // mov byte [r14 + rbx], dil + LONG $0x1c3c8841 // mov byte [r12 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB4_109 - LONG $0x01c68349 // add r14, 1 + JNE LBB4_116 + LONG $0x01c48349 // add r12, 1 -LBB4_111: +LBB4_118: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 - JL LBB4_115 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + LONG $0x20fe8349 // cmp r14, 32 + JL LBB4_122 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 + QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 -LBB4_113: - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 +LBB4_120: + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7950f40 // setne dil + LONG $0xd2950f41 // setne r10b 
LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6950f41 // setne r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 @@ -19806,13 +20808,13 @@ LBB4_113: LONG $0x386e394c // cmp qword [rsi + 56], r13 WORD $0x950f; BYTE $0xd3 // setne bl LONG $0x406e394c // cmp qword [rsi + 64], r13 - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000e02494950f // setne byte [rsp + 224] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x950f; BYTE $0xd2 // setne dl + LONG $0xd7950f40 // setne dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1950f41 // setne r9b + LONG $0xd0950f41 // setne r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2950f41 // setne r10b + LONG $0xd1950f41 // setne r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3950f41 // setne r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 @@ -19838,151 +20840,155 @@ LBB4_113: LONG $0xb8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 184], r13 LONG $0xd7950f41 // setne r15b LONG $0xc0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 192], r13 - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0xc8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 200], r13 LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0xd0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 208], r13 - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0xe0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 224], r13 LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0xe8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 232], r13 - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0950f41 // setne r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x0000009824bc0240 // add dil, byte [rsp + 152] + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009824940244 // add r10b, byte [rsp + 152] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0xc0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 192] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x000000e024bc0240 // add dil, byte [rsp + 224] QUAD $0x000000882484b60f // movzx eax, byte [rsp + 136] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x2444b60f; 
BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b QUAD $0x000000a024bcb60f // movzx edi, byte [rsp + 160] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] - QUAD $0x000000b02494b60f // movzx edx, byte [rsp + 176] - WORD $0xd200 // add dl, dl - LONG $0x60245402 // add dl, byte [rsp + 96] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x8841; BYTE $0x1e // mov byte [r14], bl + QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xc000 // add al, al + LONG $0x60244402 // add al, byte [rsp + 96] + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x241c8841 // mov byte [r12], bl LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] - WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl 
- WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xc000 // add al, al + LONG $0x28244402 // add al, byte [rsp + 40] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al + LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b + LONG $0x24548841; BYTE $0x03 // mov byte [r12 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c68349 // add r14, 4 - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 - JNE LBB4_113 - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + LONG $0x04c48349 // add r12, 4 + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 + JNE LBB4_120 + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] -LBB4_115: +LBB4_122: LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB4_166 + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JNE LBB4_168 -LBB4_117: +LBB4_124: WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB4_168 + JMP LBB4_170 -LBB4_118: - LONG $0x1f578d4d // lea r10, [r15 + 31] - WORD $0x854d; BYTE $0xff // test r15, r15 - LONG $0xd7490f4d // cmovns r10, r15 +LBB4_125: + LONG $0x1f568d4d // lea r10, [r14 + 31] + WORD $0x854d; BYTE $0xf6 // test r14, r14 + LONG $0xd6490f4d // cmovns r10, r14 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x02100ff3 // movss xmm0, dword [rdx] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB4_122 + JE LBB4_129 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB4_120: +LBB4_127: WORD $0x2e0f; BYTE $0x06 // ucomiss xmm0, dword [rsi] LONG $0x04768d48 // lea rsi, [rsi + 4] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xca08 // or dl, cl WORD $0xdaf6 // neg dl LONG $0x07788d48 
// lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x894d; BYTE $0xe3 // mov r11, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -19991,216 +20997,307 @@ LBB4_120: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB4_120 - LONG $0x01c68349 // add r14, 1 + JNE LBB4_127 + LONG $0x01c48349 // add r12, 1 -LBB4_122: +LBB4_129: LONG $0x05fac149 // sar r10, 5 - LONG $0x20ff8349 // cmp r15, 32 - JL LBB4_147 + LONG $0x20fe8349 // cmp r14, 32 + JL LBB4_145 LONG $0x04fa8349 // cmp r10, 4 - JB LBB4_126 + JB LBB4_133 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x07e0c148 // shl rax, 7 WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB4_192 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] + WORD $0x3949; BYTE $0xc4 // cmp r12, rax + JAE LBB4_188 + LONG $0x94048d4b // lea rax, [r12 + 4*r10] WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB4_192 + JBE LBB4_188 -LBB4_126: +LBB4_133: WORD $0x3145; BYTE $0xc0 // xor r8d, r8d WORD $0x8948; BYTE $0xf3 // mov rbx, rsi - WORD $0x894d; BYTE $0xf3 // mov r11, r14 + WORD $0x894d; BYTE $0xe7 // mov r15, r12 -LBB4_127: - LONG $0x245c894c; BYTE $0x08 // mov qword [rsp + 8], r11 - QUAD $0x0000009024bc894c // mov qword [rsp + 144], r15 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 - WORD $0x294d; BYTE $0xc2 // sub r10, r8 - QUAD $0x000000982494894c // mov qword [rsp + 152], r10 +LBB4_134: + QUAD $0x0000008024bc894c // mov qword [rsp + 128], r15 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x000001102494894c // mov qword [rsp + 272], r10 + WORD $0x294d; BYTE $0xc2 // sub r10, r8 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 -LBB4_128: +LBB4_135: WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - QUAD $0x000000882494950f // setne byte [rsp + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + WORD $0x8941; BYTE $0xcd // mov r13d, ecx LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] - LONG $0xd0950f41 // setne r8b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al LONG $0x08432e0f // ucomiss xmm0, dword [rbx + 8] - LONG $0xd6950f41 // setne r14b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x0c432e0f // ucomiss xmm0, dword [rbx + 12] - LONG $0xd5950f41 // setne r13b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x10432e0f // ucomiss xmm0, dword [rbx + 16] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x18244c88 // mov byte [rsp + 24], cl LONG $0x14432e0f // ucomiss xmm0, dword [rbx + 20] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al 
+ LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x18432e0f // ucomiss xmm0, dword [rbx + 24] - WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x1c432e0f // ucomiss xmm0, dword [rbx + 28] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x20432e0f // ucomiss xmm0, dword [rbx + 32] - QUAD $0x000000a02494950f // setne byte [rsp + 160] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x24432e0f // ucomiss xmm0, dword [rbx + 36] - WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x28432e0f // ucomiss xmm0, dword [rbx + 40] - LONG $0xd6950f40 // setne sil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x2c432e0f // ucomiss xmm0, dword [rbx + 44] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x30432e0f // ucomiss xmm0, dword [rbx + 48] - LONG $0xd2950f41 // setne r10b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x34432e0f // ucomiss xmm0, dword [rbx + 52] - LONG $0xd4950f41 // setne r12b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x38432e0f // ucomiss xmm0, dword [rbx + 56] - QUAD $0x000000b02494950f // setne byte [rsp + 176] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl LONG $0x3c432e0f // ucomiss xmm0, dword [rbx + 60] - LONG $0xd1950f41 // setne r9b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl LONG $0x40432e0f // ucomiss xmm0, dword [rbx + 64] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x44432e0f // ucomiss xmm0, dword [rbx + 68] - QUAD $0x000000c02494950f // setne byte [rsp + 192] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd4950f41 // setne r12b + WORD $0x0841; BYTE $0xc4 // or r12b, al LONG $0x48432e0f // ucomiss xmm0, dword [rbx + 72] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl LONG $0x4c432e0f // ucomiss xmm0, dword [rbx + 76] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xa0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 160], cl LONG $0x50432e0f // ucomiss xmm0, dword [rbx + 80] - LONG 
$0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xe0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 224], cl LONG $0x54432e0f // ucomiss xmm0, dword [rbx + 84] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x98248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], cl LONG $0x58432e0f // ucomiss xmm0, dword [rbx + 88] - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al LONG $0x5c432e0f // ucomiss xmm0, dword [rbx + 92] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x60432e0f // ucomiss xmm0, dword [rbx + 96] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl LONG $0x64432e0f // ucomiss xmm0, dword [rbx + 100] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al LONG $0x68432e0f // ucomiss xmm0, dword [rbx + 104] - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al LONG $0x6c432e0f // ucomiss xmm0, dword [rbx + 108] - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x70432e0f // ucomiss xmm0, dword [rbx + 112] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al LONG $0x74432e0f // ucomiss xmm0, dword [rbx + 116] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xf0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 240], cl LONG $0x78432e0f // ucomiss xmm0, dword [rbx + 120] - QUAD $0x000000802494950f // setne byte [rsp + 128] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al LONG $0x7c432e0f // ucomiss xmm0, dword [rbx + 124] - WORD $0x950f; BYTE $0xd1 // setne cl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x0000008824840244 // add r8b, byte [rsp + 136] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xc6 // or r14b, r8b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f40 // setne sil + WORD $0x0840; BYTE $0xc6 // or sil, al WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e6c040 // shl sil, 2 - WORD $0x0840; BYTE $0xd6 // or sil, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd0 // mov r8d, edx - LONG $0x03e7c040 // shl 
dil, 3 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xc2 // or dl, r8b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x000000b024b4b60f // movzx esi, byte [rsp + 176] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xf1 // or r9b, sil - WORD $0x0841; BYTE $0xd3 // or r11b, dl - WORD $0x0845; BYTE $0xe1 // or r9b, r12b - QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] - WORD $0xc000 // add al, al - LONG $0x60244402 // add al, byte [rsp + 96] - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + WORD $0x8941; BYTE $0xd5 // mov r13d, edx + LONG $0x2454b60f; BYTE $0x20 // movzx edx, byte [rsp + 32] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xf2 // or dl, sil - LONG $0x24748b48; BYTE $0x08 // mov rsi, qword [rsp + 8] - WORD $0x8844; BYTE $0x1e // mov byte [rsi], r11b - LONG $0x247cb60f; BYTE $0x38 // movzx edi, byte [rsp + 56] - LONG $0x06e7c040 // shl dil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x014e8844 // mov byte [rsi + 1], r9b - WORD $0x0841; BYTE $0xd7 // or r15b, dl LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] + WORD $0xc900 // add cl, cl + LONG $0x30244c02 // add cl, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax + LONG $0x08244488 // mov byte [rsp + 8], al + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte 
[rsp + 40] + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + WORD $0xe2c0; BYTE $0x04 // shl dl, 4 + WORD $0xca08 // or dl, cl + LONG $0x6cb60f44; WORD $0x5824 // movzx r13d, byte [rsp + 88] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + LONG $0x6cb60f44; WORD $0x7024 // movzx r13d, byte [rsp + 112] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xcd // or r13b, cl + WORD $0x0045; BYTE $0xe4 // add r12b, r12b + LONG $0x24640244; BYTE $0x60 // add r12b, byte [rsp + 96] + WORD $0x8944; BYTE $0xe1 // mov ecx, r12d + QUAD $0x0000b024a4b60f44; BYTE $0x00 // movzx r12d, byte [rsp + 176] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xcc // or r12b, cl + QUAD $0x000000a0248cb60f // movzx ecx, byte [rsp + 160] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0x0844; BYTE $0xe1 // or cl, r12b + WORD $0x8941; BYTE $0xcc // mov r12d, ecx + QUAD $0x000000e0248cb60f // movzx ecx, byte [rsp + 224] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xe1 // or cl, r12b + LONG $0x64b60f44; WORD $0x0824 // movzx r12d, byte [rsp + 8] + WORD $0x0841; BYTE $0xc4 // or r12b, al + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xd008 // or al, dl - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0xd108 // or cl, dl - WORD $0xc108 // or cl, al - LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl + LONG $0x06e6c041 // shl r14b, 6 + WORD $0x0841; BYTE $0xc6 // or r14b, al + WORD $0x0841; BYTE $0xd5 // or r13b, dl + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xf0 // or r8b, r14b + WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x00000088249c0244 // add r11b, byte [rsp + 136] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + LONG $0x04e7c041 // shl r15b, 4 + WORD $0x0845; BYTE $0xcf // or r15b, r9b + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + WORD $0x8844; BYTE $0x20 // mov byte [rax], r12b + QUAD $0x000000f0248cb60f // movzx ecx, byte [rsp + 240] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x01688844 // mov byte [rax + 1], r13b + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0844; BYTE $0xfe // or sil, r15b + LONG $0x02408844 // mov byte [rax + 2], r8b + LONG $0x03708840 // mov byte [rax + 3], sil LONG $0x80c38148; WORD $0x0000; BYTE $0x00 // add rbx, 128 - LONG $0x04c68348 // add rsi, 4 - LONG $0x24748948; BYTE $0x08 // mov qword [rsp + 8], rsi - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB4_128 - LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] - JMP LBB4_148 + LONG $0x04c08348 // add rax, 4 + QUAD $0x0000008024848948 // mov qword [rsp + 128], rax + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 + JNE LBB4_135 + QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x0000011024948b4c // 
mov r10, qword [rsp + 272] + JMP LBB4_146 -LBB4_130: - LONG $0x2474894c; BYTE $0x68 // mov qword [rsp + 104], r14 +LBB4_137: + LONG $0x2464894c; BYTE $0x68 // mov qword [rsp + 104], r12 -LBB4_131: +LBB4_138: LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JE LBB4_137 + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JE LBB4_144 WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d LONG $0x24748b4c; BYTE $0x68 // mov r14, qword [rsp + 104] -LBB4_156: +LBB4_154: WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x0e1c3846 // cmp byte [rsi + r9], r11b WORD $0x950f; BYTE $0xd3 // setne bl @@ -20228,129 +21325,65 @@ LBB4_156: WORD $0xd030 // xor al, dl LONG $0x3e048841 // mov byte [r14 + rdi], al WORD $0x394d; BYTE $0xca // cmp r10, r9 - JNE LBB4_156 - JMP LBB4_159 + JNE LBB4_154 + JMP LBB4_157 -LBB4_134: - LONG $0x2474894c; BYTE $0x68 // mov qword [rsp + 104], r14 +LBB4_141: + LONG $0x2464894c; BYTE $0x68 // mov qword [rsp + 104], r12 -LBB4_135: +LBB4_142: LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB4_157 + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JNE LBB4_155 -LBB4_137: +LBB4_144: WORD $0x3145; BYTE $0xc9 // xor r9d, r9d LONG $0x01c0f641 // test r8b, 1 - JE LBB4_179 - JMP LBB4_161 + JE LBB4_174 + JMP LBB4_159 -LBB4_138: - WORD $0x894d; BYTE $0xf4 // mov r12, r14 +LBB4_145: + WORD $0x894d; BYTE $0xe7 // mov r15, r12 + WORD $0x8948; BYTE $0xf3 // mov rbx, rsi -LBB4_139: +LBB4_146: LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x394d; BYTE $0xf2 // cmp r10, r14 + JGE LBB4_174 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JE LBB4_146 - WORD $0x894d; BYTE $0xc1 // mov r9, r8 - LONG $0xfee18349 // and r9, -2 - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d + WORD $0x014d; BYTE $0xf2 // add r10, r14 + JNE LBB4_175 + WORD $0xf631 // xor esi, esi + JMP LBB4_177 -LBB4_142: - WORD $0x8948; BYTE $0xf0 // mov rax, rsi - LONG $0x2e394466 // cmp word [rsi], r13w - WORD $0x950f; BYTE $0xd2 // setne dl - WORD $0xdaf6 // neg dl - WORD $0x894c; BYTE $0xf7 // mov rdi, r14 +LBB4_149: + WORD $0x894d; BYTE $0xc2 // mov r10, r8 + LONG $0xfee28349 // and r10, -2 + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + +LBB4_150: + WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x14b60f45; BYTE $0x3c // movzx r10d, byte [r12 + rdi] - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d + WORD $0x894d; BYTE $0xe6 // mov r14, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl - WORD 
$0x3044; BYTE $0xd2 // xor dl, r10b - WORD $0xd320 // and bl, dl - WORD $0x3044; BYTE $0xd3 // xor bl, r10b + WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xcb // xor bl, r9b LONG $0x3c1c8841 // mov byte [r12 + rdi], bl - LONG $0x02c68349 // add r14, 2 - LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0x04768d48 // lea rsi, [rsi + 4] - WORD $0x950f; BYTE $0xd2 // setne dl - WORD $0xdaf6 // neg dl - WORD $0xda30 // xor dl, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd020 // and al, dl - WORD $0xd830 // xor al, bl - LONG $0x3c048841 // mov byte [r12 + rdi], al - WORD $0x394d; BYTE $0xf1 // cmp r9, r14 - JNE LBB4_142 - JMP LBB4_173 - -LBB4_143: - WORD $0x894d; BYTE $0xf4 // mov r12, r14 - -LBB4_144: - LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 - WORD $0x294d; BYTE $0xd0 // sub r8, r10 - WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB4_171 - -LBB4_146: - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d - JMP LBB4_173 - -LBB4_147: - WORD $0x894d; BYTE $0xf3 // mov r11, r14 - WORD $0x8948; BYTE $0xf3 // mov rbx, rsi - -LBB4_148: - LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB4_179 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 - WORD $0x294d; BYTE $0xd0 // sub r8, r10 - WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB4_175 - WORD $0xf631 // xor esi, esi - JMP LBB4_177 - -LBB4_151: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d - -LBB4_152: - WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xdf // mov rdi, r11 - LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x8944; BYTE $0xd9 // mov ecx, r11d - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x046e3944 // cmp dword [rsi + 4], r13d LONG $0x08768d48 // lea rsi, [rsi + 8] @@ -20362,23 +21395,23 @@ LBB4_152: WORD $0xe2d2 // shl dl, cl WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl - LONG $0x3e148841 // mov byte [r14 + rdi], dl + LONG $0x3c148841 // mov byte [r12 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB4_152 + JNE LBB4_150 -LBB4_153: +LBB4_151: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_179 + JE LBB4_174 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - JMP LBB4_170 + JMP LBB4_172 -LBB4_157: +LBB4_155: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d LONG $0x24748b4c; BYTE $0x68 // mov r14, qword [rsp + 104] -LBB4_158: +LBB4_156: WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x0e1c3846 // cmp byte [rsi + r9], r11b WORD $0x950f; BYTE $0xd3 // setne bl @@ -20406,14 +21439,14 @@ LBB4_158: WORD $0xd030 // xor al, dl LONG $0x3e048841 // mov byte [r14 + rdi], al WORD $0x394d; BYTE $0xca // cmp r10, r9 - JNE LBB4_158 + JNE LBB4_156 -LBB4_159: +LBB4_157: WORD $0x014c; BYTE $0xce // add rsi, r9 LONG $0x01c0f641 // test r8b, 1 - JE LBB4_179 + JE LBB4_174 -LBB4_161: +LBB4_159: WORD $0x3844; BYTE $0x1e 
// cmp byte [rsi], r11b WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -20429,32 +21462,37 @@ LBB4_161: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB4_179 + JMP LBB4_174 -LBB4_162: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 +LBB4_160: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB4_163: +LBB4_161: LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xc808 // or al, cl WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0x894d; BYTE $0xe6 // mov r14, r12 + LONG $0x14b60f45; BYTE $0x3c // movzx r10d, byte [r12 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd0 // xor al, r10b WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + WORD $0x3044; BYTE $0xd3 // xor bl, r10b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x462e0f66; BYTE $0x08 // ucomisd xmm0, qword [rsi + 8] LONG $0x10768d48 // lea rsi, [rsi + 16] + LONG $0xd29a0f41 // setp r10b WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x0844; BYTE $0xd0 // or al, r10b WORD $0xd8f6 // neg al WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 @@ -20462,78 +21500,37 @@ LBB4_163: WORD $0xe2d2 // shl dl, cl WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl - LONG $0x3e148841 // mov byte [r14 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB4_163 - -LBB4_164: - LONG $0x01c0f641 // test r8b, 1 - JE LBB4_179 - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - JMP LBB4_170 - -LBB4_166: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d - -LBB4_167: - WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xdf // mov rdi, r11 - LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x8944; BYTE $0xd9 // mov ecx, r11d - WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3e1c8841 // mov byte [r14 + rdi], bl - LONG $0x02c38349 // add r11, 2 - LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0x10768d48 // lea rsi, [rsi + 16] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0xd830 // xor al, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b2 // mov dl, 1 - WORD $0xe2d2 // shl dl, cl - WORD $0xc220 // and dl, al - WORD $0xda30 // xor dl, bl - LONG $0x3e148841 // mov byte [r14 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB4_167 + LONG $0x3c148841 // mov byte [r12 + rdi], dl + WORD $0x394d; BYTE $0xd9 // cmp r9, r11 + JNE LBB4_161 -LBB4_168: +LBB4_162: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_179 - WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - -LBB4_170: - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xda // mov rdx, r11 - LONG $0x03eac148 // shr 
rdx, 3 - LONG $0x16348a41 // mov sil, byte [r14 + rdx] + JE LBB4_174 + LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al + WORD $0xdaf6 // neg dl + WORD $0x894c; BYTE $0xd8 // mov rax, r11 + LONG $0x03e8c148 // shr rax, 3 + LONG $0x04348a41 // mov sil, byte [r12 + rax] LONG $0x07e38041 // and r11b, 7 WORD $0x01b3 // mov bl, 1 WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf0 // xor al, sil - WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xf2 // xor dl, sil + WORD $0xd320 // and bl, dl WORD $0x3040; BYTE $0xf3 // xor bl, sil - LONG $0x161c8841 // mov byte [r14 + rdx], bl - JMP LBB4_179 + LONG $0x041c8841 // mov byte [r12 + rax], bl + JMP LBB4_174 -LBB4_171: +LBB4_164: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xf6 // xor r14d, r14d -LBB4_172: +LBB4_165: WORD $0x8948; BYTE $0xf0 // mov rax, rsi LONG $0x2e394466 // cmp word [rsi], r13w WORD $0x950f; BYTE $0xd2 // setne dl @@ -20562,11 +21559,11 @@ LBB4_172: WORD $0xd830 // xor al, bl LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x394d; BYTE $0xf1 // cmp r9, r14 - JNE LBB4_172 + JNE LBB4_165 -LBB4_173: +LBB4_166: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_179 + JE LBB4_174 LONG $0x2e394466 // cmp word [rsi], r13w WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -20580,85 +21577,149 @@ LBB4_173: WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil - LONG $0x141c8841 // mov byte [r12 + rdx], bl - JMP LBB4_179 + JMP LBB4_173 -LBB4_175: +LBB4_168: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + +LBB4_169: + WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xdf // mov rdi, r11 + LONG $0x03efc148 // shr rdi, 3 + WORD $0x894d; BYTE $0xe6 // mov r14, r12 + LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xc320 // and bl, al + WORD $0x3044; BYTE $0xcb // xor bl, r9b + LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + LONG $0x02c38349 // add r11, 2 + LONG $0x086e394c // cmp qword [rsi + 8], r13 + LONG $0x10768d48 // lea rsi, [rsi + 16] + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0xd830 // xor al, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b2 // mov dl, 1 + WORD $0xe2d2 // shl dl, cl + WORD $0xc220 // and dl, al + WORD $0xda30 // xor dl, bl + LONG $0x3c148841 // mov byte [r12 + rdi], dl + WORD $0x394d; BYTE $0xda // cmp r10, r11 + JNE LBB4_169 + +LBB4_170: + LONG $0x01c0f641 // test r8b, 1 + JE LBB4_174 + WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 + +LBB4_172: + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xda // mov rdx, r11 + LONG $0x03eac148 // shr rdx, 3 + LONG $0x14348a41 // mov sil, byte [r12 + rdx] + LONG $0x07e38041 // and r11b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf0 // xor al, sil + WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xf3 // xor bl, sil + +LBB4_173: + LONG $0x141c8841 // mov byte [r12 + rdx], bl + +LBB4_174: + MOVQ 320(SP), SP + RET + 
+LBB4_175: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0xf631 // xor esi, esi - WORD $0x894d; BYTE $0xde // mov r14, r11 + WORD $0x894d; BYTE $0xfe // mov r14, r15 LBB4_176: WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xca08 // or dl, cl WORD $0xdaf6 // neg dl WORD $0x8948; BYTE $0xf7 // mov rdi, rsi LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xca // xor dl, r9b + LONG $0x14b60f45; BYTE $0x3e // movzx r10d, byte [r14 + rdi] WORD $0xf189 // mov ecx, esi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd020 // and al, dl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl + WORD $0x3044; BYTE $0xd2 // xor dl, r10b + WORD $0x2041; BYTE $0xd3 // and r11b, dl + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + LONG $0x3e1c8845 // mov byte [r14 + rdi], r11b LONG $0x02c68348 // add rsi, 2 LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] LONG $0x085b8d48 // lea rbx, [rbx + 8] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xc1 // xor r9b, al + LONG $0xd29a0f41 // setp r10b + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0844; BYTE $0xd2 // or dl, r10b + WORD $0xdaf6 // neg dl + WORD $0x3044; BYTE $0xda // xor dl, r11b WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b2 // mov dl, 1 - WORD $0xe2d2 // shl dl, cl - WORD $0x2044; BYTE $0xca // and dl, r9b - WORD $0xc230 // xor dl, al - LONG $0x3e148841 // mov byte [r14 + rdi], dl - WORD $0x3949; BYTE $0xf2 // cmp r10, rsi + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0xd020 // and al, dl + WORD $0x3044; BYTE $0xd8 // xor al, r11b + LONG $0x3e048841 // mov byte [r14 + rdi], al + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi JNE LBB4_176 LBB4_177: LONG $0x01c0f641 // test r8b, 1 - JE LBB4_179 + JE LBB4_174 WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xf2 // mov rdx, rsi - LONG $0x03eac148 // shr rdx, 3 - LONG $0x133c8a41 // mov dil, byte [r11 + rdx] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al + WORD $0xdaf6 // neg dl + WORD $0x8948; BYTE $0xf0 // mov rax, rsi + LONG $0x03e8c148 // shr rax, 3 + LONG $0x073c8a41 // mov dil, byte [r15 + rax] LONG $0x07e68040 // and sil, 7 WORD $0x01b3 // mov bl, 1 WORD $0xf189 // mov ecx, esi WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf8 // xor al, dil - WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xfa // xor dl, dil + WORD $0xd320 // and bl, dl WORD $0x3040; BYTE $0xfb // xor bl, dil - LONG $0x131c8841 // mov byte [r11 + rdx], bl + LONG $0x071c8841 // mov byte [r15 + rax], bl + JMP LBB4_174 LBB4_179: - MOVQ 304(SP), SP - RET - -LBB4_180: LONG $0xf0e28349 // and r10, -16 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi - QUAD $0x0000012024848948 // mov qword [rsp + 288], rax - QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] + QUAD $0x0000013024848948 // mov qword [rsp + 304], rax + QUAD $0x000000d82494894c // mov qword [rsp + 216], r10 + LONG $0x94048d4b // lea rax, [r12 + 4*r10] LONG $0x24448948; BYTE $0x68 // mov 
qword [rsp + 104], rax LONG $0xc3b60f41 // movzx eax, r11b LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 - QUAD $0x000100248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm1 + QUAD $0x000120248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 288], xmm1 WORD $0xc031 // xor eax, eax - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 -LBB4_181: +LBB4_180: WORD $0x8949; BYTE $0xc1 // mov r9, rax QUAD $0x0000009824848948 // mov qword [rsp + 152], rax WORD $0x8948; BYTE $0xc1 // mov rcx, rax @@ -20695,14 +21756,14 @@ LBB4_181: LONG $0x6e0f4466; BYTE $0xc9 // movd xmm9, ecx LONG $0x164cb60f; BYTE $0x09 // movzx ecx, byte [rsi + rdx + 9] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x0000d024847f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm0 + QUAD $0x0000c024847f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm0 LONG $0x164cb60f; BYTE $0x0a // movzx ecx, byte [rsi + rdx + 10] LONG $0x6e0f4466; BYTE $0xe1 // movd xmm12, ecx LONG $0x164cb60f; BYTE $0x0b // movzx ecx, byte [rsi + rdx + 11] LONG $0x6e0f4466; BYTE $0xe9 // movd xmm13, ecx LONG $0x164cb60f; BYTE $0x0c // movzx ecx, byte [rsi + rdx + 12] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x0000e024847f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm0 + QUAD $0x0000f024847f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm0 LONG $0x164cb60f; BYTE $0x0d // movzx ecx, byte [rsi + rdx + 13] LONG $0x6e0f4466; BYTE $0xd9 // movd xmm11, ecx LONG $0x164cb60f; BYTE $0x0e // movzx ecx, byte [rsi + rdx + 14] @@ -20710,10 +21771,10 @@ LBB4_181: LONG $0x164cb60f; BYTE $0x0f // movzx ecx, byte [rsi + rdx + 15] LONG $0xc16e0f66 // movd xmm0, ecx QUAD $0x0000b024847f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm0 - LONG $0x24548948; BYTE $0x18 // mov qword [rsp + 24], rdx + LONG $0x24548948; BYTE $0x20 // mov qword [rsp + 32], rdx WORD $0x8948; BYTE $0xd1 // mov rcx, rdx LONG $0x20c98348 // or rcx, 32 - LONG $0x244c8948; BYTE $0x28 // mov qword [rsp + 40], rcx + LONG $0x244c8948; BYTE $0x18 // mov qword [rsp + 24], rcx LONG $0x40cb8349 // or r11, 64 LONG $0x245c894c; BYTE $0x70 // mov qword [rsp + 112], r11 LONG $0x60c88349 // or r8, 96 @@ -20726,9 +21787,9 @@ LBB4_181: LONG $0xe0cc8149; WORD $0x0000; BYTE $0x00 // or r12, 224 LONG $0x00cf8149; WORD $0x0001; BYTE $0x00 // or r15, 256 LONG $0x20cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 288 - QUAD $0x000000c024bc8948 // mov qword [rsp + 192], rdi + QUAD $0x000000e024bc8948 // mov qword [rsp + 224], rdi LONG $0x40c98149; WORD $0x0001; BYTE $0x00 // or r9, 320 - LONG $0x244c894c; BYTE $0x20 // mov qword [rsp + 32], r9 + LONG $0x244c894c; BYTE $0x28 // mov qword [rsp + 40], r9 LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] LONG $0x60cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 352 LONG $0x245c8948; BYTE $0x30 // mov qword [rsp + 48], rbx @@ -20742,7 +21803,7 @@ LBB4_181: WORD $0x8948; BYTE $0xd1 // mov rcx, rdx LONG $0xe0c98148; WORD $0x0001; BYTE $0x00 // or rcx, 480 LONG $0x244c8948; BYTE $0x38 // mov qword [rsp + 56], rcx - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] LONG $0x203a0f66; WORD $0x1624; BYTE $0x01 // pinsrb xmm4, byte [rsi + rdx], 1 QUAD $0x021e24203a0f4266 // pinsrb xmm4, byte [rsi + r11], 2 QUAD $0x030624203a0f4266 // pinsrb xmm4, byte [rsi + r8], 3 @@ -20760,7 +21821,7 @@ LBB4_181: LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] LONG $0x203a0f66; WORD $0x1e24; BYTE 
$0x0e // pinsrb xmm4, byte [rsi + rbx], 14 LONG $0x203a0f66; WORD $0x0e24; BYTE $0x0f // pinsrb xmm4, byte [rsi + rcx], 15 - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] + LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] QUAD $0x01011e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 1], 1 QUAD $0x011e5c203a0f4266; BYTE $0x02 // pinsrb xmm3, byte [rsi + r11 + 1], 2 QUAD $0x01065c203a0f4266; BYTE $0x03 // pinsrb xmm3, byte [rsi + r8 + 1], 3 @@ -20781,17 +21842,17 @@ LBB4_181: LONG $0x24448948; BYTE $0x58 // mov qword [rsp + 88], rax LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x0e01065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 1], 14 - QUAD $0x00010024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 256] + QUAD $0x00012024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 288] LONG $0xe6740f66 // pcmpeqb xmm4, xmm6 QUAD $0x0f010e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 1], 15 LONG $0xde740f66 // pcmpeqb xmm3, xmm6 QUAD $0x00000100856f0f66 // movdqa xmm0, oword 256[rbp] /* [rip + .LCPI4_16] */ LONG $0xd8df0f66 // pandn xmm3, xmm0 LONG $0xdcfc0f66 // paddb xmm3, xmm4 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] LONG $0x0654b60f; BYTE $0x10 // movzx edx, byte [rsi + rax + 16] LONG $0x6e0f4466; BYTE $0xd2 // movd xmm10, edx - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0102066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 2], 1 LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] QUAD $0x02166c203a0f4266; BYTE $0x02 // pinsrb xmm5, byte [rsi + r10 + 2], 2 @@ -20804,9 +21865,9 @@ LBB4_181: WORD $0x894c; BYTE $0xeb // mov rbx, r13 QUAD $0x022e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r13 + 2], 7 QUAD $0x023e6c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r15 + 2], 8 - QUAD $0x000000c024948b48 // mov rdx, qword [rsp + 192] + QUAD $0x000000e024948b48 // mov rdx, qword [rsp + 224] QUAD $0x0902166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 2], 9 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a02066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 2], 10 QUAD $0x02266c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r12 + 2], 11 LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] @@ -20816,7 +21877,7 @@ LBB4_181: QUAD $0x022e6c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rsi + r13 + 2], 14 LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] QUAD $0x02066c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r8 + 2], 15 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0103067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 3], 1 QUAD $0x03167c203a0f4266; BYTE $0x02 // pinsrb xmm7, byte [rsi + r10 + 3], 2 QUAD $0x03033e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 3], 3 @@ -20826,7 +21887,7 @@ LBB4_181: QUAD $0x07031e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 3], 7 QUAD $0x033e7c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r15 + 3], 8 QUAD $0x0903167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 3], 9 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a03067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 3], 10 QUAD $0x03267c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r12 + 3], 11 QUAD $0x0c030e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 3], 12 @@ 
-20834,7 +21895,7 @@ LBB4_181: QUAD $0x0d03067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 3], 13 QUAD $0x032e7c203a0f4266; BYTE $0x0e // pinsrb xmm7, byte [rsi + r13 + 3], 14 QUAD $0x03067c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r8 + 3], 15 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0104064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 4], 1 QUAD $0x04164c203a0f4266; BYTE $0x02 // pinsrb xmm1, byte [rsi + r10 + 4], 2 QUAD $0x03043e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 4], 3 @@ -20848,7 +21909,7 @@ LBB4_181: QUAD $0x043e4c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r15 + 4], 8 QUAD $0x0904164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 4], 9 WORD $0x8948; BYTE $0xd3 // mov rbx, rdx - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x0a04164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 4], 10 QUAD $0x04264c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r12 + 4], 11 QUAD $0x0c040e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 4], 12 @@ -20863,7 +21924,7 @@ LBB4_181: QUAD $0x00000120856f0f66 // movdqa xmm0, oword 288[rbp] /* [rip + .LCPI4_18] */ LONG $0xf8df0f66 // pandn xmm7, xmm0 LONG $0xfdeb0f66 // por xmm7, xmm5 - LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] LONG $0x0e54b60f; BYTE $0x11 // movzx edx, byte [rsi + rcx + 17] LONG $0xe26e0f66 // movd xmm4, edx LONG $0xce740f66 // pcmpeqb xmm1, xmm6 @@ -20877,7 +21938,7 @@ LBB4_181: LONG $0xcbeb0f66 // por xmm1, xmm3 LONG $0x0e54b60f; BYTE $0x13 // movzx edx, byte [rsi + rcx + 19] LONG $0xea6e0f66 // movd xmm5, edx - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x01051654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 5], 1 QUAD $0x051654203a0f4266; BYTE $0x02 // pinsrb xmm2, byte [rsi + r10 + 5], 2 LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] @@ -20892,7 +21953,7 @@ LBB4_181: QUAD $0x053e54203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r15 + 5], 8 WORD $0x8949; BYTE $0xd9 // mov r9, rbx QUAD $0x09051e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 5], 9 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0a050654203a0f66 // pinsrb xmm2, byte [rsi + rax + 5], 10 LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x0b050e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 5], 11 @@ -20952,7 +22013,7 @@ LBB4_181: QUAD $0x00000150856f0f66 // movdqa xmm0, oword 336[rbp] /* [rip + .LCPI4_21] */ LONG $0xdf0f4466; BYTE $0xc0 // pandn xmm8, xmm0 LONG $0xeb0f4466; BYTE $0xc2 // por xmm8, xmm2 - LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] + LONG $0x244c8b4c; BYTE $0x20 // mov r9, qword [rsp + 32] LONG $0x54b60f42; WORD $0x140e // movzx edx, byte [rsi + r9 + 20] LONG $0xda6e0f66 // movd xmm3, edx WORD $0x8948; BYTE $0xc8 // mov rax, rcx @@ -20963,7 +22024,7 @@ LBB4_181: LONG $0xeb0f4566; BYTE $0xf0 // por xmm14, xmm8 LONG $0x54b60f42; WORD $0x150e // movzx edx, byte [rsi + r9 + 21] LONG $0xd26e0f66 // movd xmm2, edx - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] QUAD $0x080e4c203a0f4466; BYTE $0x01 // pinsrb xmm9, byte [rsi + rcx + 8], 1 QUAD $0x08164c203a0f4666; BYTE $0x02 // pinsrb xmm9, byte [rsi + r10 + 8], 2 LONG $0x24448b4c; BYTE $0x40 // mov r8, 
qword [rsp + 64] @@ -20976,9 +22037,9 @@ LBB4_181: QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] QUAD $0x083e4c203a0f4666; BYTE $0x07 // pinsrb xmm9, byte [rsi + r15 + 8], 7 QUAD $0x08264c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r12 + 8], 8 - QUAD $0x000000c024a48b4c // mov r12, qword [rsp + 192] + QUAD $0x000000e024a48b4c // mov r12, qword [rsp + 224] QUAD $0x08264c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r12 + 8], 9 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x08164c203a0f4466; BYTE $0x0a // pinsrb xmm9, byte [rsi + rdx + 8], 10 QUAD $0x081e4c203a0f4466; BYTE $0x0b // pinsrb xmm9, byte [rsi + rbx + 8], 11 QUAD $0x08364c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r14 + 8], 12 @@ -20991,7 +22052,7 @@ LBB4_181: LONG $0xca6e0f66 // movd xmm1, edx LONG $0xc66f0f66 // movdqa xmm0, xmm6 LONG $0x740f4466; BYTE $0xce // pcmpeqb xmm9, xmm6 - QUAD $0x00d024b46f0f4466; WORD $0x0000 // movdqa xmm14, oword [rsp + 208] + QUAD $0x00c024b46f0f4466; WORD $0x0000 // movdqa xmm14, oword [rsp + 192] QUAD $0x090e74203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rsi + rcx + 9], 1 QUAD $0x091674203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rsi + r10 + 9], 2 QUAD $0x090674203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rsi + r8 + 9], 3 @@ -21006,7 +22067,7 @@ LBB4_181: QUAD $0x093e74203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r15 + 9], 8 WORD $0x894d; BYTE $0xe1 // mov r9, r12 QUAD $0x092674203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r12 + 9], 9 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + LONG $0x246c8b4c; BYTE $0x28 // mov r13, qword [rsp + 40] QUAD $0x092e74203a0f4666; BYTE $0x0a // pinsrb xmm14, byte [rsi + r13 + 9], 10 QUAD $0x091e74203a0f4466; BYTE $0x0b // pinsrb xmm14, byte [rsi + rbx + 9], 11 WORD $0x894d; BYTE $0xf4 // mov r12, r14 @@ -21057,7 +22118,7 @@ LBB4_181: LONG $0x740f4466; BYTE $0xf6 // pcmpeqb xmm14, xmm6 QUAD $0x000100b5df0f4466; BYTE $0x00 // pandn xmm14, oword 256[rbp] /* [rip + .LCPI4_16] */ LONG $0xfc0f4566; BYTE $0xf1 // paddb xmm14, xmm9 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] LONG $0x0654b60f; BYTE $0x17 // movzx edx, byte [rsi + rax + 23] LONG $0x6e0f4466; BYTE $0xc2 // movd xmm8, edx LONG $0x740f4466; BYTE $0xe6 // pcmpeqb xmm12, xmm6 @@ -21067,8 +22128,8 @@ LBB4_181: LONG $0xeb0f4566; BYTE $0xec // por xmm13, xmm12 LONG $0x0654b60f; BYTE $0x18 // movzx edx, byte [rsi + rax + 24] LONG $0x6e0f4466; BYTE $0xe2 // movd xmm12, edx - QUAD $0x00e0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 224] - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x00f0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 240] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0c064c203a0f4466; BYTE $0x01 // pinsrb xmm9, byte [rsi + rax + 12], 1 QUAD $0x0c164c203a0f4666; BYTE $0x02 // pinsrb xmm9, byte [rsi + r10 + 12], 2 WORD $0x894c; BYTE $0xc0 // mov rax, r8 @@ -21084,7 +22145,7 @@ LBB4_181: LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] QUAD $0x0c0e4c203a0f4466; BYTE $0x08 // pinsrb xmm9, byte [rsi + rcx + 12], 8 QUAD $0x0c0e4c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r9 + 12], 9 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0c3e4c203a0f4466; BYTE $0x0a // pinsrb xmm9, byte [rsi + rdi + 12], 10 QUAD $0x0c3e4c203a0f4666; BYTE $0x0b 
// pinsrb xmm9, byte [rsi + r15 + 12], 11 QUAD $0x0c264c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r12 + 12], 12 @@ -21093,7 +22154,7 @@ LBB4_181: QUAD $0x0c164c203a0f4466; BYTE $0x0e // pinsrb xmm9, byte [rsi + rdx + 12], 14 LONG $0x24548b48; BYTE $0x38 // mov rdx, qword [rsp + 56] QUAD $0x0c164c203a0f4466; BYTE $0x0f // pinsrb xmm9, byte [rsi + rdx + 12], 15 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x0d165c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rsi + rdx + 13], 1 QUAD $0x0d165c203a0f4666; BYTE $0x02 // pinsrb xmm11, byte [rsi + r10 + 13], 2 QUAD $0x0d065c203a0f4466; BYTE $0x03 // pinsrb xmm11, byte [rsi + rax + 13], 3 @@ -21111,7 +22172,7 @@ LBB4_181: QUAD $0x0d165c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rdx + 13], 14 LONG $0x24548b48; BYTE $0x38 // mov rdx, qword [rsp + 56] QUAD $0x0d165c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rsi + rdx + 13], 15 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x0e167c203a0f4466; BYTE $0x01 // pinsrb xmm15, byte [rsi + rdx + 14], 1 QUAD $0x0e167c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rsi + r10 + 14], 2 QUAD $0x0e067c203a0f4466; BYTE $0x03 // pinsrb xmm15, byte [rsi + rax + 14], 3 @@ -21130,7 +22191,7 @@ LBB4_181: LONG $0x740f4466; BYTE $0xce // pcmpeqb xmm9, xmm6 QUAD $0x0001308ddf0f4466; BYTE $0x00 // pandn xmm9, oword 304[rbp] /* [rip + .LCPI4_19] */ LONG $0xeb0f4566; BYTE $0xcd // por xmm9, xmm13 - LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] LONG $0x0e54b60f; BYTE $0x19 // movzx edx, byte [rsi + rcx + 25] LONG $0x6e0f4466; BYTE $0xea // movd xmm13, edx QUAD $0x000160b5f80f4466; BYTE $0x00 // psubb xmm14, oword 352[rbp] /* [rip + .LCPI4_22] */ @@ -21148,7 +22209,7 @@ LBB4_181: LONG $0x0e54b60f; BYTE $0x1b // movzx edx, byte [rsi + rcx + 27] LONG $0x6e0f4466; BYTE $0xda // movd xmm11, edx QUAD $0x0000b024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 176] - LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] + LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] QUAD $0x0f1e74203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r11 + 15], 1 QUAD $0x0f1674203a0f4266; BYTE $0x02 // pinsrb xmm6, byte [rsi + r10 + 15], 2 LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] @@ -21173,7 +22234,7 @@ LBB4_181: LONG $0x740f4166; BYTE $0xf6 // pcmpeqb xmm6, xmm14 LONG $0x75df0f66; BYTE $0x60 // pandn xmm6, oword 96[rbp] /* [rip + .LCPI4_6] */ LONG $0xeb0f4166; BYTE $0xf7 // por xmm6, xmm15 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] LONG $0x0654b60f; BYTE $0x1c // movzx edx, byte [rsi + rax + 28] LONG $0x6e0f4466; BYTE $0xfa // movd xmm15, edx LONG $0xeb0f4166; BYTE $0xf1 // por xmm6, xmm9 @@ -21227,10 +22288,10 @@ LBB4_181: LONG $0x740f4166; BYTE $0xe6 // pcmpeqb xmm4, xmm14 QUAD $0x00000100a5df0f66 // pandn xmm4, oword 256[rbp] /* [rip + .LCPI4_16] */ LONG $0xfc0f4166; BYTE $0xe2 // paddb xmm4, xmm10 - LONG $0x247c8b48; BYTE $0x18 // mov rdi, qword [rsp + 24] + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] LONG $0x3e54b60f; BYTE $0x1e // movzx edx, byte [rsi + rdi + 30] LONG $0x6e0f4466; BYTE $0xd2 // movd xmm10, edx - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0112067c203a0f66 // pinsrb xmm7, byte [rsi 
+ rax + 18], 1 QUAD $0x0113066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 19], 1 QUAD $0x0114065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 20], 1 @@ -21272,7 +22333,7 @@ LBB4_181: LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0812067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 18], 8 QUAD $0x120e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r9 + 18], 9 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] QUAD $0x0a123e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 18], 10 LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] QUAD $0x12167c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r10 + 18], 11 @@ -21449,7 +22510,7 @@ LBB4_181: QUAD $0x1d1e4c203a0f4666; BYTE $0x06 // pinsrb xmm9, byte [rsi + r11 + 29], 6 QUAD $0x1e1e54203a0f4666; BYTE $0x06 // pinsrb xmm10, byte [rsi + r11 + 30], 6 QUAD $0x1f1e74203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r11 + 31], 6 - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] + QUAD $0x0000008024848b4c // mov r8, qword [rsp + 128] WORD $0x8948; BYTE $0xda // mov rdx, rbx QUAD $0x1c1e7c203a0f4466; BYTE $0x07 // pinsrb xmm15, byte [rsi + rbx + 28], 7 QUAD $0x1d1e4c203a0f4466; BYTE $0x07 // pinsrb xmm9, byte [rsi + rbx + 29], 7 @@ -21519,30 +22580,30 @@ LBB4_181: LONG $0x610f4166; BYTE $0xc0 // punpcklwd xmm0, xmm8 LONG $0x690f4166; BYTE $0xe0 // punpckhwd xmm4, xmm8 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - LONG $0x7f0f41f3; WORD $0x8e64; BYTE $0x30 // movdqu oword [r14 + 4*rcx + 48], xmm4 - LONG $0x7f0f41f3; WORD $0x8e44; BYTE $0x20 // movdqu oword [r14 + 4*rcx + 32], xmm0 - LONG $0x7f0f41f3; WORD $0x8e4c; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm1 - LONG $0x7f0f41f3; WORD $0x8e14 // movdqu oword [r14 + 4*rcx], xmm2 + LONG $0x7f0f41f3; WORD $0x8864; BYTE $0x30 // movdqu oword [r8 + 4*rcx + 48], xmm4 + LONG $0x7f0f41f3; WORD $0x8844; BYTE $0x20 // movdqu oword [r8 + 4*rcx + 32], xmm0 + LONG $0x7f0f41f3; WORD $0x884c; BYTE $0x10 // movdqu oword [r8 + 4*rcx + 16], xmm1 + LONG $0x7f0f41f3; WORD $0x8814 // movdqu oword [r8 + 4*rcx], xmm2 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x000000f0248c3b48 // cmp rcx, qword [rsp + 240] - JNE LBB4_181 - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x000000f024943b4c // cmp r10, qword [rsp + 240] + QUAD $0x000000d8248c3b48 // cmp rcx, qword [rsp + 216] + JNE LBB4_180 + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + QUAD $0x000000d824943b4c // cmp r10, qword [rsp + 216] LONG $0x245c8a44; BYTE $0x08 // mov r11b, byte [rsp + 8] - QUAD $0x0000012024b48b48 // mov rsi, qword [rsp + 288] - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] + QUAD $0x0000013024b48b48 // mov rsi, qword [rsp + 304] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] JNE LBB4_43 - JMP LBB4_131 + JMP LBB4_138 -LBB4_183: +LBB4_182: LONG $0xf0e28349 // and r10, -16 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi - QUAD $0x000000f824848948 // mov qword [rsp + 248], rax - QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] + QUAD $0x0000010824848948 // mov qword [rsp + 264], rax + QUAD $0x000000d82494894c // mov qword [rsp + 216], r10 + LONG $0x94048d4b // lea rax, [r12 + 4*r10] LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax LONG $0xc3b60f41 // movzx eax, r11b LONG $0xc86e0f66 // movd xmm1, eax @@ -21550,9 +22611,9 @@ LBB4_183: 
LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 QUAD $0x0000a0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm1 WORD $0xc031 // xor eax, eax - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 -LBB4_184: +LBB4_183: WORD $0x8949; BYTE $0xc1 // mov r9, rax QUAD $0x0000009824848948 // mov qword [rsp + 152], rax LONG $0x05e1c149 // shl r9, 5 @@ -21585,7 +22646,7 @@ LBB4_184: LONG $0x6e0f4466; BYTE $0xf0 // movd xmm14, eax LONG $0x44b60f42; WORD $0x080e // movzx eax, byte [rsi + r9 + 8] LONG $0xc06e0f66 // movd xmm0, eax - QUAD $0x0000d024847f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm0 + QUAD $0x0000c024847f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm0 LONG $0x44b60f42; WORD $0x090e // movzx eax, byte [rsi + r9 + 9] LONG $0x6e0f4466; BYTE $0xd8 // movd xmm11, eax LONG $0x44b60f42; WORD $0x0a0e // movzx eax, byte [rsi + r9 + 10] @@ -21594,14 +22655,14 @@ LBB4_184: LONG $0x6e0f4466; BYTE $0xe8 // movd xmm13, eax LONG $0x44b60f42; WORD $0x0c0e // movzx eax, byte [rsi + r9 + 12] LONG $0xc06e0f66 // movd xmm0, eax - QUAD $0x0000e024847f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm0 + QUAD $0x0000f024847f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm0 LONG $0x44b60f42; WORD $0x0d0e // movzx eax, byte [rsi + r9 + 13] LONG $0xf06e0f66 // movd xmm6, eax LONG $0x44b60f42; WORD $0x0e0e // movzx eax, byte [rsi + r9 + 14] LONG $0x6e0f4466; BYTE $0xf8 // movd xmm15, eax LONG $0x44b60f42; WORD $0x0f0e // movzx eax, byte [rsi + r9 + 15] LONG $0xc06e0f66 // movd xmm0, eax - QUAD $0x0000c024847f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm0 + QUAD $0x0000e024847f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm0 LONG $0x244c894c; BYTE $0x48 // mov qword [rsp + 72], r9 WORD $0x894c; BYTE $0xc9 // mov rcx, r9 LONG $0x20c98348 // or rcx, 32 @@ -21614,7 +22675,7 @@ LBB4_184: LONG $0xa0ca8149; WORD $0x0000; BYTE $0x00 // or r10, 160 LONG $0x2454894c; BYTE $0x40 // mov qword [rsp + 64], r10 LONG $0xc0cf8149; WORD $0x0000; BYTE $0x00 // or r15, 192 - LONG $0x247c894c; BYTE $0x18 // mov qword [rsp + 24], r15 + LONG $0x247c894c; BYTE $0x20 // mov qword [rsp + 32], r15 LONG $0xe0cb8149; WORD $0x0000; BYTE $0x00 // or r11, 224 LONG $0x00ce8149; WORD $0x0001; BYTE $0x00 // or r14, 256 QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 @@ -21624,13 +22685,13 @@ LBB4_184: LONG $0x60cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 352 LONG $0x245c8948; BYTE $0x58 // mov qword [rsp + 88], rbx LONG $0x80cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 384 - LONG $0x247c8948; BYTE $0x20 // mov qword [rsp + 32], rdi + LONG $0x247c8948; BYTE $0x28 // mov qword [rsp + 40], rdi WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x01a00d48; WORD $0x0000 // or rax, 416 LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax + LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax WORD $0x894c; BYTE $0xcf // mov rdi, r9 LONG $0xe0cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 480 LONG $0x203a0f66; WORD $0x0e24; BYTE $0x01 // pinsrb xmm4, byte [rsi + rcx], 1 @@ -21646,7 +22707,7 @@ LBB4_184: LONG $0x203a0f66; WORD $0x1624; BYTE $0x09 // pinsrb xmm4, byte [rsi + rdx], 9 QUAD $0x0a0624203a0f4266 // pinsrb xmm4, byte [rsi + r8], 10 LONG $0x203a0f66; WORD $0x1e24; BYTE $0x0b // pinsrb xmm4, byte [rsi + rbx], 11 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + LONG $0x244c8b48; BYTE $0x28 // mov rcx, 
qword [rsp + 40] LONG $0x203a0f66; WORD $0x0e24; BYTE $0x0c // pinsrb xmm4, byte [rsi + rcx], 12 LONG $0x244c8b4c; BYTE $0x10 // mov r9, qword [rsp + 16] QUAD $0x0d0e24203a0f4266 // pinsrb xmm4, byte [rsi + r9], 13 @@ -21693,7 +22754,7 @@ LBB4_184: QUAD $0x022e6c203a0f4266; BYTE $0x04 // pinsrb xmm5, byte [rsi + r13 + 2], 4 LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] QUAD $0x021e6c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r11 + 2], 5 - LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] QUAD $0x022e6c203a0f4266; BYTE $0x06 // pinsrb xmm5, byte [rsi + r13 + 2], 6 WORD $0x894c; BYTE $0xd3 // mov rbx, r10 QUAD $0x02166c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r10 + 2], 7 @@ -21705,11 +22766,11 @@ LBB4_184: QUAD $0x02166c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r10 + 2], 10 LONG $0x24748b4c; BYTE $0x58 // mov r14, qword [rsp + 88] QUAD $0x02366c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r14 + 2], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0c02066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 2], 12 LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x0d02066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 2], 13 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0e02066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 2], 14 LONG $0x244c8948; BYTE $0x50 // mov qword [rsp + 80], rcx QUAD $0x0f020e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 2], 15 @@ -21724,11 +22785,11 @@ LBB4_184: QUAD $0x030e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r9 + 3], 9 QUAD $0x03167c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r10 + 3], 10 QUAD $0x03367c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r14 + 3], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x0c03067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 3], 12 LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x0d03067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 3], 13 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0e03067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 3], 14 QUAD $0x0f030e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 3], 15 QUAD $0x04164c203a0f4466; BYTE $0x01 // pinsrb xmm9, byte [rsi + rdx + 4], 1 @@ -21743,11 +22804,11 @@ LBB4_184: QUAD $0x040e4c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r9 + 4], 9 QUAD $0x04164c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r10 + 4], 10 QUAD $0x04364c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r14 + 4], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x04064c203a0f4466; BYTE $0x0c // pinsrb xmm9, byte [rsi + rax + 4], 12 LONG $0x24448b4c; BYTE $0x10 // mov r8, qword [rsp + 16] QUAD $0x04064c203a0f4666; BYTE $0x0d // pinsrb xmm9, byte [rsi + r8 + 4], 13 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x04164c203a0f4466; BYTE $0x0e // pinsrb xmm9, byte [rsi + rdx + 4], 14 QUAD $0x040e4c203a0f4466; BYTE $0x0f // pinsrb xmm9, byte [rsi + rcx + 4], 15 LONG $0xe9740f66 // pcmpeqb xmm5, xmm1 @@ -21780,7 +22841,7 @@ LBB4_184: QUAD $0x04053e54203a0f66 // pinsrb xmm2, byte [rsi + 
rdi + 5], 4 WORD $0x894c; BYTE $0xdf // mov rdi, r11 QUAD $0x051e54203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r11 + 5], 5 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x06050654203a0f66 // pinsrb xmm2, byte [rsi + rax + 5], 6 QUAD $0x00000110249c8948 // mov qword [rsp + 272], rbx QUAD $0x07051e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 5], 7 @@ -21789,11 +22850,11 @@ LBB4_184: QUAD $0x050e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r9 + 5], 9 QUAD $0x051654203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r10 + 5], 10 QUAD $0x053654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r14 + 5], 11 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x0c051654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 5], 12 WORD $0x894c; BYTE $0xc1 // mov rcx, r8 QUAD $0x050654203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r8 + 5], 13 - LONG $0x24448b4c; BYTE $0x28 // mov r8, qword [rsp + 40] + LONG $0x24448b4c; BYTE $0x18 // mov r8, qword [rsp + 24] QUAD $0x050654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r8 + 5], 14 LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] QUAD $0x051e54203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r11 + 5], 15 @@ -21804,7 +22865,7 @@ LBB4_184: LONG $0x246c8b4c; BYTE $0x70 // mov r13, qword [rsp + 112] QUAD $0x062e44203a0f4666; BYTE $0x04 // pinsrb xmm8, byte [rsi + r13 + 6], 4 QUAD $0x063e44203a0f4466; BYTE $0x05 // pinsrb xmm8, byte [rsi + rdi + 6], 5 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x060644203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rax + 6], 6 WORD $0x8949; BYTE $0xc5 // mov r13, rax QUAD $0x061e44203a0f4466; BYTE $0x07 // pinsrb xmm8, byte [rsi + rbx + 6], 7 @@ -21856,7 +22917,7 @@ LBB4_184: LONG $0xeb0f4166; BYTE $0xc8 // por xmm1, xmm8 LONG $0x3e54b60f; BYTE $0x15 // movzx edx, byte [rsi + rdi + 21] LONG $0xd26e0f66 // movd xmm2, edx - QUAD $0x0000d024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 208] + QUAD $0x0000c024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 192] LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x01080644203a0f66 // pinsrb xmm0, byte [rsi + rax + 8], 1 QUAD $0x082644203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r12 + 8], 2 @@ -21866,7 +22927,7 @@ LBB4_184: WORD $0x894d; BYTE $0xd8 // mov r8, r11 LONG $0x24748b4c; BYTE $0x40 // mov r14, qword [rsp + 64] QUAD $0x083644203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r14 + 8], 5 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] QUAD $0x06081644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 8], 6 QUAD $0x00000110249c8b4c // mov r11, qword [rsp + 272] QUAD $0x081e44203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r11 + 8], 7 @@ -21877,14 +22938,14 @@ LBB4_184: QUAD $0x083e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r15 + 8], 10 LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] QUAD $0x0b081644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 8], 11 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] QUAD $0x0c081644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 8], 12 QUAD $0x082e44203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r13 + 8], 13 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x18 // mov 
rdx, qword [rsp + 24] QUAD $0x0e081644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 8], 14 QUAD $0x0f080e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 8], 15 LONG $0xeb0f4166; BYTE $0xc9 // por xmm1, xmm9 - QUAD $0x0000d0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm1 + QUAD $0x0000c0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm1 LONG $0x3e54b60f; BYTE $0x16 // movzx edx, byte [rsi + rdi + 22] LONG $0xca6e0f66 // movd xmm1, edx LONG $0x740f4166; BYTE $0xc6 // pcmpeqb xmm0, xmm14 @@ -21894,7 +22955,7 @@ LBB4_184: QUAD $0x09265c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r12 + 9], 3 QUAD $0x09065c203a0f4666; BYTE $0x04 // pinsrb xmm11, byte [rsi + r8 + 9], 4 QUAD $0x09365c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r14 + 9], 5 - LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] QUAD $0x092e5c203a0f4666; BYTE $0x06 // pinsrb xmm11, byte [rsi + r13 + 9], 6 QUAD $0x091e5c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r11 + 9], 7 QUAD $0x091e5c203a0f4466; BYTE $0x08 // pinsrb xmm11, byte [rsi + rbx + 9], 8 @@ -21904,11 +22965,11 @@ LBB4_184: QUAD $0x093e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r15 + 9], 10 LONG $0x24548b4c; BYTE $0x58 // mov r10, qword [rsp + 88] QUAD $0x09165c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r10 + 9], 11 - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x247c8b4c; BYTE $0x28 // mov r15, qword [rsp + 40] QUAD $0x093e5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r15 + 9], 12 LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] QUAD $0x09165c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rdx + 9], 13 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] QUAD $0x09165c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rdx + 9], 14 LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] QUAD $0x09165c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rsi + rdx + 9], 15 @@ -21927,7 +22988,7 @@ LBB4_184: QUAD $0x0a3e64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r15 + 10], 12 LONG $0x24748b4c; BYTE $0x10 // mov r14, qword [rsp + 16] QUAD $0x0a3664203a0f4666; BYTE $0x0d // pinsrb xmm12, byte [rsi + r14 + 10], 13 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0a0664203a0f4466; BYTE $0x0e // pinsrb xmm12, byte [rsi + rax + 10], 14 QUAD $0x0a1664203a0f4466; BYTE $0x0f // pinsrb xmm12, byte [rsi + rdx + 10], 15 LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] @@ -21949,7 +23010,7 @@ LBB4_184: LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] QUAD $0x0b3e6c203a0f4466; BYTE $0x0d // pinsrb xmm13, byte [rsi + rdi + 11], 13 WORD $0x8949; BYTE $0xfd // mov r13, rdi - LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] + LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] QUAD $0x0b0e6c203a0f4666; BYTE $0x0e // pinsrb xmm13, byte [rsi + r9 + 11], 14 QUAD $0x0b166c203a0f4466; BYTE $0x0f // pinsrb xmm13, byte [rsi + rdx + 11], 15 LONG $0x740f4566; BYTE $0xde // pcmpeqb xmm11, xmm14 @@ -21965,7 +23026,7 @@ LBB4_184: LONG $0xeb0f4566; BYTE $0xec // por xmm13, xmm12 LONG $0x3e54b60f; BYTE $0x18 // movzx edx, byte [rsi + rdi + 24] LONG $0x6e0f4466; BYTE $0xe2 // movd xmm12, edx - QUAD $0x00e0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 224] + QUAD $0x00f0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 240] LONG $0x245c8b4c; BYTE $0x30 // 
mov r11, qword [rsp + 48] QUAD $0x0c1e4c203a0f4666; BYTE $0x01 // pinsrb xmm9, byte [rsi + r11 + 12], 1 WORD $0x894d; BYTE $0xf0 // mov r8, r14 @@ -21975,7 +23036,7 @@ LBB4_184: LONG $0x24748b4c; BYTE $0x70 // mov r14, qword [rsp + 112] QUAD $0x0c364c203a0f4666; BYTE $0x04 // pinsrb xmm9, byte [rsi + r14 + 12], 4 QUAD $0x0c064c203a0f4466; BYTE $0x05 // pinsrb xmm9, byte [rsi + rax + 12], 5 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] QUAD $0x0c164c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rdx + 12], 6 WORD $0x894d; BYTE $0xe7 // mov r15, r12 QUAD $0x0c264c203a0f4666; BYTE $0x07 // pinsrb xmm9, byte [rsi + r12 + 12], 7 @@ -21984,7 +23045,7 @@ LBB4_184: LONG $0x24648b4c; BYTE $0x78 // mov r12, qword [rsp + 120] QUAD $0x0c264c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r12 + 12], 10 QUAD $0x0c164c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r10 + 12], 11 - LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] QUAD $0x0c164c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r10 + 12], 12 QUAD $0x0c2e4c203a0f4666; BYTE $0x0d // pinsrb xmm9, byte [rsi + r13 + 12], 13 QUAD $0x0c0e4c203a0f4666; BYTE $0x0e // pinsrb xmm9, byte [rsi + r9 + 12], 14 @@ -22026,7 +23087,7 @@ LBB4_184: QUAD $0x0e2e7c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r13 + 14], 12 WORD $0x894d; BYTE $0xd5 // mov r13, r10 QUAD $0x0e167c203a0f4666; BYTE $0x0d // pinsrb xmm15, byte [rsi + r10 + 14], 13 - LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] + LONG $0x24548b4c; BYTE $0x18 // mov r10, qword [rsp + 24] QUAD $0x0e167c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r10 + 14], 14 LONG $0x740f4566; BYTE $0xce // pcmpeqb xmm9, xmm14 QUAD $0x0001308ddf0f4466; BYTE $0x00 // pandn xmm9, oword 304[rbp] /* [rip + .LCPI4_19] */ @@ -22046,7 +23107,7 @@ LBB4_184: LONG $0xeb0f4466; BYTE $0xfe // por xmm15, xmm6 LONG $0x0654b60f; BYTE $0x1b // movzx edx, byte [rsi + rax + 27] LONG $0x6e0f4466; BYTE $0xda // movd xmm11, edx - QUAD $0x0000c024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 192] + QUAD $0x0000e024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 224] LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] QUAD $0x010f0e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 15], 1 QUAD $0x0f0674203a0f4266; BYTE $0x02 // pinsrb xmm6, byte [rsi + r8 + 15], 2 @@ -22055,7 +23116,7 @@ LBB4_184: QUAD $0x0f3674203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r14 + 15], 4 LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] QUAD $0x050f0e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 15], 5 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] QUAD $0x060f1674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 15], 6 QUAD $0x0f3e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r15 + 15], 7 QUAD $0x080f3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 15], 8 @@ -22074,7 +23135,7 @@ LBB4_184: LONG $0x0654b60f; BYTE $0x1c // movzx edx, byte [rsi + rax + 28] LONG $0x6e0f4466; BYTE $0xfa // movd xmm15, edx LONG $0xeb0f4166; BYTE $0xf1 // por xmm6, xmm9 - QUAD $0x0000c024b47f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm6 + QUAD $0x0000e024b47f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm6 LONG $0x0654b60f; BYTE $0x1d // movzx edx, byte [rsi + rax + 29] LONG $0x6e0f4466; BYTE $0xca // movd xmm9, edx LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] @@ -22083,7 +23144,7 @@ LBB4_184: QUAD 
$0x100e54203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r9 + 16], 3 QUAD $0x103654203a0f4666; BYTE $0x04 // pinsrb xmm10, byte [rsi + r14 + 16], 4 QUAD $0x100e54203a0f4466; BYTE $0x05 // pinsrb xmm10, byte [rsi + rcx + 16], 5 - LONG $0x24648b4c; BYTE $0x18 // mov r12, qword [rsp + 24] + LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] QUAD $0x102654203a0f4666; BYTE $0x06 // pinsrb xmm10, byte [rsi + r12 + 16], 6 QUAD $0x103e54203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r15 + 16], 7 QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] @@ -22091,11 +23152,11 @@ LBB4_184: QUAD $0x103e54203a0f4466; BYTE $0x09 // pinsrb xmm10, byte [rsi + rdi + 16], 9 QUAD $0x101e54203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r11 + 16], 10 QUAD $0x101e54203a0f4466; BYTE $0x0b // pinsrb xmm10, byte [rsi + rbx + 16], 11 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x100654203a0f4466; BYTE $0x0c // pinsrb xmm10, byte [rsi + rax + 16], 12 WORD $0x894c; BYTE $0xe8 // mov rax, r13 QUAD $0x102e54203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r13 + 16], 13 - LONG $0x246c8b4c; BYTE $0x28 // mov r13, qword [rsp + 40] + LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] QUAD $0x102e54203a0f4666; BYTE $0x0e // pinsrb xmm10, byte [rsi + r13 + 16], 14 QUAD $0x101654203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r10 + 16], 15 QUAD $0x01111664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 17], 1 @@ -22111,7 +23172,7 @@ LBB4_184: QUAD $0x09113e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 17], 9 QUAD $0x111e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r11 + 17], 10 QUAD $0x0b111e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 17], 11 - LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] + LONG $0x24648b4c; BYTE $0x28 // mov r12, qword [rsp + 40] QUAD $0x112664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 17], 12 QUAD $0x0d110664203a0f66 // pinsrb xmm4, byte [rsi + rax + 17], 13 QUAD $0x112e64203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r13 + 17], 14 @@ -22352,7 +23413,7 @@ LBB4_184: QUAD $0x1d3e4c203a0f4666; BYTE $0x07 // pinsrb xmm9, byte [rsi + r15 + 29], 7 QUAD $0x1e3e54203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r15 + 30], 7 QUAD $0x1f3e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r15 + 31], 7 - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] + QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] QUAD $0x1c1e7c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r11 + 28], 8 QUAD $0x1d1e4c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r11 + 29], 8 QUAD $0x1e1e54203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r11 + 30], 8 @@ -22405,9 +23466,9 @@ LBB4_184: LONG $0xeb0f4166; BYTE $0xf7 // por xmm6, xmm15 LONG $0x6f0f4166; BYTE $0xc0 // movdqa xmm0, xmm8 LONG $0xc6600f66 // punpcklbw xmm0, xmm6 - QUAD $0x0000d0249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 208] + QUAD $0x0000c0249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 192] LONG $0xcb6f0f66 // movdqa xmm1, xmm3 - QUAD $0x0000c024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 192] + QUAD $0x0000e024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 224] LONG $0xcc600f66 // punpcklbw xmm1, xmm4 LONG $0xd16f0f66 // movdqa xmm2, xmm1 LONG $0xd0610f66 // punpcklwd xmm2, xmm0 @@ -22418,41 +23479,41 @@ LBB4_184: LONG $0x610f4166; BYTE $0xc0 // punpcklwd xmm0, xmm8 LONG $0x690f4166; BYTE $0xd8 // punpckhwd xmm3, xmm8 QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] - LONG 
$0x7f0f41f3; WORD $0x8e5c; BYTE $0x30 // movdqu oword [r14 + 4*rcx + 48], xmm3 - LONG $0x7f0f41f3; WORD $0x8e44; BYTE $0x20 // movdqu oword [r14 + 4*rcx + 32], xmm0 - LONG $0x7f0f41f3; WORD $0x8e4c; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm1 - LONG $0x7f0f41f3; WORD $0x8e14 // movdqu oword [r14 + 4*rcx], xmm2 + LONG $0x5c7f0ff3; WORD $0x308b // movdqu oword [rbx + 4*rcx + 48], xmm3 + LONG $0x447f0ff3; WORD $0x208b // movdqu oword [rbx + 4*rcx + 32], xmm0 + LONG $0x4c7f0ff3; WORD $0x108b // movdqu oword [rbx + 4*rcx + 16], xmm1 + LONG $0x147f0ff3; BYTE $0x8b // movdqu oword [rbx + 4*rcx], xmm2 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x000000f0248c3b48 // cmp rcx, qword [rsp + 240] - JNE LBB4_184 - QUAD $0x0000010024948b4c // mov r10, qword [rsp + 256] - QUAD $0x000000f024943b4c // cmp r10, qword [rsp + 240] + QUAD $0x000000d8248c3b48 // cmp rcx, qword [rsp + 216] + JNE LBB4_183 + QUAD $0x0000012024948b4c // mov r10, qword [rsp + 288] + QUAD $0x000000d824943b4c // cmp r10, qword [rsp + 216] LONG $0x245c8a44; BYTE $0x08 // mov r11b, byte [rsp + 8] - QUAD $0x000000f824b48b48 // mov rsi, qword [rsp + 248] - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] + QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] JNE LBB4_69 - JMP LBB4_135 + JMP LBB4_142 -LBB4_186: +LBB4_185: LONG $0xf8e28349 // and r10, -8 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x06e0c148 // shl rax, 6 WORD $0x0148; BYTE $0xf0 // add rax, rsi LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax - LONG $0x2454894c; BYTE $0x18 // mov qword [rsp + 24], r10 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] + LONG $0x2454894c; BYTE $0x20 // mov qword [rsp + 32], r10 + LONG $0x94048d4b // lea rax, [r12 + 4*r10] LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax LONG $0x246c8944; BYTE $0x40 // mov dword [rsp + 64], r13d LONG $0x6e0f4166; BYTE $0xc5 // movd xmm0, r13d LONG $0xc0700ff2; BYTE $0xe0 // pshuflw xmm0, xmm0, 224 LONG $0x700f4466; WORD $0x00d8 // pshufd xmm11, xmm0, 0 WORD $0x3145; BYTE $0xff // xor r15d, r15d - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 LONG $0xef0f4566; BYTE $0xff // pxor xmm15, xmm15 -LBB4_187: - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 +LBB4_186: + LONG $0x247c894c; BYTE $0x18 // mov qword [rsp + 24], r15 LONG $0x06e7c149 // shl r15, 6 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x894d; BYTE $0xfc // mov r12, r15 @@ -22588,7 +23649,7 @@ LBB4_187: LONG $0x380f4566; WORD $0xef10 // pblendvb xmm13, xmm15, xmm0 LONG $0x5c6e0f66; WORD $0x1024 // movd xmm3, dword [rsp + 16] LONG $0x54b70f42; WORD $0x203e // movzx edx, word [rsi + r15 + 32] - LONG $0x20245489 // mov dword [rsp + 32], edx + LONG $0x28245489 // mov dword [rsp + 40], edx LONG $0x4cc40f66; WORD $0x0e0e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 14], 1 QUAD $0x020e064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 14], 2 QUAD $0x030e264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 14], 3 @@ -22675,7 +23736,7 @@ LBB4_187: LONG $0x6f0f4566; BYTE $0xe9 // movdqa xmm13, xmm9 LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0x380f4566; WORD $0xef10 // pblendvb xmm13, xmm15, xmm0 - LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] + LONG $0x7c6e0f66; WORD $0x2824 // movd xmm7, dword [rsp + 40] LONG $0x54b70f46; WORD $0x2a3e // movzx r10d, word [rsi + r15 + 42] LONG $0x4cc40f66; WORD $0x1a0e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 26], 1 
QUAD $0x021a064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 26], 2 @@ -22698,7 +23759,7 @@ LBB4_187: LONG $0x380f4166; WORD $0xef10 // pblendvb xmm5, xmm15, xmm0 LONG $0x546e0f66; WORD $0x1024 // movd xmm2, dword [rsp + 16] LONG $0x54b70f42; WORD $0x2c3e // movzx edx, word [rsi + r15 + 44] - LONG $0x20245489 // mov dword [rsp + 32], edx + LONG $0x28245489 // mov dword [rsp + 40], edx QUAD $0x071c0e64c40f4266 // pinsrw xmm4, word [rsi + r9 + 28], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 LONG $0xe4630f66 // packsswb xmm4, xmm4 @@ -22785,7 +23846,7 @@ LBB4_187: LONG $0x6f0f4166; BYTE $0xea // movdqa xmm5, xmm10 LONG $0xc16f0f66 // movdqa xmm0, xmm1 LONG $0x380f4166; WORD $0xef10 // pblendvb xmm5, xmm15, xmm0 - LONG $0x4c6e0f66; WORD $0x2024 // movd xmm1, dword [rsp + 32] + LONG $0x4c6e0f66; WORD $0x2824 // movd xmm1, dword [rsp + 40] LONG $0x54b70f46; WORD $0x363e // movzx r10d, word [rsi + r15 + 54] LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -22938,7 +23999,7 @@ LBB4_187: LONG $0x44c40f66; WORD $0x3e0e; BYTE $0x01 // pinsrw xmm0, word [rsi + rcx + 62], 1 QUAD $0x023e0644c40f4266 // pinsrw xmm0, word [rsi + r8 + 62], 2 QUAD $0x033e2644c40f4266 // pinsrw xmm0, word [rsi + r12 + 62], 3 - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x043e2e44c40f4266 // pinsrw xmm0, word [rsi + r13 + 62], 4 LONG $0x44c40f66; WORD $0x3e1e; BYTE $0x05 // pinsrw xmm0, word [rsi + rbx + 62], 5 LONG $0x44c40f66; WORD $0x3e3e; BYTE $0x06 // pinsrw xmm0, word [rsi + rdi + 62], 6 @@ -22959,276 +24020,646 @@ LBB4_187: LONG $0x600f4566; BYTE $0xc6 // punpcklbw xmm8, xmm14 LONG $0x600f4566; BYTE $0xe5 // punpcklbw xmm12, xmm13 LONG $0x610f4566; BYTE $0xe0 // punpcklwd xmm12, xmm8 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - LONG $0x7f0f45f3; WORD $0x8e24 // movdqu oword [r14 + 4*rcx], xmm12 - LONG $0x7f0f41f3; WORD $0x8e44; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm0 + LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] + LONG $0x7f0f44f3; WORD $0x8824 // movdqu oword [rax + 4*rcx], xmm12 + LONG $0x447f0ff3; WORD $0x1088 // movdqu oword [rax + 4*rcx + 16], xmm0 LONG $0x08c18348 // add rcx, 8 WORD $0x8949; BYTE $0xcf // mov r15, rcx - LONG $0x244c3b48; BYTE $0x18 // cmp rcx, qword [rsp + 24] - JNE LBB4_187 - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] - LONG $0x24543b4c; BYTE $0x18 // cmp r10, qword [rsp + 24] - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] + LONG $0x244c3b48; BYTE $0x20 // cmp rcx, qword [rsp + 32] + JNE LBB4_186 + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] + LONG $0x24543b4c; BYTE $0x20 // cmp r10, qword [rsp + 32] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] LONG $0x246c8b44; BYTE $0x40 // mov r13d, dword [rsp + 64] LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] JNE LBB4_92 - JMP LBB4_139 + JMP LBB4_95 -LBB4_189: - LONG $0xf8e28349 // and r10, -8 - WORD $0x894c; BYTE $0xd0 // mov rax, r10 - LONG $0x06e0c148 // shl rax, 6 - WORD $0x0148; BYTE $0xf0 // add rax, rsi - LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax - LONG $0x2454894c; BYTE $0x18 // mov qword [rsp + 24], r10 - LONG $0x96048d4b // lea rax, [r14 + 4*r10] - LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax - LONG $0x246c8944; BYTE $0x40 // mov dword [rsp + 64], r13d - LONG $0x6e0f4166; BYTE $0xc5 // movd xmm0, r13d - LONG $0xc0700ff2; BYTE $0xe0 // 
pshuflw xmm0, xmm0, 224 - LONG $0x700f4466; WORD $0x00d8 // pshufd xmm11, xmm0, 0 - WORD $0x3145; BYTE $0xff // xor r15d, r15d - QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 - LONG $0xef0f4566; BYTE $0xff // pxor xmm15, xmm15 +LBB4_188: + WORD $0x894d; BYTE $0xd0 // mov r8, r10 + LONG $0xfce08349 // and r8, -4 + WORD $0x894c; BYTE $0xc3 // mov rbx, r8 + LONG $0x07e3c148 // shl rbx, 7 + WORD $0x0148; BYTE $0xf3 // add rbx, rsi + LONG $0x843c8d4f // lea r15, [r12 + 4*r8] + WORD $0x280f; BYTE $0xc8 // movaps xmm1, xmm0 + LONG $0x00c8c60f // shufps xmm1, xmm0, 0 + LONG $0xfcc68148; WORD $0x0001; BYTE $0x00 // add rsi, 508 + WORD $0xc931 // xor ecx, ecx + LONG $0x6f0f4466; WORD $0x007d // movdqa xmm15, oword 0[rbp] /* [rip + .LCPI4_0] */ + LONG $0x6f0f4466; WORD $0x1045 // movdqa xmm8, oword 16[rbp] /* [rip + .LCPI4_1] */ + LONG $0x6f0f4466; WORD $0x2055 // movdqa xmm10, oword 32[rbp] /* [rip + .LCPI4_2] */ + LONG $0x6f0f4466; WORD $0x305d // movdqa xmm11, oword 48[rbp] /* [rip + .LCPI4_3] */ + LONG $0x6f0f4466; WORD $0x4065 // movdqa xmm12, oword 64[rbp] /* [rip + .LCPI4_4] */ + LONG $0x6f0f4466; WORD $0x506d // movdqa xmm13, oword 80[rbp] /* [rip + .LCPI4_5] */ + LONG $0x6f0f4466; WORD $0x6075 // movdqa xmm14, oword 96[rbp] /* [rip + .LCPI4_6] */ + LONG $0x6f0f4466; WORD $0x704d // movdqa xmm9, oword 112[rbp] /* [rip + .LCPI4_7] */ -LBB4_190: - LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 - LONG $0x06e7c149 // shl r15, 6 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 - WORD $0x894d; BYTE $0xfc // mov r12, r15 - WORD $0x894d; BYTE $0xfd // mov r13, r15 - WORD $0x894c; BYTE $0xfb // mov rbx, r15 - WORD $0x894c; BYTE $0xff // mov rdi, r15 - WORD $0x894d; BYTE $0xf9 // mov r9, r15 - LONG $0x04b70f42; BYTE $0x3e // movzx eax, word [rsi + r15] - LONG $0xe86e0f66 // movd xmm5, eax - LONG $0x44b70f42; WORD $0x023e // movzx eax, word [rsi + r15 + 2] - LONG $0xc06e0f66 // movd xmm0, eax - LONG $0x44b70f42; WORD $0x043e // movzx eax, word [rsi + r15 + 4] - LONG $0xc86e0f66 // movd xmm1, eax - LONG $0x44b70f42; WORD $0x063e // movzx eax, word [rsi + r15 + 6] - LONG $0xf86e0f66 // movd xmm7, eax - LONG $0x44b70f42; WORD $0x083e // movzx eax, word [rsi + r15 + 8] - LONG $0x6e0f4466; BYTE $0xc0 // movd xmm8, eax - LONG $0x44b70f42; WORD $0x0a3e // movzx eax, word [rsi + r15 + 10] - LONG $0xe06e0f66 // movd xmm4, eax - LONG $0x44b70f42; WORD $0x0c3e // movzx eax, word [rsi + r15 + 12] - LONG $0x54b70f46; WORD $0x0e3e // movzx r10d, word [rsi + r15 + 14] - LONG $0x5cb70f46; WORD $0x103e // movzx r11d, word [rsi + r15 + 16] - LONG $0x54b70f42; WORD $0x123e // movzx edx, word [rsi + r15 + 18] - LONG $0x74b70f46; WORD $0x143e // movzx r14d, word [rsi + r15 + 20] - WORD $0x894c; BYTE $0xf9 // mov rcx, r15 - LONG $0x40c98348 // or rcx, 64 - LONG $0x80c88149; WORD $0x0000; BYTE $0x00 // or r8, 128 - LONG $0xc0cc8149; WORD $0x0000; BYTE $0x00 // or r12, 192 - LONG $0x00cd8149; WORD $0x0001; BYTE $0x00 // or r13, 256 - LONG $0x40cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 320 - LONG $0x80cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 384 - LONG $0x2cc40f66; WORD $0x010e // pinsrw xmm5, word [rsi + rcx], 1 - LONG $0xc40f4266; WORD $0x062c; BYTE $0x02 // pinsrw xmm5, word [rsi + r8], 2 - LONG $0xc40f4266; WORD $0x262c; BYTE $0x03 // pinsrw xmm5, word [rsi + r12], 3 - LONG $0xc40f4266; WORD $0x2e2c; BYTE $0x04 // pinsrw xmm5, word [rsi + r13], 4 - LONG $0x2cc40f66; WORD $0x051e // pinsrw xmm5, word [rsi + rbx], 5 - LONG $0x2cc40f66; WORD $0x063e // pinsrw xmm5, word [rsi + rdi], 6 - LONG $0x44c40f66; WORD 
$0x020e; BYTE $0x01 // pinsrw xmm0, word [rsi + rcx + 2], 1 - QUAD $0x02020644c40f4266 // pinsrw xmm0, word [rsi + r8 + 2], 2 - QUAD $0x03022644c40f4266 // pinsrw xmm0, word [rsi + r12 + 2], 3 - QUAD $0x04022e44c40f4266 // pinsrw xmm0, word [rsi + r13 + 2], 4 - LONG $0x44c40f66; WORD $0x021e; BYTE $0x05 // pinsrw xmm0, word [rsi + rbx + 2], 5 - LONG $0x44c40f66; WORD $0x023e; BYTE $0x06 // pinsrw xmm0, word [rsi + rdi + 2], 6 - LONG $0xc0c98149; WORD $0x0001; BYTE $0x00 // or r9, 448 - QUAD $0x07020e44c40f4266 // pinsrw xmm0, word [rsi + r9 + 2], 7 - LONG $0xd06e0f66 // movd xmm2, eax - LONG $0x44b70f42; WORD $0x163e // movzx eax, word [rsi + r15 + 22] - LONG $0x10244489 // mov dword [rsp + 16], eax - LONG $0x750f4166; BYTE $0xc3 // pcmpeqw xmm0, xmm11 - LONG $0x4cc40f66; WORD $0x040e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 4], 1 - QUAD $0x0204064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 4], 2 - QUAD $0x0304264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 4], 3 - QUAD $0x04042e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 4], 4 - LONG $0x4cc40f66; WORD $0x041e; BYTE $0x05 // pinsrw xmm1, word [rsi + rbx + 4], 5 - LONG $0x4cc40f66; WORD $0x043e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 4], 6 - QUAD $0x07040e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 4], 7 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 - QUAD $0x0000808d6f0f4466; BYTE $0x00 // movdqa xmm9, oword 128[rbp] /* [rip + .LCPI4_8] */ - LONG $0x6f0f4166; BYTE $0xd9 // movdqa xmm3, xmm9 - LONG $0x380f4166; WORD $0xdf10 // pblendvb xmm3, xmm15, xmm0 - LONG $0xc9630f66 // packsswb xmm1, xmm1 - QUAD $0x00000090856f0f66 // movdqa xmm0, oword 144[rbp] /* [rip + .LCPI4_9] */ - LONG $0xf06f0f66 // movdqa xmm6, xmm0 - LONG $0x6f0f4466; BYTE $0xf0 // movdqa xmm14, xmm0 - LONG $0xc16f0f66 // movdqa xmm0, xmm1 - LONG $0x380f4166; WORD $0xf710 // pblendvb xmm6, xmm15, xmm0 - LONG $0x6e0f4166; BYTE $0xca // movd xmm1, r10d - LONG $0x54b70f46; WORD $0x183e // movzx r10d, word [rsi + r15 + 24] - LONG $0xc40f4266; WORD $0x0e2c; BYTE $0x07 // pinsrw xmm5, word [rsi + r9], 7 - LONG $0x750f4166; BYTE $0xeb // pcmpeqw xmm5, xmm11 - LONG $0xc0760f66 // pcmpeqd xmm0, xmm0 - LONG $0xe8ef0f66 // pxor xmm5, xmm0 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0x7cc40f66; WORD $0x060e; BYTE $0x01 // pinsrw xmm7, word [rsi + rcx + 6], 1 - QUAD $0x0206067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 6], 2 - QUAD $0x0306267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 6], 3 - QUAD $0x04062e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 6], 4 - LONG $0x7cc40f66; WORD $0x061e; BYTE $0x05 // pinsrw xmm7, word [rsi + rbx + 6], 5 - LONG $0x7cc40f66; WORD $0x063e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 6], 6 - QUAD $0x07060e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 6], 7 - LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 - LONG $0xff630f66 // packsswb xmm7, xmm7 - QUAD $0x01080e44c40f4466 // pinsrw xmm8, word [rsi + rcx + 8], 1 - QUAD $0x02080644c40f4666 // pinsrw xmm8, word [rsi + r8 + 8], 2 - QUAD $0x03082644c40f4666 // pinsrw xmm8, word [rsi + r12 + 8], 3 - QUAD $0x04082e44c40f4666 // pinsrw xmm8, word [rsi + r13 + 8], 4 - QUAD $0x05081e44c40f4466 // pinsrw xmm8, word [rsi + rbx + 8], 5 - QUAD $0x06083e44c40f4466 // pinsrw xmm8, word [rsi + rdi + 8], 6 - QUAD $0x07080e44c40f4666 // pinsrw xmm8, word [rsi + r9 + 8], 7 - LONG $0xddf80f66 // psubb xmm3, xmm5 - QUAD $0x0000a0a56f0f4466; BYTE $0x00 // movdqa xmm12, oword 160[rbp] /* [rip + .LCPI4_10] */ - LONG $0xc76f0f66 // movdqa xmm0, xmm7 - LONG 
$0x380f4566; WORD $0xe710 // pblendvb xmm12, xmm15, xmm0 - LONG $0x6e0f4166; BYTE $0xfb // movd xmm7, r11d - LONG $0x44b70f42; WORD $0x1a3e // movzx eax, word [rsi + r15 + 26] - LONG $0x750f4566; BYTE $0xc3 // pcmpeqw xmm8, xmm11 - LONG $0x630f4566; BYTE $0xc0 // packsswb xmm8, xmm8 - LONG $0xeb0f4466; BYTE $0xe6 // por xmm12, xmm6 - QUAD $0x0000b0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 176[rbp] /* [rip + .LCPI4_11] */ - LONG $0x6f0f4166; BYTE $0xc0 // movdqa xmm0, xmm8 - LONG $0x380f4566; WORD $0xef10 // pblendvb xmm13, xmm15, xmm0 - LONG $0xf26e0f66 // movd xmm6, edx - LONG $0x5cb70f46; WORD $0x1c3e // movzx r11d, word [rsi + r15 + 28] - LONG $0x64c40f66; WORD $0x0a0e; BYTE $0x01 // pinsrw xmm4, word [rsi + rcx + 10], 1 - QUAD $0x020a0664c40f4266 // pinsrw xmm4, word [rsi + r8 + 10], 2 - QUAD $0x030a2664c40f4266 // pinsrw xmm4, word [rsi + r12 + 10], 3 - QUAD $0x040a2e64c40f4266 // pinsrw xmm4, word [rsi + r13 + 10], 4 - LONG $0x64c40f66; WORD $0x0a1e; BYTE $0x05 // pinsrw xmm4, word [rsi + rbx + 10], 5 - LONG $0x64c40f66; WORD $0x0a3e; BYTE $0x06 // pinsrw xmm4, word [rsi + rdi + 10], 6 - QUAD $0x070a0e64c40f4266 // pinsrw xmm4, word [rsi + r9 + 10], 7 - LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 - LONG $0xe4630f66 // packsswb xmm4, xmm4 - LONG $0x54c40f66; WORD $0x0c0e; BYTE $0x01 // pinsrw xmm2, word [rsi + rcx + 12], 1 - QUAD $0x020c0654c40f4266 // pinsrw xmm2, word [rsi + r8 + 12], 2 - QUAD $0x030c2654c40f4266 // pinsrw xmm2, word [rsi + r12 + 12], 3 - QUAD $0x040c2e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 12], 4 - LONG $0x54c40f66; WORD $0x0c1e; BYTE $0x05 // pinsrw xmm2, word [rsi + rbx + 12], 5 - LONG $0x54c40f66; WORD $0x0c3e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 12], 6 - LONG $0xeb0f4466; BYTE $0xe3 // por xmm12, xmm3 - QUAD $0x000000c0ad6f0f66 // movdqa xmm5, oword 192[rbp] /* [rip + .LCPI4_12] */ - LONG $0xc46f0f66 // movdqa xmm0, xmm4 - LONG $0x380f4166; WORD $0xef10 // pblendvb xmm5, xmm15, xmm0 - LONG $0x6e0f4166; BYTE $0xe6 // movd xmm4, r14d - LONG $0x54b70f42; WORD $0x1e3e // movzx edx, word [rsi + r15 + 30] - LONG $0x30245489 // mov dword [rsp + 48], edx - QUAD $0x070c0e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 12], 7 - LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xeb0f4166; BYTE $0xed // por xmm5, xmm13 - QUAD $0x0000d0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 208[rbp] /* [rip + .LCPI4_13] */ - LONG $0xc26f0f66 // movdqa xmm0, xmm2 - LONG $0x380f4566; WORD $0xef10 // pblendvb xmm13, xmm15, xmm0 - LONG $0x5c6e0f66; WORD $0x1024 // movd xmm3, dword [rsp + 16] - LONG $0x54b70f42; WORD $0x203e // movzx edx, word [rsi + r15 + 32] - LONG $0x20245489 // mov dword [rsp + 32], edx - LONG $0x4cc40f66; WORD $0x0e0e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 14], 1 - QUAD $0x020e064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 14], 2 - QUAD $0x030e264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 14], 3 - QUAD $0x040e2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 14], 4 - LONG $0x4cc40f66; WORD $0x0e1e; BYTE $0x05 // pinsrw xmm1, word [rsi + rbx + 14], 5 - LONG $0x4cc40f66; WORD $0x0e3e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 14], 6 - LONG $0xeb0f4466; BYTE $0xed // por xmm13, xmm5 - LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d - LONG $0x54b70f42; WORD $0x223e // movzx edx, word [rsi + r15 + 34] - LONG $0x10245489 // mov dword [rsp + 16], edx - QUAD $0x070e0e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 14], 7 - LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 - LONG $0x74c40f66; 
WORD $0x120e; BYTE $0x01 // pinsrw xmm6, word [rsi + rcx + 18], 1 - QUAD $0x02120674c40f4266 // pinsrw xmm6, word [rsi + r8 + 18], 2 - QUAD $0x03122674c40f4266 // pinsrw xmm6, word [rsi + r12 + 18], 3 - QUAD $0x04122e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 18], 4 - LONG $0x74c40f66; WORD $0x121e; BYTE $0x05 // pinsrw xmm6, word [rsi + rbx + 18], 5 - LONG $0x74c40f66; WORD $0x123e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 18], 6 - LONG $0xc9630f66 // packsswb xmm1, xmm1 - QUAD $0x07120e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 18], 7 - LONG $0x750f4166; BYTE $0xf3 // pcmpeqw xmm6, xmm11 +LBB4_189: + QUAD $0xfffffe04b6100ff3 // movss xmm6, dword [rsi - 508] + QUAD $0xfffffe08be100ff3 // movss xmm7, dword [rsi - 504] + QUAD $0xfffffe0cae100ff3 // movss xmm5, dword [rsi - 500] + QUAD $0xfffffe10a6100ff3 // movss xmm4, dword [rsi - 496] + QUAD $0xfffe84b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 380], 16 + QUAD $0xffff04b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 252], 32 + LONG $0x213a0f66; WORD $0x8476; BYTE $0x30 // insertps xmm6, dword [rsi - 124], 48 + LONG $0x04f1c20f // cmpneqps xmm6, xmm1 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0xeb0f4566; BYTE $0xec // por xmm13, xmm12 - QUAD $0x0000e0a56f0f4466; BYTE $0x00 // movdqa xmm12, oword 224[rbp] /* [rip + .LCPI4_14] */ - LONG $0xc16f0f66 // movdqa xmm0, xmm1 - LONG $0x380f4566; WORD $0xe710 // pblendvb xmm12, xmm15, xmm0 - LONG $0x6f0f4566; BYTE $0xc1 // movdqa xmm8, xmm9 - LONG $0xc66f0f66 // movdqa xmm0, xmm6 - LONG $0x380f4566; WORD $0xc710 // pblendvb xmm8, xmm15, xmm0 - LONG $0xc86e0f66 // movd xmm1, eax - LONG $0x74b70f46; WORD $0x243e // movzx r14d, word [rsi + r15 + 36] - LONG $0x7cc40f66; WORD $0x100e; BYTE $0x01 // pinsrw xmm7, word [rsi + rcx + 16], 1 - QUAD $0x0210067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 16], 2 - QUAD $0x0310267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 16], 3 - QUAD $0x04102e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 16], 4 - LONG $0x7cc40f66; WORD $0x101e; BYTE $0x05 // pinsrw xmm7, word [rsi + rbx + 16], 5 - LONG $0x7cc40f66; WORD $0x103e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 16], 6 - LONG $0x64c40f66; WORD $0x140e; BYTE $0x01 // pinsrw xmm4, word [rsi + rcx + 20], 1 - QUAD $0x02140664c40f4266 // pinsrw xmm4, word [rsi + r8 + 20], 2 - QUAD $0x03142664c40f4266 // pinsrw xmm4, word [rsi + r12 + 20], 3 - QUAD $0x04142e64c40f4266 // pinsrw xmm4, word [rsi + r13 + 20], 4 - LONG $0x64c40f66; WORD $0x141e; BYTE $0x05 // pinsrw xmm4, word [rsi + rbx + 20], 5 - LONG $0x64c40f66; WORD $0x143e; BYTE $0x06 // pinsrw xmm4, word [rsi + rdi + 20], 6 - QUAD $0x07140e64c40f4266 // pinsrw xmm4, word [rsi + r9 + 20], 7 - LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + QUAD $0xfffe88be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 376], 16 + QUAD $0xffff08be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 248], 32 + LONG $0x213a0f66; WORD $0x887e; BYTE $0x30 // insertps xmm7, dword [rsi - 120], 48 + QUAD $0xfffe8cae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 372], 16 + QUAD $0xffff0cae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 244], 32 + LONG $0x213a0f66; WORD $0x8c6e; BYTE $0x30 // insertps xmm5, dword [rsi - 116], 48 + QUAD $0xfffe90a6213a0f66; WORD $0x10ff // insertps xmm4, dword [rsi - 368], 16 + QUAD $0xffff10a6213a0f66; WORD $0x20ff // insertps xmm4, dword [rsi - 240], 32 + LONG $0x213a0f66; WORD $0x9066; BYTE $0x30 // insertps xmm4, 
dword [rsi - 112], 48 + LONG $0x04f9c20f // cmpneqps xmm7, xmm1 + LONG $0xff6b0f66 // packssdw xmm7, xmm7 + LONG $0xff630f66 // packsswb xmm7, xmm7 + LONG $0xd76f0f66 // movdqa xmm2, xmm7 + LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 + LONG $0xd7f80f66 // psubb xmm2, xmm7 + QUAD $0xfffffe14be100ff3 // movss xmm7, dword [rsi - 492] + QUAD $0xfffe94be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 364], 16 + QUAD $0xffff14be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 236], 32 + LONG $0x213a0f66; WORD $0x947e; BYTE $0x30 // insertps xmm7, dword [rsi - 108], 48 + LONG $0xd6eb0f66 // por xmm2, xmm6 + QUAD $0xfffffe18b6100ff3 // movss xmm6, dword [rsi - 488] + QUAD $0xfffe98b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 360], 16 + QUAD $0xffff18b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 232], 32 + LONG $0x213a0f66; WORD $0x9876; BYTE $0x30 // insertps xmm6, dword [rsi - 104], 48 + LONG $0x04e9c20f // cmpneqps xmm5, xmm1 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xf5710f66; BYTE $0x02 // psllw xmm5, 2 + LONG $0xdb0f4166; BYTE $0xe8 // pand xmm5, xmm8 + LONG $0xeaeb0f66 // por xmm5, xmm2 + QUAD $0xfffffe1c9e100ff3 // movss xmm3, dword [rsi - 484] + QUAD $0xfffe9c9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 356], 16 + QUAD $0xffff1c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 228], 32 + LONG $0x213a0f66; WORD $0x9c5e; BYTE $0x30 // insertps xmm3, dword [rsi - 100], 48 + LONG $0x04e1c20f // cmpneqps xmm4, xmm1 + LONG $0xe46b0f66 // packssdw xmm4, xmm4 LONG $0xe4630f66 // packsswb xmm4, xmm4 - LONG $0xeb0f4566; BYTE $0xe5 // por xmm12, xmm13 - LONG $0x6f0f4166; BYTE $0xee // movdqa xmm5, xmm14 - LONG $0xc46f0f66 // movdqa xmm0, xmm4 - LONG $0x380f4166; WORD $0xef10 // pblendvb xmm5, xmm15, xmm0 - LONG $0x6e0f4166; BYTE $0xe3 // movd xmm4, r11d - LONG $0x5cb70f46; WORD $0x263e // movzx r11d, word [rsi + r15 + 38] - QUAD $0x07100e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 16], 7 - LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 - QUAD $0x00000160bdef0f66 // pxor xmm7, oword 352[rbp] /* [rip + .LCPI4_22] */ + LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 + LONG $0xf4710f66; BYTE $0x03 // psllw xmm4, 3 + LONG $0xdb0f4166; BYTE $0xe2 // pand xmm4, xmm10 + LONG $0x04f9c20f // cmpneqps xmm7, xmm1 + LONG $0xff6b0f66 // packssdw xmm7, xmm7 LONG $0xff630f66 // packsswb xmm7, xmm7 - LONG $0x5cc40f66; WORD $0x160e; BYTE $0x01 // pinsrw xmm3, word [rsi + rcx + 22], 1 - QUAD $0x0216065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 22], 2 - QUAD $0x0316265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 22], 3 - QUAD $0x04162e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 22], 4 - LONG $0x5cc40f66; WORD $0x161e; BYTE $0x05 // pinsrw xmm3, word [rsi + rbx + 22], 5 - LONG $0x5cc40f66; WORD $0x163e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 22], 6 - QUAD $0x07160e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 22], 7 - LONG $0x750f4166; BYTE $0xdb // pcmpeqw xmm3, xmm11 + LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 + LONG $0xf7710f66; BYTE $0x04 // psllw xmm7, 4 + LONG $0xdb0f4166; BYTE $0xfb // pand xmm7, xmm11 + LONG $0xfceb0f66 // por xmm7, xmm4 + QUAD $0xfffffe20a6100ff3 // movss xmm4, dword [rsi - 480] + QUAD $0xfffea0a6213a0f66; WORD $0x10ff // insertps xmm4, dword [rsi - 352], 16 + QUAD $0xffff20a6213a0f66; WORD $0x20ff // insertps xmm4, dword [rsi - 224], 32 + LONG $0x213a0f66; WORD $0xa066; BYTE $0x30 // insertps xmm4, dword [rsi - 96], 48 + 
LONG $0xfdeb0f66 // por xmm7, xmm5 + QUAD $0xfffffe24ae100ff3 // movss xmm5, dword [rsi - 476] + QUAD $0xfffea4ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 348], 16 + QUAD $0xffff24ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 220], 32 + LONG $0x213a0f66; WORD $0xa46e; BYTE $0x30 // insertps xmm5, dword [rsi - 92], 48 + LONG $0x04e9c20f // cmpneqps xmm5, xmm1 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0x04f1c20f // cmpneqps xmm6, xmm1 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 + LONG $0xf6630f66 // packsswb xmm6, xmm6 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0xf6710f66; BYTE $0x05 // psllw xmm6, 5 + LONG $0xdb0f4166; BYTE $0xf4 // pand xmm6, xmm12 + LONG $0x04d9c20f // cmpneqps xmm3, xmm1 + LONG $0xdb6b0f66 // packssdw xmm3, xmm3 LONG $0xdb630f66 // packsswb xmm3, xmm3 - LONG $0x54c40f66; WORD $0x180e; BYTE $0x01 // pinsrw xmm2, word [rsi + rcx + 24], 1 - QUAD $0x02180654c40f4266 // pinsrw xmm2, word [rsi + r8 + 24], 2 - QUAD $0x03182654c40f4266 // pinsrw xmm2, word [rsi + r12 + 24], 3 - QUAD $0x04182e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 24], 4 - LONG $0x54c40f66; WORD $0x181e; BYTE $0x05 // pinsrw xmm2, word [rsi + rbx + 24], 5 - LONG $0x54c40f66; WORD $0x183e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 24], 6 - QUAD $0x07180e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 24], 7 - LONG $0xf80f4466; BYTE $0xc7 // psubb xmm8, xmm7 - QUAD $0x0000a0956f0f4466; BYTE $0x00 // movdqa xmm10, oword 160[rbp] /* [rip + .LCPI4_10] */ - LONG $0x6f0f4566; BYTE $0xf2 // movdqa xmm14, xmm10 - LONG $0xc36f0f66 // movdqa xmm0, xmm3 - LONG $0x380f4566; WORD $0xf710 // pblendvb xmm14, xmm15, xmm0 - LONG $0x5c6e0f66; WORD $0x3024 // movd xmm3, dword [rsp + 48] - LONG $0x44b70f42; WORD $0x283e // movzx eax, word [rsi + r15 + 40] - LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 + LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 + LONG $0xf3710f66; BYTE $0x06 // psllw xmm3, 6 + LONG $0xdb0f4166; BYTE $0xdd // pand xmm3, xmm13 + LONG $0xdeeb0f66 // por xmm3, xmm6 + QUAD $0xfffffe2896100ff3 // movss xmm2, dword [rsi - 472] + QUAD $0xfffea896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 344], 16 + QUAD $0xffff2896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 216], 32 + LONG $0x213a0f66; WORD $0xa856; BYTE $0x30 // insertps xmm2, dword [rsi - 88], 48 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0x04e1c20f // cmpneqps xmm4, xmm1 + LONG $0xe46b0f66 // packssdw xmm4, xmm4 + LONG $0xe4630f66 // packsswb xmm4, xmm4 + LONG $0xf4710f66; BYTE $0x07 // psllw xmm4, 7 + LONG $0xdb0f4166; BYTE $0xe6 // pand xmm4, xmm14 + LONG $0xe3eb0f66 // por xmm4, xmm3 + QUAD $0xfffffe2c9e100ff3 // movss xmm3, dword [rsi - 468] + QUAD $0xfffeac9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 340], 16 + QUAD $0xffff2c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 212], 32 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0x213a0f66; WORD $0xac5e; BYTE $0x30 // insertps xmm3, dword [rsi - 84], 48 + LONG $0xe7eb0f66 // por xmm4, xmm7 + LONG $0x04d1c20f // cmpneqps xmm2, xmm1 + LONG $0xd26b0f66 // packssdw xmm2, xmm2 LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xeb0f4466; BYTE $0xf5 // por xmm14, xmm5 - QUAD $0x0000b08d6f0f4466; BYTE $0x00 // movdqa xmm9, oword 176[rbp] /* [rip + .LCPI4_11] */ - LONG $0x6f0f4566; BYTE $0xe9 // movdqa xmm13, xmm9 - LONG $0xc26f0f66 // movdqa xmm0, xmm2 - LONG $0x380f4566; WORD $0xef10 // pblendvb xmm13, xmm15, xmm0 - LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] - LONG $0x54b70f46; WORD 
$0x2a3e // movzx r10d, word [rsi + r15 + 42] - LONG $0x4cc40f66; WORD $0x1a0e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 26], 1 - QUAD $0x021a064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 26], 2 - QUAD $0x031a264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 26], 3 - QUAD $0x041a2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 26], 4 - LONG $0x4cc40f66; WORD $0x1a1e; BYTE $0x05 // pinsrw xmm1, word [rsi + rbx + 26], 5 - LONG $0x4cc40f66; WORD $0x1a3e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 26], 6 - QUAD $0x071a0e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 26], 7 - LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 - LONG $0xc9630f66 // packsswb xmm1, xmm1 - LONG $0x64c40f66; WORD $0x1c0e; BYTE $0x01 // pinsrw xmm4, word [rsi + rcx + 28], 1 - QUAD $0x021c0664c40f4266 // pinsrw xmm4, word [rsi + r8 + 28], 2 + LONG $0xf26f0f66 // movdqa xmm6, xmm2 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0xf2f80f66 // psubb xmm6, xmm2 + QUAD $0xfffffe30be100ff3 // movss xmm7, dword [rsi - 464] + QUAD $0xfffeb0be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 336], 16 + QUAD $0xffff30be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 208], 32 + LONG $0x213a0f66; WORD $0xb07e; BYTE $0x30 // insertps xmm7, dword [rsi - 80], 48 + LONG $0xf5eb0f66 // por xmm6, xmm5 + QUAD $0xfffffe34ae100ff3 // movss xmm5, dword [rsi - 460] + QUAD $0xfffeb4ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 332], 16 + QUAD $0xffff34ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 204], 32 + LONG $0x213a0f66; WORD $0xb46e; BYTE $0x30 // insertps xmm5, dword [rsi - 76], 48 + LONG $0x04d9c20f // cmpneqps xmm3, xmm1 + LONG $0xdb6b0f66 // packssdw xmm3, xmm3 + LONG $0xdb630f66 // packsswb xmm3, xmm3 + LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 + LONG $0xf3710f66; BYTE $0x02 // psllw xmm3, 2 + LONG $0xdb0f4166; BYTE $0xd8 // pand xmm3, xmm8 + LONG $0xdeeb0f66 // por xmm3, xmm6 + QUAD $0xfffffe38b6100ff3 // movss xmm6, dword [rsi - 456] + QUAD $0xfffeb8b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 328], 16 + QUAD $0xffff38b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 200], 32 + LONG $0x213a0f66; WORD $0xb876; BYTE $0x30 // insertps xmm6, dword [rsi - 72], 48 + LONG $0x04f9c20f // cmpneqps xmm7, xmm1 + LONG $0xff6b0f66 // packssdw xmm7, xmm7 + LONG $0xff630f66 // packsswb xmm7, xmm7 + LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 + LONG $0xf7710f66; BYTE $0x03 // psllw xmm7, 3 + LONG $0xdb0f4166; BYTE $0xfa // pand xmm7, xmm10 + LONG $0x04e9c20f // cmpneqps xmm5, xmm1 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xf5710f66; BYTE $0x04 // psllw xmm5, 4 + LONG $0xdb0f4166; BYTE $0xeb // pand xmm5, xmm11 + LONG $0xefeb0f66 // por xmm5, xmm7 + QUAD $0xfffffe3c96100ff3 // movss xmm2, dword [rsi - 452] + QUAD $0xfffebc96213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 324], 16 + QUAD $0xffff3c96213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 196], 32 + LONG $0x213a0f66; WORD $0xbc56; BYTE $0x30 // insertps xmm2, dword [rsi - 68], 48 + LONG $0xebeb0f66 // por xmm5, xmm3 + QUAD $0xfffffe40be100ff3 // movss xmm7, dword [rsi - 448] + QUAD $0xfffec0be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 320], 16 + QUAD $0xffff40be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 192], 32 + LONG $0x213a0f66; WORD $0xc07e; BYTE $0x30 // insertps xmm7, dword [rsi - 64], 48 + LONG $0x04f1c20f // cmpneqps xmm6, xmm1 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 + LONG 
$0xf6630f66 // packsswb xmm6, xmm6 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0xf6710f66; BYTE $0x05 // psllw xmm6, 5 + LONG $0xdb0f4166; BYTE $0xf4 // pand xmm6, xmm12 + LONG $0x04d1c20f // cmpneqps xmm2, xmm1 + LONG $0xd26b0f66 // packssdw xmm2, xmm2 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 + LONG $0xf2710f66; BYTE $0x06 // psllw xmm2, 6 + LONG $0xdb0f4166; BYTE $0xd5 // pand xmm2, xmm13 + LONG $0xd6eb0f66 // por xmm2, xmm6 + QUAD $0xfffffe44b6100ff3 // movss xmm6, dword [rsi - 444] + QUAD $0xfffec4b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 316], 16 + QUAD $0xffff44b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 188], 32 + LONG $0x213a0f66; WORD $0xc476; BYTE $0x30 // insertps xmm6, dword [rsi - 60], 48 + LONG $0x04f1c20f // cmpneqps xmm6, xmm1 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 + LONG $0xf6630f66 // packsswb xmm6, xmm6 + LONG $0x04f9c20f // cmpneqps xmm7, xmm1 + LONG $0xff6b0f66 // packssdw xmm7, xmm7 + LONG $0xff630f66 // packsswb xmm7, xmm7 + LONG $0xf7710f66; BYTE $0x07 // psllw xmm7, 7 + LONG $0xdb0f4166; BYTE $0xfe // pand xmm7, xmm14 + LONG $0xfaeb0f66 // por xmm7, xmm2 + QUAD $0xfffffe4896100ff3 // movss xmm2, dword [rsi - 440] + QUAD $0xfffec896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 312], 16 + QUAD $0xffff4896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 184], 32 + LONG $0x213a0f66; WORD $0xc856; BYTE $0x30 // insertps xmm2, dword [rsi - 56], 48 + LONG $0xfdeb0f66 // por xmm7, xmm5 + QUAD $0xfffffe4c9e100ff3 // movss xmm3, dword [rsi - 436] + QUAD $0xfffecc9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 308], 16 + QUAD $0xffff4c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 180], 32 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0x213a0f66; WORD $0xcc5e; BYTE $0x30 // insertps xmm3, dword [rsi - 52], 48 + LONG $0xe7620f66 // punpckldq xmm4, xmm7 + LONG $0x04d1c20f // cmpneqps xmm2, xmm1 + LONG $0xd26b0f66 // packssdw xmm2, xmm2 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xfa6f0f66 // movdqa xmm7, xmm2 + LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 + LONG $0xfaf80f66 // psubb xmm7, xmm2 + QUAD $0xfffffe50ae100ff3 // movss xmm5, dword [rsi - 432] + QUAD $0xfffed0ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 304], 16 + QUAD $0xffff50ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 176], 32 + LONG $0x213a0f66; WORD $0xd06e; BYTE $0x30 // insertps xmm5, dword [rsi - 48], 48 + LONG $0xfeeb0f66 // por xmm7, xmm6 + QUAD $0xfffffe54b6100ff3 // movss xmm6, dword [rsi - 428] + QUAD $0xfffed4b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 300], 16 + QUAD $0xffff54b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 172], 32 + LONG $0x213a0f66; WORD $0xd476; BYTE $0x30 // insertps xmm6, dword [rsi - 44], 48 + LONG $0x04d9c20f // cmpneqps xmm3, xmm1 + LONG $0xdb6b0f66 // packssdw xmm3, xmm3 + LONG $0xdb630f66 // packsswb xmm3, xmm3 + LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 + LONG $0xf3710f66; BYTE $0x02 // psllw xmm3, 2 + LONG $0xdb0f4166; BYTE $0xd8 // pand xmm3, xmm8 + LONG $0xdfeb0f66 // por xmm3, xmm7 + QUAD $0xfffffe58be100ff3 // movss xmm7, dword [rsi - 424] + QUAD $0xfffed8be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 296], 16 + QUAD $0xffff58be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 168], 32 + LONG $0x213a0f66; WORD $0xd87e; BYTE $0x30 // insertps xmm7, dword [rsi - 40], 48 + LONG $0x04e9c20f // cmpneqps xmm5, xmm1 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG 
$0xed630f66 // packsswb xmm5, xmm5 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xf5710f66; BYTE $0x03 // psllw xmm5, 3 + LONG $0xdb0f4166; BYTE $0xea // pand xmm5, xmm10 + LONG $0x04f1c20f // cmpneqps xmm6, xmm1 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 + LONG $0xf6630f66 // packsswb xmm6, xmm6 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0xf6710f66; BYTE $0x04 // psllw xmm6, 4 + LONG $0xdb0f4166; BYTE $0xf3 // pand xmm6, xmm11 + LONG $0xf5eb0f66 // por xmm6, xmm5 + QUAD $0xfffffe5c96100ff3 // movss xmm2, dword [rsi - 420] + QUAD $0xfffedc96213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 292], 16 + QUAD $0xffff5c96213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 164], 32 + LONG $0x213a0f66; WORD $0xdc56; BYTE $0x30 // insertps xmm2, dword [rsi - 36], 48 + LONG $0xf3eb0f66 // por xmm6, xmm3 + QUAD $0xfffffe60ae100ff3 // movss xmm5, dword [rsi - 416] + QUAD $0xfffee0ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 288], 16 + QUAD $0xffff60ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 160], 32 + LONG $0x213a0f66; WORD $0xe06e; BYTE $0x30 // insertps xmm5, dword [rsi - 32], 48 + LONG $0x04f9c20f // cmpneqps xmm7, xmm1 + LONG $0xff6b0f66 // packssdw xmm7, xmm7 + LONG $0xff630f66 // packsswb xmm7, xmm7 + LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 + LONG $0xf7710f66; BYTE $0x05 // psllw xmm7, 5 + LONG $0xdb0f4166; BYTE $0xfc // pand xmm7, xmm12 + LONG $0x04d1c20f // cmpneqps xmm2, xmm1 + LONG $0xd26b0f66 // packssdw xmm2, xmm2 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 + LONG $0xf2710f66; BYTE $0x06 // psllw xmm2, 6 + LONG $0xdb0f4166; BYTE $0xd5 // pand xmm2, xmm13 + LONG $0xd7eb0f66 // por xmm2, xmm7 + QUAD $0xfffffe64be100ff3 // movss xmm7, dword [rsi - 412] + QUAD $0xfffee4be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 284], 16 + QUAD $0xffff64be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 156], 32 + LONG $0x213a0f66; WORD $0xe47e; BYTE $0x30 // insertps xmm7, dword [rsi - 28], 48 + LONG $0x04f9c20f // cmpneqps xmm7, xmm1 + LONG $0xff6b0f66 // packssdw xmm7, xmm7 + LONG $0xff630f66 // packsswb xmm7, xmm7 + LONG $0x04e9c20f // cmpneqps xmm5, xmm1 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0xf5710f66; BYTE $0x07 // psllw xmm5, 7 + LONG $0xdb0f4166; BYTE $0xee // pand xmm5, xmm14 + LONG $0xeaeb0f66 // por xmm5, xmm2 + QUAD $0xfffffe6896100ff3 // movss xmm2, dword [rsi - 408] + QUAD $0xfffee896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 280], 16 + QUAD $0xffff6896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 152], 32 + LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 + LONG $0x213a0f66; WORD $0xe856; BYTE $0x30 // insertps xmm2, dword [rsi - 24], 48 + LONG $0xeeeb0f66 // por xmm5, xmm6 + LONG $0x04d1c20f // cmpneqps xmm2, xmm1 + LONG $0xd26b0f66 // packssdw xmm2, xmm2 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xf26f0f66 // movdqa xmm6, xmm2 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0xf2f80f66 // psubb xmm6, xmm2 + QUAD $0xfffffe6c9e100ff3 // movss xmm3, dword [rsi - 404] + QUAD $0xfffeec9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 276], 16 + QUAD $0xffff6c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 148], 32 + LONG $0x213a0f66; WORD $0xec5e; BYTE $0x30 // insertps xmm3, dword [rsi - 20], 48 + LONG $0xf7eb0f66 // por xmm6, xmm7 + QUAD $0xfffffe7096100ff3 // movss xmm2, dword [rsi - 400] + QUAD $0xfffef096213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 
272], 16 + QUAD $0xffff7096213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 144], 32 + LONG $0x213a0f66; WORD $0xf056; BYTE $0x30 // insertps xmm2, dword [rsi - 16], 48 + LONG $0x04d9c20f // cmpneqps xmm3, xmm1 + LONG $0xdb6b0f66 // packssdw xmm3, xmm3 + LONG $0xdb630f66 // packsswb xmm3, xmm3 + LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 + LONG $0xf3710f66; BYTE $0x02 // psllw xmm3, 2 + LONG $0xdb0f4166; BYTE $0xd8 // pand xmm3, xmm8 + LONG $0xdeeb0f66 // por xmm3, xmm6 + QUAD $0xfffffe74b6100ff3 // movss xmm6, dword [rsi - 396] + QUAD $0xfffef4b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 268], 16 + QUAD $0xffff74b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 140], 32 + LONG $0x213a0f66; WORD $0xf476; BYTE $0x30 // insertps xmm6, dword [rsi - 12], 48 + LONG $0x04d1c20f // cmpneqps xmm2, xmm1 + LONG $0xd26b0f66 // packssdw xmm2, xmm2 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 + LONG $0xf2710f66; BYTE $0x03 // psllw xmm2, 3 + LONG $0xdb0f4166; BYTE $0xd2 // pand xmm2, xmm10 + LONG $0x04f1c20f // cmpneqps xmm6, xmm1 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 + LONG $0xf6630f66 // packsswb xmm6, xmm6 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0xf6710f66; BYTE $0x04 // psllw xmm6, 4 + LONG $0xdb0f4166; BYTE $0xf3 // pand xmm6, xmm11 + LONG $0xf2eb0f66 // por xmm6, xmm2 + QUAD $0xfffffe78be100ff3 // movss xmm7, dword [rsi - 392] + QUAD $0xfffef8be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 264], 16 + QUAD $0xffff78be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 136], 32 + LONG $0x213a0f66; WORD $0xf87e; BYTE $0x30 // insertps xmm7, dword [rsi - 8], 48 + LONG $0xf3eb0f66 // por xmm6, xmm3 + QUAD $0xfffffe7c96100ff3 // movss xmm2, dword [rsi - 388] + QUAD $0xfffefc96213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 260], 16 + QUAD $0xffff7c96213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 132], 32 + LONG $0x213a0f66; WORD $0xfc56; BYTE $0x30 // insertps xmm2, dword [rsi - 4], 48 + LONG $0x04f9c20f // cmpneqps xmm7, xmm1 + LONG $0xff6b0f66 // packssdw xmm7, xmm7 + LONG $0xff630f66 // packsswb xmm7, xmm7 + LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 + LONG $0xf7710f66; BYTE $0x05 // psllw xmm7, 5 + LONG $0xdb0f4166; BYTE $0xfc // pand xmm7, xmm12 + LONG $0x04d1c20f // cmpneqps xmm2, xmm1 + LONG $0xd26b0f66 // packssdw xmm2, xmm2 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 + LONG $0xf2710f66; BYTE $0x06 // psllw xmm2, 6 + LONG $0xdb0f4166; BYTE $0xd5 // pand xmm2, xmm13 + LONG $0xd7eb0f66 // por xmm2, xmm7 + QUAD $0xfffffe809e100ff3 // movss xmm3, dword [rsi - 384] + QUAD $0xffff009e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 256], 16 + LONG $0x213a0f66; WORD $0x805e; BYTE $0x20 // insertps xmm3, dword [rsi - 128], 32 + LONG $0x213a0f66; WORD $0x301e // insertps xmm3, dword [rsi], 48 + LONG $0x04d9c20f // cmpneqps xmm3, xmm1 + LONG $0xdb6b0f66 // packssdw xmm3, xmm3 + LONG $0xdb630f66 // packsswb xmm3, xmm3 + LONG $0xf3710f66; BYTE $0x07 // psllw xmm3, 7 + LONG $0xdb0f4166; BYTE $0xde // pand xmm3, xmm14 + LONG $0xdaeb0f66 // por xmm3, xmm2 + LONG $0xdeeb0f66 // por xmm3, xmm6 + LONG $0xeb620f66 // punpckldq xmm5, xmm3 + LONG $0xe5600f66 // punpcklbw xmm4, xmm5 + LONG $0x380f4166; WORD $0xe100 // pshufb xmm4, xmm9 + LONG $0x7f0f41f3; WORD $0x8c24 // movdqu oword [r12 + 4*rcx], xmm4 + LONG $0x04c18348 // add rcx, 4 + LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // add rsi, 512 + WORD $0x3949; BYTE $0xc8 // cmp r8, rcx 
+ JNE LBB4_189 + WORD $0x394d; BYTE $0xc2 // cmp r10, r8 + JNE LBB4_134 + JMP LBB4_146 + +LBB4_191: + LONG $0xf8e28349 // and r10, -8 + WORD $0x894c; BYTE $0xd0 // mov rax, r10 + LONG $0x06e0c148 // shl rax, 6 + WORD $0x0148; BYTE $0xf0 // add rax, rsi + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + LONG $0x2454894c; BYTE $0x20 // mov qword [rsp + 32], r10 + LONG $0x94048d4b // lea rax, [r12 + 4*r10] + LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax + LONG $0x246c8944; BYTE $0x40 // mov dword [rsp + 64], r13d + LONG $0x6e0f4166; BYTE $0xc5 // movd xmm0, r13d + LONG $0xc0700ff2; BYTE $0xe0 // pshuflw xmm0, xmm0, 224 + LONG $0x700f4466; WORD $0x00d8 // pshufd xmm11, xmm0, 0 + WORD $0x3145; BYTE $0xff // xor r15d, r15d + QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 + LONG $0xef0f4566; BYTE $0xff // pxor xmm15, xmm15 + +LBB4_192: + LONG $0x247c894c; BYTE $0x18 // mov qword [rsp + 24], r15 + LONG $0x06e7c149 // shl r15, 6 + WORD $0x894d; BYTE $0xf8 // mov r8, r15 + WORD $0x894d; BYTE $0xfc // mov r12, r15 + WORD $0x894d; BYTE $0xfd // mov r13, r15 + WORD $0x894c; BYTE $0xfb // mov rbx, r15 + WORD $0x894c; BYTE $0xff // mov rdi, r15 + WORD $0x894d; BYTE $0xf9 // mov r9, r15 + LONG $0x04b70f42; BYTE $0x3e // movzx eax, word [rsi + r15] + LONG $0xe86e0f66 // movd xmm5, eax + LONG $0x44b70f42; WORD $0x023e // movzx eax, word [rsi + r15 + 2] + LONG $0xc06e0f66 // movd xmm0, eax + LONG $0x44b70f42; WORD $0x043e // movzx eax, word [rsi + r15 + 4] + LONG $0xc86e0f66 // movd xmm1, eax + LONG $0x44b70f42; WORD $0x063e // movzx eax, word [rsi + r15 + 6] + LONG $0xf86e0f66 // movd xmm7, eax + LONG $0x44b70f42; WORD $0x083e // movzx eax, word [rsi + r15 + 8] + LONG $0x6e0f4466; BYTE $0xc0 // movd xmm8, eax + LONG $0x44b70f42; WORD $0x0a3e // movzx eax, word [rsi + r15 + 10] + LONG $0xe06e0f66 // movd xmm4, eax + LONG $0x44b70f42; WORD $0x0c3e // movzx eax, word [rsi + r15 + 12] + LONG $0x54b70f46; WORD $0x0e3e // movzx r10d, word [rsi + r15 + 14] + LONG $0x5cb70f46; WORD $0x103e // movzx r11d, word [rsi + r15 + 16] + LONG $0x54b70f42; WORD $0x123e // movzx edx, word [rsi + r15 + 18] + LONG $0x74b70f46; WORD $0x143e // movzx r14d, word [rsi + r15 + 20] + WORD $0x894c; BYTE $0xf9 // mov rcx, r15 + LONG $0x40c98348 // or rcx, 64 + LONG $0x80c88149; WORD $0x0000; BYTE $0x00 // or r8, 128 + LONG $0xc0cc8149; WORD $0x0000; BYTE $0x00 // or r12, 192 + LONG $0x00cd8149; WORD $0x0001; BYTE $0x00 // or r13, 256 + LONG $0x40cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 320 + LONG $0x80cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 384 + LONG $0x2cc40f66; WORD $0x010e // pinsrw xmm5, word [rsi + rcx], 1 + LONG $0xc40f4266; WORD $0x062c; BYTE $0x02 // pinsrw xmm5, word [rsi + r8], 2 + LONG $0xc40f4266; WORD $0x262c; BYTE $0x03 // pinsrw xmm5, word [rsi + r12], 3 + LONG $0xc40f4266; WORD $0x2e2c; BYTE $0x04 // pinsrw xmm5, word [rsi + r13], 4 + LONG $0x2cc40f66; WORD $0x051e // pinsrw xmm5, word [rsi + rbx], 5 + LONG $0x2cc40f66; WORD $0x063e // pinsrw xmm5, word [rsi + rdi], 6 + LONG $0x44c40f66; WORD $0x020e; BYTE $0x01 // pinsrw xmm0, word [rsi + rcx + 2], 1 + QUAD $0x02020644c40f4266 // pinsrw xmm0, word [rsi + r8 + 2], 2 + QUAD $0x03022644c40f4266 // pinsrw xmm0, word [rsi + r12 + 2], 3 + QUAD $0x04022e44c40f4266 // pinsrw xmm0, word [rsi + r13 + 2], 4 + LONG $0x44c40f66; WORD $0x021e; BYTE $0x05 // pinsrw xmm0, word [rsi + rbx + 2], 5 + LONG $0x44c40f66; WORD $0x023e; BYTE $0x06 // pinsrw xmm0, word [rsi + rdi + 2], 6 + LONG $0xc0c98149; WORD $0x0001; BYTE $0x00 // or r9, 448 + QUAD 
$0x07020e44c40f4266 // pinsrw xmm0, word [rsi + r9 + 2], 7 + LONG $0xd06e0f66 // movd xmm2, eax + LONG $0x44b70f42; WORD $0x163e // movzx eax, word [rsi + r15 + 22] + LONG $0x10244489 // mov dword [rsp + 16], eax + LONG $0x750f4166; BYTE $0xc3 // pcmpeqw xmm0, xmm11 + LONG $0x4cc40f66; WORD $0x040e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 4], 1 + QUAD $0x0204064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 4], 2 + QUAD $0x0304264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 4], 3 + QUAD $0x04042e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 4], 4 + LONG $0x4cc40f66; WORD $0x041e; BYTE $0x05 // pinsrw xmm1, word [rsi + rbx + 4], 5 + LONG $0x4cc40f66; WORD $0x043e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 4], 6 + QUAD $0x07040e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 4], 7 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 + QUAD $0x0000808d6f0f4466; BYTE $0x00 // movdqa xmm9, oword 128[rbp] /* [rip + .LCPI4_8] */ + LONG $0x6f0f4166; BYTE $0xd9 // movdqa xmm3, xmm9 + LONG $0x380f4166; WORD $0xdf10 // pblendvb xmm3, xmm15, xmm0 + LONG $0xc9630f66 // packsswb xmm1, xmm1 + QUAD $0x00000090856f0f66 // movdqa xmm0, oword 144[rbp] /* [rip + .LCPI4_9] */ + LONG $0xf06f0f66 // movdqa xmm6, xmm0 + LONG $0x6f0f4466; BYTE $0xf0 // movdqa xmm14, xmm0 + LONG $0xc16f0f66 // movdqa xmm0, xmm1 + LONG $0x380f4166; WORD $0xf710 // pblendvb xmm6, xmm15, xmm0 + LONG $0x6e0f4166; BYTE $0xca // movd xmm1, r10d + LONG $0x54b70f46; WORD $0x183e // movzx r10d, word [rsi + r15 + 24] + LONG $0xc40f4266; WORD $0x0e2c; BYTE $0x07 // pinsrw xmm5, word [rsi + r9], 7 + LONG $0x750f4166; BYTE $0xeb // pcmpeqw xmm5, xmm11 + LONG $0xc0760f66 // pcmpeqd xmm0, xmm0 + LONG $0xe8ef0f66 // pxor xmm5, xmm0 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0x7cc40f66; WORD $0x060e; BYTE $0x01 // pinsrw xmm7, word [rsi + rcx + 6], 1 + QUAD $0x0206067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 6], 2 + QUAD $0x0306267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 6], 3 + QUAD $0x04062e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 6], 4 + LONG $0x7cc40f66; WORD $0x061e; BYTE $0x05 // pinsrw xmm7, word [rsi + rbx + 6], 5 + LONG $0x7cc40f66; WORD $0x063e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 6], 6 + QUAD $0x07060e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 6], 7 + LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 + LONG $0xff630f66 // packsswb xmm7, xmm7 + QUAD $0x01080e44c40f4466 // pinsrw xmm8, word [rsi + rcx + 8], 1 + QUAD $0x02080644c40f4666 // pinsrw xmm8, word [rsi + r8 + 8], 2 + QUAD $0x03082644c40f4666 // pinsrw xmm8, word [rsi + r12 + 8], 3 + QUAD $0x04082e44c40f4666 // pinsrw xmm8, word [rsi + r13 + 8], 4 + QUAD $0x05081e44c40f4466 // pinsrw xmm8, word [rsi + rbx + 8], 5 + QUAD $0x06083e44c40f4466 // pinsrw xmm8, word [rsi + rdi + 8], 6 + QUAD $0x07080e44c40f4666 // pinsrw xmm8, word [rsi + r9 + 8], 7 + LONG $0xddf80f66 // psubb xmm3, xmm5 + QUAD $0x0000a0a56f0f4466; BYTE $0x00 // movdqa xmm12, oword 160[rbp] /* [rip + .LCPI4_10] */ + LONG $0xc76f0f66 // movdqa xmm0, xmm7 + LONG $0x380f4566; WORD $0xe710 // pblendvb xmm12, xmm15, xmm0 + LONG $0x6e0f4166; BYTE $0xfb // movd xmm7, r11d + LONG $0x44b70f42; WORD $0x1a3e // movzx eax, word [rsi + r15 + 26] + LONG $0x750f4566; BYTE $0xc3 // pcmpeqw xmm8, xmm11 + LONG $0x630f4566; BYTE $0xc0 // packsswb xmm8, xmm8 + LONG $0xeb0f4466; BYTE $0xe6 // por xmm12, xmm6 + QUAD $0x0000b0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 176[rbp] /* [rip + .LCPI4_11] */ + LONG $0x6f0f4166; BYTE $0xc0 // movdqa xmm0, xmm8 + LONG $0x380f4566; 
WORD $0xef10 // pblendvb xmm13, xmm15, xmm0 + LONG $0xf26e0f66 // movd xmm6, edx + LONG $0x5cb70f46; WORD $0x1c3e // movzx r11d, word [rsi + r15 + 28] + LONG $0x64c40f66; WORD $0x0a0e; BYTE $0x01 // pinsrw xmm4, word [rsi + rcx + 10], 1 + QUAD $0x020a0664c40f4266 // pinsrw xmm4, word [rsi + r8 + 10], 2 + QUAD $0x030a2664c40f4266 // pinsrw xmm4, word [rsi + r12 + 10], 3 + QUAD $0x040a2e64c40f4266 // pinsrw xmm4, word [rsi + r13 + 10], 4 + LONG $0x64c40f66; WORD $0x0a1e; BYTE $0x05 // pinsrw xmm4, word [rsi + rbx + 10], 5 + LONG $0x64c40f66; WORD $0x0a3e; BYTE $0x06 // pinsrw xmm4, word [rsi + rdi + 10], 6 + QUAD $0x070a0e64c40f4266 // pinsrw xmm4, word [rsi + r9 + 10], 7 + LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 + LONG $0xe4630f66 // packsswb xmm4, xmm4 + LONG $0x54c40f66; WORD $0x0c0e; BYTE $0x01 // pinsrw xmm2, word [rsi + rcx + 12], 1 + QUAD $0x020c0654c40f4266 // pinsrw xmm2, word [rsi + r8 + 12], 2 + QUAD $0x030c2654c40f4266 // pinsrw xmm2, word [rsi + r12 + 12], 3 + QUAD $0x040c2e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 12], 4 + LONG $0x54c40f66; WORD $0x0c1e; BYTE $0x05 // pinsrw xmm2, word [rsi + rbx + 12], 5 + LONG $0x54c40f66; WORD $0x0c3e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 12], 6 + LONG $0xeb0f4466; BYTE $0xe3 // por xmm12, xmm3 + QUAD $0x000000c0ad6f0f66 // movdqa xmm5, oword 192[rbp] /* [rip + .LCPI4_12] */ + LONG $0xc46f0f66 // movdqa xmm0, xmm4 + LONG $0x380f4166; WORD $0xef10 // pblendvb xmm5, xmm15, xmm0 + LONG $0x6e0f4166; BYTE $0xe6 // movd xmm4, r14d + LONG $0x54b70f42; WORD $0x1e3e // movzx edx, word [rsi + r15 + 30] + LONG $0x30245489 // mov dword [rsp + 48], edx + QUAD $0x070c0e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 12], 7 + LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xeb0f4166; BYTE $0xed // por xmm5, xmm13 + QUAD $0x0000d0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 208[rbp] /* [rip + .LCPI4_13] */ + LONG $0xc26f0f66 // movdqa xmm0, xmm2 + LONG $0x380f4566; WORD $0xef10 // pblendvb xmm13, xmm15, xmm0 + LONG $0x5c6e0f66; WORD $0x1024 // movd xmm3, dword [rsp + 16] + LONG $0x54b70f42; WORD $0x203e // movzx edx, word [rsi + r15 + 32] + LONG $0x28245489 // mov dword [rsp + 40], edx + LONG $0x4cc40f66; WORD $0x0e0e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 14], 1 + QUAD $0x020e064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 14], 2 + QUAD $0x030e264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 14], 3 + QUAD $0x040e2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 14], 4 + LONG $0x4cc40f66; WORD $0x0e1e; BYTE $0x05 // pinsrw xmm1, word [rsi + rbx + 14], 5 + LONG $0x4cc40f66; WORD $0x0e3e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 14], 6 + LONG $0xeb0f4466; BYTE $0xed // por xmm13, xmm5 + LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d + LONG $0x54b70f42; WORD $0x223e // movzx edx, word [rsi + r15 + 34] + LONG $0x10245489 // mov dword [rsp + 16], edx + QUAD $0x070e0e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 14], 7 + LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 + LONG $0x74c40f66; WORD $0x120e; BYTE $0x01 // pinsrw xmm6, word [rsi + rcx + 18], 1 + QUAD $0x02120674c40f4266 // pinsrw xmm6, word [rsi + r8 + 18], 2 + QUAD $0x03122674c40f4266 // pinsrw xmm6, word [rsi + r12 + 18], 3 + QUAD $0x04122e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 18], 4 + LONG $0x74c40f66; WORD $0x121e; BYTE $0x05 // pinsrw xmm6, word [rsi + rbx + 18], 5 + LONG $0x74c40f66; WORD $0x123e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 18], 6 + LONG $0xc9630f66 // packsswb xmm1, xmm1 + QUAD 
$0x07120e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 18], 7 + LONG $0x750f4166; BYTE $0xf3 // pcmpeqw xmm6, xmm11 + LONG $0xf6630f66 // packsswb xmm6, xmm6 + LONG $0xeb0f4566; BYTE $0xec // por xmm13, xmm12 + QUAD $0x0000e0a56f0f4466; BYTE $0x00 // movdqa xmm12, oword 224[rbp] /* [rip + .LCPI4_14] */ + LONG $0xc16f0f66 // movdqa xmm0, xmm1 + LONG $0x380f4566; WORD $0xe710 // pblendvb xmm12, xmm15, xmm0 + LONG $0x6f0f4566; BYTE $0xc1 // movdqa xmm8, xmm9 + LONG $0xc66f0f66 // movdqa xmm0, xmm6 + LONG $0x380f4566; WORD $0xc710 // pblendvb xmm8, xmm15, xmm0 + LONG $0xc86e0f66 // movd xmm1, eax + LONG $0x74b70f46; WORD $0x243e // movzx r14d, word [rsi + r15 + 36] + LONG $0x7cc40f66; WORD $0x100e; BYTE $0x01 // pinsrw xmm7, word [rsi + rcx + 16], 1 + QUAD $0x0210067cc40f4266 // pinsrw xmm7, word [rsi + r8 + 16], 2 + QUAD $0x0310267cc40f4266 // pinsrw xmm7, word [rsi + r12 + 16], 3 + QUAD $0x04102e7cc40f4266 // pinsrw xmm7, word [rsi + r13 + 16], 4 + LONG $0x7cc40f66; WORD $0x101e; BYTE $0x05 // pinsrw xmm7, word [rsi + rbx + 16], 5 + LONG $0x7cc40f66; WORD $0x103e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 16], 6 + LONG $0x64c40f66; WORD $0x140e; BYTE $0x01 // pinsrw xmm4, word [rsi + rcx + 20], 1 + QUAD $0x02140664c40f4266 // pinsrw xmm4, word [rsi + r8 + 20], 2 + QUAD $0x03142664c40f4266 // pinsrw xmm4, word [rsi + r12 + 20], 3 + QUAD $0x04142e64c40f4266 // pinsrw xmm4, word [rsi + r13 + 20], 4 + LONG $0x64c40f66; WORD $0x141e; BYTE $0x05 // pinsrw xmm4, word [rsi + rbx + 20], 5 + LONG $0x64c40f66; WORD $0x143e; BYTE $0x06 // pinsrw xmm4, word [rsi + rdi + 20], 6 + QUAD $0x07140e64c40f4266 // pinsrw xmm4, word [rsi + r9 + 20], 7 + LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 + LONG $0xe4630f66 // packsswb xmm4, xmm4 + LONG $0xeb0f4566; BYTE $0xe5 // por xmm12, xmm13 + LONG $0x6f0f4166; BYTE $0xee // movdqa xmm5, xmm14 + LONG $0xc46f0f66 // movdqa xmm0, xmm4 + LONG $0x380f4166; WORD $0xef10 // pblendvb xmm5, xmm15, xmm0 + LONG $0x6e0f4166; BYTE $0xe3 // movd xmm4, r11d + LONG $0x5cb70f46; WORD $0x263e // movzx r11d, word [rsi + r15 + 38] + QUAD $0x07100e7cc40f4266 // pinsrw xmm7, word [rsi + r9 + 16], 7 + LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 + QUAD $0x00000160bdef0f66 // pxor xmm7, oword 352[rbp] /* [rip + .LCPI4_22] */ + LONG $0xff630f66 // packsswb xmm7, xmm7 + LONG $0x5cc40f66; WORD $0x160e; BYTE $0x01 // pinsrw xmm3, word [rsi + rcx + 22], 1 + QUAD $0x0216065cc40f4266 // pinsrw xmm3, word [rsi + r8 + 22], 2 + QUAD $0x0316265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 22], 3 + QUAD $0x04162e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 22], 4 + LONG $0x5cc40f66; WORD $0x161e; BYTE $0x05 // pinsrw xmm3, word [rsi + rbx + 22], 5 + LONG $0x5cc40f66; WORD $0x163e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 22], 6 + QUAD $0x07160e5cc40f4266 // pinsrw xmm3, word [rsi + r9 + 22], 7 + LONG $0x750f4166; BYTE $0xdb // pcmpeqw xmm3, xmm11 + LONG $0xdb630f66 // packsswb xmm3, xmm3 + LONG $0x54c40f66; WORD $0x180e; BYTE $0x01 // pinsrw xmm2, word [rsi + rcx + 24], 1 + QUAD $0x02180654c40f4266 // pinsrw xmm2, word [rsi + r8 + 24], 2 + QUAD $0x03182654c40f4266 // pinsrw xmm2, word [rsi + r12 + 24], 3 + QUAD $0x04182e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 24], 4 + LONG $0x54c40f66; WORD $0x181e; BYTE $0x05 // pinsrw xmm2, word [rsi + rbx + 24], 5 + LONG $0x54c40f66; WORD $0x183e; BYTE $0x06 // pinsrw xmm2, word [rsi + rdi + 24], 6 + QUAD $0x07180e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 24], 7 + LONG $0xf80f4466; BYTE $0xc7 // psubb xmm8, xmm7 + QUAD $0x0000a0956f0f4466; 
BYTE $0x00 // movdqa xmm10, oword 160[rbp] /* [rip + .LCPI4_10] */ + LONG $0x6f0f4566; BYTE $0xf2 // movdqa xmm14, xmm10 + LONG $0xc36f0f66 // movdqa xmm0, xmm3 + LONG $0x380f4566; WORD $0xf710 // pblendvb xmm14, xmm15, xmm0 + LONG $0x5c6e0f66; WORD $0x3024 // movd xmm3, dword [rsp + 48] + LONG $0x44b70f42; WORD $0x283e // movzx eax, word [rsi + r15 + 40] + LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xeb0f4466; BYTE $0xf5 // por xmm14, xmm5 + QUAD $0x0000b08d6f0f4466; BYTE $0x00 // movdqa xmm9, oword 176[rbp] /* [rip + .LCPI4_11] */ + LONG $0x6f0f4566; BYTE $0xe9 // movdqa xmm13, xmm9 + LONG $0xc26f0f66 // movdqa xmm0, xmm2 + LONG $0x380f4566; WORD $0xef10 // pblendvb xmm13, xmm15, xmm0 + LONG $0x7c6e0f66; WORD $0x2824 // movd xmm7, dword [rsp + 40] + LONG $0x54b70f46; WORD $0x2a3e // movzx r10d, word [rsi + r15 + 42] + LONG $0x4cc40f66; WORD $0x1a0e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 26], 1 + QUAD $0x021a064cc40f4266 // pinsrw xmm1, word [rsi + r8 + 26], 2 + QUAD $0x031a264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 26], 3 + QUAD $0x041a2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 26], 4 + LONG $0x4cc40f66; WORD $0x1a1e; BYTE $0x05 // pinsrw xmm1, word [rsi + rbx + 26], 5 + LONG $0x4cc40f66; WORD $0x1a3e; BYTE $0x06 // pinsrw xmm1, word [rsi + rdi + 26], 6 + QUAD $0x071a0e4cc40f4266 // pinsrw xmm1, word [rsi + r9 + 26], 7 + LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 + LONG $0xc9630f66 // packsswb xmm1, xmm1 + LONG $0x64c40f66; WORD $0x1c0e; BYTE $0x01 // pinsrw xmm4, word [rsi + rcx + 28], 1 + QUAD $0x021c0664c40f4266 // pinsrw xmm4, word [rsi + r8 + 28], 2 QUAD $0x031c2664c40f4266 // pinsrw xmm4, word [rsi + r12 + 28], 3 QUAD $0x041c2e64c40f4266 // pinsrw xmm4, word [rsi + r13 + 28], 4 LONG $0x64c40f66; WORD $0x1c1e; BYTE $0x05 // pinsrw xmm4, word [rsi + rbx + 28], 5 @@ -23239,7 +24670,7 @@ LBB4_190: LONG $0x380f4166; WORD $0xef10 // pblendvb xmm5, xmm15, xmm0 LONG $0x546e0f66; WORD $0x1024 // movd xmm2, dword [rsp + 16] LONG $0x54b70f42; WORD $0x2c3e // movzx edx, word [rsi + r15 + 44] - LONG $0x20245489 // mov dword [rsp + 32], edx + LONG $0x28245489 // mov dword [rsp + 40], edx QUAD $0x071c0e64c40f4266 // pinsrw xmm4, word [rsi + r9 + 28], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 LONG $0xe4630f66 // packsswb xmm4, xmm4 @@ -23326,7 +24757,7 @@ LBB4_190: LONG $0x6f0f4166; BYTE $0xea // movdqa xmm5, xmm10 LONG $0xc16f0f66 // movdqa xmm0, xmm1 LONG $0x380f4166; WORD $0xef10 // pblendvb xmm5, xmm15, xmm0 - LONG $0x4c6e0f66; WORD $0x2024 // movd xmm1, dword [rsp + 32] + LONG $0x4c6e0f66; WORD $0x2824 // movd xmm1, dword [rsp + 40] LONG $0x54b70f46; WORD $0x363e // movzx r10d, word [rsi + r15 + 54] LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -23479,7 +24910,7 @@ LBB4_190: LONG $0x44c40f66; WORD $0x3e0e; BYTE $0x01 // pinsrw xmm0, word [rsi + rcx + 62], 1 QUAD $0x023e0644c40f4266 // pinsrw xmm0, word [rsi + r8 + 62], 2 QUAD $0x033e2644c40f4266 // pinsrw xmm0, word [rsi + r12 + 62], 3 - QUAD $0x0000008024b48b4c // mov r14, qword [rsp + 128] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x043e2e44c40f4266 // pinsrw xmm0, word [rsi + r13 + 62], 4 LONG $0x44c40f66; WORD $0x3e1e; BYTE $0x05 // pinsrw xmm0, word [rsi + rbx + 62], 5 LONG $0x44c40f66; WORD $0x3e3e; BYTE $0x06 // pinsrw xmm0, word [rsi + rdi + 62], 6 @@ -23500,391 +24931,21 @@ LBB4_190: LONG $0x600f4566; BYTE $0xc6 // punpcklbw xmm8, xmm14 LONG $0x600f4566; BYTE $0xe5 // 
punpcklbw xmm12, xmm13 LONG $0x610f4566; BYTE $0xe0 // punpcklwd xmm12, xmm8 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - LONG $0x7f0f45f3; WORD $0x8e24 // movdqu oword [r14 + 4*rcx], xmm12 - LONG $0x7f0f41f3; WORD $0x8e44; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm0 + LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] + LONG $0x7f0f44f3; WORD $0x8824 // movdqu oword [rax + 4*rcx], xmm12 + LONG $0x447f0ff3; WORD $0x1088 // movdqu oword [rax + 4*rcx + 16], xmm0 LONG $0x08c18348 // add rcx, 8 WORD $0x8949; BYTE $0xcf // mov r15, rcx - LONG $0x244c3b48; BYTE $0x18 // cmp rcx, qword [rsp + 24] - JNE LBB4_190 - QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] - LONG $0x24543b4c; BYTE $0x18 // cmp r10, qword [rsp + 24] - QUAD $0x0000009024bc8b4c // mov r15, qword [rsp + 144] + LONG $0x244c3b48; BYTE $0x20 // cmp rcx, qword [rsp + 32] + JNE LBB4_192 + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] + LONG $0x24543b4c; BYTE $0x20 // cmp r10, qword [rsp + 32] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] LONG $0x246c8b44; BYTE $0x40 // mov r13d, dword [rsp + 64] LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] - JNE LBB4_104 - JMP LBB4_144 - -LBB4_192: - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - LONG $0xfce08349 // and r8, -4 - WORD $0x894c; BYTE $0xc3 // mov rbx, r8 - LONG $0x07e3c148 // shl rbx, 7 - WORD $0x0148; BYTE $0xf3 // add rbx, rsi - LONG $0x861c8d4f // lea r11, [r14 + 4*r8] - WORD $0x280f; BYTE $0xc8 // movaps xmm1, xmm0 - LONG $0x00c8c60f // shufps xmm1, xmm0, 0 - LONG $0xfcc68148; WORD $0x0001; BYTE $0x00 // add rsi, 508 - WORD $0xc931 // xor ecx, ecx - LONG $0x6f0f4466; WORD $0x007d // movdqa xmm15, oword 0[rbp] /* [rip + .LCPI4_0] */ - LONG $0x6f0f4466; WORD $0x1045 // movdqa xmm8, oword 16[rbp] /* [rip + .LCPI4_1] */ - LONG $0x6f0f4466; WORD $0x2055 // movdqa xmm10, oword 32[rbp] /* [rip + .LCPI4_2] */ - LONG $0x6f0f4466; WORD $0x305d // movdqa xmm11, oword 48[rbp] /* [rip + .LCPI4_3] */ - LONG $0x6f0f4466; WORD $0x4065 // movdqa xmm12, oword 64[rbp] /* [rip + .LCPI4_4] */ - LONG $0x6f0f4466; WORD $0x506d // movdqa xmm13, oword 80[rbp] /* [rip + .LCPI4_5] */ - LONG $0x6f0f4466; WORD $0x6075 // movdqa xmm14, oword 96[rbp] /* [rip + .LCPI4_6] */ - LONG $0x6f0f4466; WORD $0x704d // movdqa xmm9, oword 112[rbp] /* [rip + .LCPI4_7] */ - -LBB4_193: - QUAD $0xfffffe04b6100ff3 // movss xmm6, dword [rsi - 508] - QUAD $0xfffffe08be100ff3 // movss xmm7, dword [rsi - 504] - QUAD $0xfffffe0cae100ff3 // movss xmm5, dword [rsi - 500] - QUAD $0xfffffe10a6100ff3 // movss xmm4, dword [rsi - 496] - QUAD $0xfffe84b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 380], 16 - QUAD $0xffff04b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 252], 32 - LONG $0x213a0f66; WORD $0x8476; BYTE $0x30 // insertps xmm6, dword [rsi - 124], 48 - LONG $0x04f1c20f // cmpneqps xmm6, xmm1 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - QUAD $0xfffe88be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 376], 16 - QUAD $0xffff08be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 248], 32 - LONG $0x213a0f66; WORD $0x887e; BYTE $0x30 // insertps xmm7, dword [rsi - 120], 48 - QUAD $0xfffe8cae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 372], 16 - QUAD $0xffff0cae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 244], 32 - LONG $0x213a0f66; WORD $0x8c6e; BYTE $0x30 // insertps xmm5, dword 
[rsi - 116], 48 - QUAD $0xfffe90a6213a0f66; WORD $0x10ff // insertps xmm4, dword [rsi - 368], 16 - QUAD $0xffff10a6213a0f66; WORD $0x20ff // insertps xmm4, dword [rsi - 240], 32 - LONG $0x213a0f66; WORD $0x9066; BYTE $0x30 // insertps xmm4, dword [rsi - 112], 48 - LONG $0x04f9c20f // cmpneqps xmm7, xmm1 - LONG $0xff6b0f66 // packssdw xmm7, xmm7 - LONG $0xff630f66 // packsswb xmm7, xmm7 - LONG $0xd76f0f66 // movdqa xmm2, xmm7 - LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 - LONG $0xd7f80f66 // psubb xmm2, xmm7 - QUAD $0xfffffe14be100ff3 // movss xmm7, dword [rsi - 492] - QUAD $0xfffe94be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 364], 16 - QUAD $0xffff14be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 236], 32 - LONG $0x213a0f66; WORD $0x947e; BYTE $0x30 // insertps xmm7, dword [rsi - 108], 48 - LONG $0xd6eb0f66 // por xmm2, xmm6 - QUAD $0xfffffe18b6100ff3 // movss xmm6, dword [rsi - 488] - QUAD $0xfffe98b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 360], 16 - QUAD $0xffff18b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 232], 32 - LONG $0x213a0f66; WORD $0x9876; BYTE $0x30 // insertps xmm6, dword [rsi - 104], 48 - LONG $0x04e9c20f // cmpneqps xmm5, xmm1 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xf5710f66; BYTE $0x02 // psllw xmm5, 2 - LONG $0xdb0f4166; BYTE $0xe8 // pand xmm5, xmm8 - LONG $0xeaeb0f66 // por xmm5, xmm2 - QUAD $0xfffffe1c9e100ff3 // movss xmm3, dword [rsi - 484] - QUAD $0xfffe9c9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 356], 16 - QUAD $0xffff1c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 228], 32 - LONG $0x213a0f66; WORD $0x9c5e; BYTE $0x30 // insertps xmm3, dword [rsi - 100], 48 - LONG $0x04e1c20f // cmpneqps xmm4, xmm1 - LONG $0xe46b0f66 // packssdw xmm4, xmm4 - LONG $0xe4630f66 // packsswb xmm4, xmm4 - LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 - LONG $0xf4710f66; BYTE $0x03 // psllw xmm4, 3 - LONG $0xdb0f4166; BYTE $0xe2 // pand xmm4, xmm10 - LONG $0x04f9c20f // cmpneqps xmm7, xmm1 - LONG $0xff6b0f66 // packssdw xmm7, xmm7 - LONG $0xff630f66 // packsswb xmm7, xmm7 - LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 - LONG $0xf7710f66; BYTE $0x04 // psllw xmm7, 4 - LONG $0xdb0f4166; BYTE $0xfb // pand xmm7, xmm11 - LONG $0xfceb0f66 // por xmm7, xmm4 - QUAD $0xfffffe20a6100ff3 // movss xmm4, dword [rsi - 480] - QUAD $0xfffea0a6213a0f66; WORD $0x10ff // insertps xmm4, dword [rsi - 352], 16 - QUAD $0xffff20a6213a0f66; WORD $0x20ff // insertps xmm4, dword [rsi - 224], 32 - LONG $0x213a0f66; WORD $0xa066; BYTE $0x30 // insertps xmm4, dword [rsi - 96], 48 - LONG $0xfdeb0f66 // por xmm7, xmm5 - QUAD $0xfffffe24ae100ff3 // movss xmm5, dword [rsi - 476] - QUAD $0xfffea4ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 348], 16 - QUAD $0xffff24ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 220], 32 - LONG $0x213a0f66; WORD $0xa46e; BYTE $0x30 // insertps xmm5, dword [rsi - 92], 48 - LONG $0x04e9c20f // cmpneqps xmm5, xmm1 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0x04f1c20f // cmpneqps xmm6, xmm1 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0xf6710f66; BYTE $0x05 // psllw xmm6, 5 - LONG $0xdb0f4166; BYTE $0xf4 // pand xmm6, xmm12 - LONG $0x04d9c20f // cmpneqps xmm3, xmm1 - LONG $0xdb6b0f66 // packssdw xmm3, xmm3 - LONG $0xdb630f66 // packsswb xmm3, xmm3 - LONG $0xdb0f4166; BYTE $0xdf // pand 
xmm3, xmm15 - LONG $0xf3710f66; BYTE $0x06 // psllw xmm3, 6 - LONG $0xdb0f4166; BYTE $0xdd // pand xmm3, xmm13 - LONG $0xdeeb0f66 // por xmm3, xmm6 - QUAD $0xfffffe2896100ff3 // movss xmm2, dword [rsi - 472] - QUAD $0xfffea896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 344], 16 - QUAD $0xffff2896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 216], 32 - LONG $0x213a0f66; WORD $0xa856; BYTE $0x30 // insertps xmm2, dword [rsi - 88], 48 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0x04e1c20f // cmpneqps xmm4, xmm1 - LONG $0xe46b0f66 // packssdw xmm4, xmm4 - LONG $0xe4630f66 // packsswb xmm4, xmm4 - LONG $0xf4710f66; BYTE $0x07 // psllw xmm4, 7 - LONG $0xdb0f4166; BYTE $0xe6 // pand xmm4, xmm14 - LONG $0xe3eb0f66 // por xmm4, xmm3 - QUAD $0xfffffe2c9e100ff3 // movss xmm3, dword [rsi - 468] - QUAD $0xfffeac9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 340], 16 - QUAD $0xffff2c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 212], 32 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0x213a0f66; WORD $0xac5e; BYTE $0x30 // insertps xmm3, dword [rsi - 84], 48 - LONG $0xe7eb0f66 // por xmm4, xmm7 - LONG $0x04d1c20f // cmpneqps xmm2, xmm1 - LONG $0xd26b0f66 // packssdw xmm2, xmm2 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xf26f0f66 // movdqa xmm6, xmm2 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0xf2f80f66 // psubb xmm6, xmm2 - QUAD $0xfffffe30be100ff3 // movss xmm7, dword [rsi - 464] - QUAD $0xfffeb0be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 336], 16 - QUAD $0xffff30be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 208], 32 - LONG $0x213a0f66; WORD $0xb07e; BYTE $0x30 // insertps xmm7, dword [rsi - 80], 48 - LONG $0xf5eb0f66 // por xmm6, xmm5 - QUAD $0xfffffe34ae100ff3 // movss xmm5, dword [rsi - 460] - QUAD $0xfffeb4ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 332], 16 - QUAD $0xffff34ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 204], 32 - LONG $0x213a0f66; WORD $0xb46e; BYTE $0x30 // insertps xmm5, dword [rsi - 76], 48 - LONG $0x04d9c20f // cmpneqps xmm3, xmm1 - LONG $0xdb6b0f66 // packssdw xmm3, xmm3 - LONG $0xdb630f66 // packsswb xmm3, xmm3 - LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 - LONG $0xf3710f66; BYTE $0x02 // psllw xmm3, 2 - LONG $0xdb0f4166; BYTE $0xd8 // pand xmm3, xmm8 - LONG $0xdeeb0f66 // por xmm3, xmm6 - QUAD $0xfffffe38b6100ff3 // movss xmm6, dword [rsi - 456] - QUAD $0xfffeb8b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 328], 16 - QUAD $0xffff38b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 200], 32 - LONG $0x213a0f66; WORD $0xb876; BYTE $0x30 // insertps xmm6, dword [rsi - 72], 48 - LONG $0x04f9c20f // cmpneqps xmm7, xmm1 - LONG $0xff6b0f66 // packssdw xmm7, xmm7 - LONG $0xff630f66 // packsswb xmm7, xmm7 - LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 - LONG $0xf7710f66; BYTE $0x03 // psllw xmm7, 3 - LONG $0xdb0f4166; BYTE $0xfa // pand xmm7, xmm10 - LONG $0x04e9c20f // cmpneqps xmm5, xmm1 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xf5710f66; BYTE $0x04 // psllw xmm5, 4 - LONG $0xdb0f4166; BYTE $0xeb // pand xmm5, xmm11 - LONG $0xefeb0f66 // por xmm5, xmm7 - QUAD $0xfffffe3c96100ff3 // movss xmm2, dword [rsi - 452] - QUAD $0xfffebc96213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 324], 16 - QUAD $0xffff3c96213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 196], 32 - LONG $0x213a0f66; WORD $0xbc56; BYTE $0x30 // insertps xmm2, dword 
[rsi - 68], 48 - LONG $0xebeb0f66 // por xmm5, xmm3 - QUAD $0xfffffe40be100ff3 // movss xmm7, dword [rsi - 448] - QUAD $0xfffec0be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 320], 16 - QUAD $0xffff40be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 192], 32 - LONG $0x213a0f66; WORD $0xc07e; BYTE $0x30 // insertps xmm7, dword [rsi - 64], 48 - LONG $0x04f1c20f // cmpneqps xmm6, xmm1 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0xf6710f66; BYTE $0x05 // psllw xmm6, 5 - LONG $0xdb0f4166; BYTE $0xf4 // pand xmm6, xmm12 - LONG $0x04d1c20f // cmpneqps xmm2, xmm1 - LONG $0xd26b0f66 // packssdw xmm2, xmm2 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 - LONG $0xf2710f66; BYTE $0x06 // psllw xmm2, 6 - LONG $0xdb0f4166; BYTE $0xd5 // pand xmm2, xmm13 - LONG $0xd6eb0f66 // por xmm2, xmm6 - QUAD $0xfffffe44b6100ff3 // movss xmm6, dword [rsi - 444] - QUAD $0xfffec4b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 316], 16 - QUAD $0xffff44b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 188], 32 - LONG $0x213a0f66; WORD $0xc476; BYTE $0x30 // insertps xmm6, dword [rsi - 60], 48 - LONG $0x04f1c20f // cmpneqps xmm6, xmm1 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0x04f9c20f // cmpneqps xmm7, xmm1 - LONG $0xff6b0f66 // packssdw xmm7, xmm7 - LONG $0xff630f66 // packsswb xmm7, xmm7 - LONG $0xf7710f66; BYTE $0x07 // psllw xmm7, 7 - LONG $0xdb0f4166; BYTE $0xfe // pand xmm7, xmm14 - LONG $0xfaeb0f66 // por xmm7, xmm2 - QUAD $0xfffffe4896100ff3 // movss xmm2, dword [rsi - 440] - QUAD $0xfffec896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 312], 16 - QUAD $0xffff4896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 184], 32 - LONG $0x213a0f66; WORD $0xc856; BYTE $0x30 // insertps xmm2, dword [rsi - 56], 48 - LONG $0xfdeb0f66 // por xmm7, xmm5 - QUAD $0xfffffe4c9e100ff3 // movss xmm3, dword [rsi - 436] - QUAD $0xfffecc9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 308], 16 - QUAD $0xffff4c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 180], 32 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0x213a0f66; WORD $0xcc5e; BYTE $0x30 // insertps xmm3, dword [rsi - 52], 48 - LONG $0xe7620f66 // punpckldq xmm4, xmm7 - LONG $0x04d1c20f // cmpneqps xmm2, xmm1 - LONG $0xd26b0f66 // packssdw xmm2, xmm2 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xfa6f0f66 // movdqa xmm7, xmm2 - LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 - LONG $0xfaf80f66 // psubb xmm7, xmm2 - QUAD $0xfffffe50ae100ff3 // movss xmm5, dword [rsi - 432] - QUAD $0xfffed0ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 304], 16 - QUAD $0xffff50ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 176], 32 - LONG $0x213a0f66; WORD $0xd06e; BYTE $0x30 // insertps xmm5, dword [rsi - 48], 48 - LONG $0xfeeb0f66 // por xmm7, xmm6 - QUAD $0xfffffe54b6100ff3 // movss xmm6, dword [rsi - 428] - QUAD $0xfffed4b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 300], 16 - QUAD $0xffff54b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 172], 32 - LONG $0x213a0f66; WORD $0xd476; BYTE $0x30 // insertps xmm6, dword [rsi - 44], 48 - LONG $0x04d9c20f // cmpneqps xmm3, xmm1 - LONG $0xdb6b0f66 // packssdw xmm3, xmm3 - LONG $0xdb630f66 // packsswb xmm3, xmm3 - LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 - LONG $0xf3710f66; BYTE $0x02 // psllw xmm3, 2 - LONG $0xdb0f4166; BYTE $0xd8 // pand 
xmm3, xmm8 - LONG $0xdfeb0f66 // por xmm3, xmm7 - QUAD $0xfffffe58be100ff3 // movss xmm7, dword [rsi - 424] - QUAD $0xfffed8be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 296], 16 - QUAD $0xffff58be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 168], 32 - LONG $0x213a0f66; WORD $0xd87e; BYTE $0x30 // insertps xmm7, dword [rsi - 40], 48 - LONG $0x04e9c20f // cmpneqps xmm5, xmm1 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xf5710f66; BYTE $0x03 // psllw xmm5, 3 - LONG $0xdb0f4166; BYTE $0xea // pand xmm5, xmm10 - LONG $0x04f1c20f // cmpneqps xmm6, xmm1 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0xf6710f66; BYTE $0x04 // psllw xmm6, 4 - LONG $0xdb0f4166; BYTE $0xf3 // pand xmm6, xmm11 - LONG $0xf5eb0f66 // por xmm6, xmm5 - QUAD $0xfffffe5c96100ff3 // movss xmm2, dword [rsi - 420] - QUAD $0xfffedc96213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 292], 16 - QUAD $0xffff5c96213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 164], 32 - LONG $0x213a0f66; WORD $0xdc56; BYTE $0x30 // insertps xmm2, dword [rsi - 36], 48 - LONG $0xf3eb0f66 // por xmm6, xmm3 - QUAD $0xfffffe60ae100ff3 // movss xmm5, dword [rsi - 416] - QUAD $0xfffee0ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 288], 16 - QUAD $0xffff60ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 160], 32 - LONG $0x213a0f66; WORD $0xe06e; BYTE $0x30 // insertps xmm5, dword [rsi - 32], 48 - LONG $0x04f9c20f // cmpneqps xmm7, xmm1 - LONG $0xff6b0f66 // packssdw xmm7, xmm7 - LONG $0xff630f66 // packsswb xmm7, xmm7 - LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 - LONG $0xf7710f66; BYTE $0x05 // psllw xmm7, 5 - LONG $0xdb0f4166; BYTE $0xfc // pand xmm7, xmm12 - LONG $0x04d1c20f // cmpneqps xmm2, xmm1 - LONG $0xd26b0f66 // packssdw xmm2, xmm2 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 - LONG $0xf2710f66; BYTE $0x06 // psllw xmm2, 6 - LONG $0xdb0f4166; BYTE $0xd5 // pand xmm2, xmm13 - LONG $0xd7eb0f66 // por xmm2, xmm7 - QUAD $0xfffffe64be100ff3 // movss xmm7, dword [rsi - 412] - QUAD $0xfffee4be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 284], 16 - QUAD $0xffff64be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 156], 32 - LONG $0x213a0f66; WORD $0xe47e; BYTE $0x30 // insertps xmm7, dword [rsi - 28], 48 - LONG $0x04f9c20f // cmpneqps xmm7, xmm1 - LONG $0xff6b0f66 // packssdw xmm7, xmm7 - LONG $0xff630f66 // packsswb xmm7, xmm7 - LONG $0x04e9c20f // cmpneqps xmm5, xmm1 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xf5710f66; BYTE $0x07 // psllw xmm5, 7 - LONG $0xdb0f4166; BYTE $0xee // pand xmm5, xmm14 - LONG $0xeaeb0f66 // por xmm5, xmm2 - QUAD $0xfffffe6896100ff3 // movss xmm2, dword [rsi - 408] - QUAD $0xfffee896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 280], 16 - QUAD $0xffff6896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 152], 32 - LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 - LONG $0x213a0f66; WORD $0xe856; BYTE $0x30 // insertps xmm2, dword [rsi - 24], 48 - LONG $0xeeeb0f66 // por xmm5, xmm6 - LONG $0x04d1c20f // cmpneqps xmm2, xmm1 - LONG $0xd26b0f66 // packssdw xmm2, xmm2 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xf26f0f66 // movdqa xmm6, xmm2 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0xf2f80f66 // psubb xmm6, xmm2 - QUAD $0xfffffe6c9e100ff3 // 
movss xmm3, dword [rsi - 404] - QUAD $0xfffeec9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 276], 16 - QUAD $0xffff6c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 148], 32 - LONG $0x213a0f66; WORD $0xec5e; BYTE $0x30 // insertps xmm3, dword [rsi - 20], 48 - LONG $0xf7eb0f66 // por xmm6, xmm7 - QUAD $0xfffffe7096100ff3 // movss xmm2, dword [rsi - 400] - QUAD $0xfffef096213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 272], 16 - QUAD $0xffff7096213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 144], 32 - LONG $0x213a0f66; WORD $0xf056; BYTE $0x30 // insertps xmm2, dword [rsi - 16], 48 - LONG $0x04d9c20f // cmpneqps xmm3, xmm1 - LONG $0xdb6b0f66 // packssdw xmm3, xmm3 - LONG $0xdb630f66 // packsswb xmm3, xmm3 - LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 - LONG $0xf3710f66; BYTE $0x02 // psllw xmm3, 2 - LONG $0xdb0f4166; BYTE $0xd8 // pand xmm3, xmm8 - LONG $0xdeeb0f66 // por xmm3, xmm6 - QUAD $0xfffffe74b6100ff3 // movss xmm6, dword [rsi - 396] - QUAD $0xfffef4b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 268], 16 - QUAD $0xffff74b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 140], 32 - LONG $0x213a0f66; WORD $0xf476; BYTE $0x30 // insertps xmm6, dword [rsi - 12], 48 - LONG $0x04d1c20f // cmpneqps xmm2, xmm1 - LONG $0xd26b0f66 // packssdw xmm2, xmm2 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 - LONG $0xf2710f66; BYTE $0x03 // psllw xmm2, 3 - LONG $0xdb0f4166; BYTE $0xd2 // pand xmm2, xmm10 - LONG $0x04f1c20f // cmpneqps xmm6, xmm1 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0xf6710f66; BYTE $0x04 // psllw xmm6, 4 - LONG $0xdb0f4166; BYTE $0xf3 // pand xmm6, xmm11 - LONG $0xf2eb0f66 // por xmm6, xmm2 - QUAD $0xfffffe78be100ff3 // movss xmm7, dword [rsi - 392] - QUAD $0xfffef8be213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 264], 16 - QUAD $0xffff78be213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 136], 32 - LONG $0x213a0f66; WORD $0xf87e; BYTE $0x30 // insertps xmm7, dword [rsi - 8], 48 - LONG $0xf3eb0f66 // por xmm6, xmm3 - QUAD $0xfffffe7c96100ff3 // movss xmm2, dword [rsi - 388] - QUAD $0xfffefc96213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 260], 16 - QUAD $0xffff7c96213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 132], 32 - LONG $0x213a0f66; WORD $0xfc56; BYTE $0x30 // insertps xmm2, dword [rsi - 4], 48 - LONG $0x04f9c20f // cmpneqps xmm7, xmm1 - LONG $0xff6b0f66 // packssdw xmm7, xmm7 - LONG $0xff630f66 // packsswb xmm7, xmm7 - LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 - LONG $0xf7710f66; BYTE $0x05 // psllw xmm7, 5 - LONG $0xdb0f4166; BYTE $0xfc // pand xmm7, xmm12 - LONG $0x04d1c20f // cmpneqps xmm2, xmm1 - LONG $0xd26b0f66 // packssdw xmm2, xmm2 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xdb0f4166; BYTE $0xd7 // pand xmm2, xmm15 - LONG $0xf2710f66; BYTE $0x06 // psllw xmm2, 6 - LONG $0xdb0f4166; BYTE $0xd5 // pand xmm2, xmm13 - LONG $0xd7eb0f66 // por xmm2, xmm7 - QUAD $0xfffffe809e100ff3 // movss xmm3, dword [rsi - 384] - QUAD $0xffff009e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 256], 16 - LONG $0x213a0f66; WORD $0x805e; BYTE $0x20 // insertps xmm3, dword [rsi - 128], 32 - LONG $0x213a0f66; WORD $0x301e // insertps xmm3, dword [rsi], 48 - LONG $0x04d9c20f // cmpneqps xmm3, xmm1 - LONG $0xdb6b0f66 // packssdw xmm3, xmm3 - LONG $0xdb630f66 // packsswb xmm3, xmm3 - LONG $0xf3710f66; BYTE $0x07 // psllw xmm3, 7 - LONG $0xdb0f4166; BYTE 
$0xde // pand xmm3, xmm14 - LONG $0xdaeb0f66 // por xmm3, xmm2 - LONG $0xdeeb0f66 // por xmm3, xmm6 - LONG $0xeb620f66 // punpckldq xmm5, xmm3 - LONG $0xe5600f66 // punpcklbw xmm4, xmm5 - LONG $0x380f4166; WORD $0xe100 // pshufb xmm4, xmm9 - LONG $0x7f0f41f3; WORD $0x8e24 // movdqu oword [r14 + 4*rcx], xmm4 - LONG $0x04c18348 // add rcx, 4 - LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // add rsi, 512 - WORD $0x3949; BYTE $0xc8 // cmp r8, rcx - JNE LBB4_193 - WORD $0x394d; BYTE $0xc2 // cmp r10, r8 - JNE LBB4_127 - JMP LBB4_148 + JNE LBB4_108 + JMP LBB4_111 DATA LCDATA4<>+0x000(SB)/8, $0x0000000001010101 DATA LCDATA4<>+0x008(SB)/8, $0x0000000000000000 @@ -23952,15 +25013,15 @@ TEXT ·_comparison_not_equal_scalar_arr_sse4(SB), $312-48 LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx WORD $0x8949; BYTE $0xd6 // mov r14, rdx WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JG LBB5_26 + JG LBB5_27 WORD $0xff83; BYTE $0x03 // cmp edi, 3 JLE LBB5_2 WORD $0xff83; BYTE $0x04 // cmp edi, 4 - JE LBB5_99 + JE LBB5_100 WORD $0xff83; BYTE $0x05 // cmp edi, 5 - JE LBB5_122 + JE LBB5_123 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB5_199 + JNE LBB5_198 WORD $0x8b44; BYTE $0x1e // mov r11d, dword [rsi] LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 @@ -24003,7 +25064,7 @@ LBB5_17: LONG $0x20ff8349 // cmp r15, 32 JL LBB5_21 QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 QUAD $0x000000a82494894c // mov qword [rsp + 168], r10 LBB5_19: @@ -24012,35 +25073,35 @@ LBB5_19: LONG $0x785e3b45 // cmp r11d, dword [r14 + 120] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x745e3b45 // cmp r11d, dword [r14 + 116] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x705e3b45 // cmp r11d, dword [r14 + 112] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x6c5e3b45 // cmp r11d, dword [r14 + 108] + LONG $0x705e3b45 // cmp r11d, dword [r14 + 112] LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x6c5e3b45 // cmp r11d, dword [r14 + 108] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x685e3b45 // cmp r11d, dword [r14 + 104] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] LONG $0x645e3b45 // cmp r11d, dword [r14 + 100] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x5c5e3b45 // cmp r11d, dword [r14 + 92] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] LONG $0x585e3b45 // cmp r11d, dword [r14 + 88] - QUAD $0x000000802494950f // setne byte [rsp + 128] + LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x545e3b45 // cmp r11d, dword [r14 + 84] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x505e3b45 // cmp r11d, dword [r14 + 80] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] LONG $0x4c5e3b45 // cmp r11d, dword [r14 + 76] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] LONG $0x485e3b45 // cmp r11d, dword [r14 + 72] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x445e3b45 // cmp r11d, dword [r14 + 68] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x3c5e3b45 // cmp r11d, dword 
[r14 + 60] LONG $0xd0950f41 // setne r8b LONG $0x385e3b45 // cmp r11d, dword [r14 + 56] QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x345e3b45 // cmp r11d, dword [r14 + 52] - QUAD $0x000000902494950f // setne byte [rsp + 144] + QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x305e3b45 // cmp r11d, dword [r14 + 48] LONG $0xd7950f40 // setne dil LONG $0x2c5e3b45 // cmp r11d, dword [r14 + 44] @@ -24058,29 +25119,29 @@ LBB5_19: LONG $0x105e3b45 // cmp r11d, dword [r14 + 16] WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x0c5e3b45 // cmp r11d, dword [r14 + 12] - LONG $0xd4950f41 // setne r12b + LONG $0xd5950f41 // setne r13b LONG $0x085e3b45 // cmp r11d, dword [r14 + 8] LONG $0xd7950f41 // setne r15b WORD $0x3b45; BYTE $0x1e // cmp r11d, dword [r14] - QUAD $0x000000982494950f // setne byte [rsp + 152] + QUAD $0x000000c02494950f // setne byte [rsp + 192] LONG $0x045e3b45 // cmp r11d, dword [r14 + 4] - WORD $0x894d; BYTE $0xf5 // mov r13, r14 + WORD $0x894d; BYTE $0xf4 // mov r12, r14 LONG $0xd6950f41 // setne r14b - LONG $0x205d3b45 // cmp r11d, dword [r13 + 32] - QUAD $0x000000c02494950f // setne byte [rsp + 192] - LONG $0x405d3b45 // cmp r11d, dword [r13 + 64] + LONG $0x245c3b45; BYTE $0x20 // cmp r11d, dword [r12 + 32] QUAD $0x000000b02494950f // setne byte [rsp + 176] - LONG $0x605d3b45 // cmp r11d, dword [r13 + 96] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + LONG $0x245c3b45; BYTE $0x40 // cmp r11d, dword [r12 + 64] + QUAD $0x000000902494950f // setne byte [rsp + 144] + LONG $0x245c3b45; BYTE $0x60 // cmp r11d, dword [r12 + 96] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] WORD $0x0045; BYTE $0xf6 // add r14b, r14b - QUAD $0x0000009824b40244 // add r14b, byte [rsp + 152] + QUAD $0x000000c024b40244 // add r14b, byte [rsp + 192] LONG $0x02e7c041 // shl r15b, 2 WORD $0x0845; BYTE $0xf7 // or r15b, r14b - WORD $0x894d; BYTE $0xee // mov r14, r13 - LONG $0x03e4c041 // shl r12b, 3 - WORD $0x0845; BYTE $0xfc // or r12b, r15b + WORD $0x894d; BYTE $0xe6 // mov r14, r12 + LONG $0x03e5c041 // shl r13b, 3 + WORD $0x0845; BYTE $0xfd // or r13b, r15b WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0x0844; BYTE $0xe1 // or cl, r12b + WORD $0x0844; BYTE $0xe9 // or cl, r13b LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0xca08 // or dl, cl @@ -24090,14 +25151,14 @@ LBB5_19: WORD $0xd008 // or al, dl WORD $0x8841; BYTE $0x07 // mov byte [r15], al WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000c024b40240 // add sil, byte [rsp + 192] + QUAD $0x000000b024b40240 // add sil, byte [rsp + 176] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xf1 // or r9b, sil LONG $0x03e2c041 // shl r10b, 3 WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e7c040 // shl dil, 4 WORD $0x0844; BYTE $0xd7 // or dil, r10b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf8 // or al, dil QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] @@ -24106,50 +25167,50 @@ LBB5_19: WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x01478845 // mov byte [r15 + 1], r8b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al - LONG $0xb0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 176] + LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] WORD $0xc189 
// mov ecx, eax - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] + LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl LONG $0x02478841 // mov byte [r15 + 2], al LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x60244402 // add al, byte [rsp + 96] + LONG $0x70244402 // add al, byte [rsp + 112] WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -24166,12 +25227,12 @@ LBB5_19: QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 JNE LBB5_19 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] LBB5_21: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 @@ -24182,7 +25243,7 @@ LBB5_21: WORD $0xff31 // xor edi, edi LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] -LBB5_143: +LBB5_144: WORD $0x3b45; BYTE $0x1e // cmp r11d, dword [r14] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -24210,18 +25271,18 @@ LBB5_143: WORD $0xd330 // xor bl, dl LONG $0x371c8841 // mov byte [r15 + rsi], bl WORD $0x3949; BYTE $0xf9 // cmp r9, rdi - JNE LBB5_143 + JNE LBB5_144 JMP LBB5_24 -LBB5_26: +LBB5_27: WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JLE LBB5_27 + JLE LBB5_28 WORD $0xff83; BYTE $0x09 // cmp edi, 9 - JE LBB5_158 + JE LBB5_159 WORD $0xff83; BYTE $0x0b // cmp edi, 11 - JE LBB5_170 + JE LBB5_171 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB5_199 + JNE LBB5_198 LONG $0x1f578d4d // 
lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 LONG $0xd7490f4d // cmovns r10, r15 @@ -24231,14 +25292,16 @@ LBB5_26: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x06100ff2 // movsd xmm0, qword [rsi] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_49 + JE LBB5_50 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] -LBB5_47: +LBB5_48: LONG $0x2e0f4166; BYTE $0x06 // ucomisd xmm0, qword [r14] LONG $0x08768d4d // lea r14, [r14 + 8] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xca08 // or dl, cl WORD $0xdaf6 // neg dl LONG $0x07708d48 // lea rsi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax @@ -24256,195 +25319,283 @@ LBB5_47: LONG $0x303c8841 // mov byte [r8 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_47 + JNE LBB5_48 LONG $0x24448348; WORD $0x0108 // add qword [rsp + 8], 1 -LBB5_49: +LBB5_50: LONG $0x05fac149 // sar r10, 5 LONG $0x20ff8349 // cmp r15, 32 - JL LBB5_53 + JL LBB5_54 QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000a82494894c // mov qword [rsp + 168], r10 - QUAD $0x000000982494894c // mov qword [rsp + 152], r10 + QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 -LBB5_51: +LBB5_52: WORD $0x894c; BYTE $0xf2 // mov rdx, r14 LONG $0x2e0f4166; BYTE $0x06 // ucomisd xmm0, qword [r14] - QUAD $0x000000c02494950f // setne byte [rsp + 192] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x18244c88 // mov byte [rsp + 24], cl LONG $0x2e0f4166; WORD $0x0846 // ucomisd xmm0, qword [r14 + 8] - LONG $0xd1950f41 // setne r9b - LONG $0x2e0f4166; WORD $0x1046 // ucomisd xmm0, qword [r14 + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al + LONG $0x2e0f4166; WORD $0x1046 // ucomisd xmm0, qword [r14 + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x2e0f4166; WORD $0x1846 // ucomisd xmm0, qword [r14 + 24] - LONG $0xd5950f41 // setne r13b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x2e0f4166; WORD $0x2046 // ucomisd xmm0, qword [r14 + 32] - QUAD $0x000000b02494950f // setne byte [rsp + 176] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x10244c88 // mov byte [rsp + 16], cl LONG $0x2e0f4166; WORD $0x2846 // ucomisd xmm0, qword [r14 + 40] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x2e0f4166; WORD $0x3046 // ucomisd xmm0, qword [r14 + 48] - WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x2e0f4166; WORD $0x3846 // ucomisd xmm0, qword [r14 + 56] - LONG $0xd4950f41 // setne r12b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x2e0f4166; WORD $0x4046 // ucomisd xmm0, qword [r14 + 64] - QUAD $0x000000902494950f // setne byte [rsp + 144] + WORD $0x9a0f; BYTE $0xd0 // setp al 
+ WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x2e0f4166; WORD $0x4846 // ucomisd xmm0, qword [r14 + 72] - LONG $0xd6950f40 // setne sil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x2e0f4166; WORD $0x5046 // ucomisd xmm0, qword [r14 + 80] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al + LONG $0x50245c88 // mov byte [rsp + 80], bl LONG $0x2e0f4166; WORD $0x5846 // ucomisd xmm0, qword [r14 + 88] - LONG $0xd0950f41 // setne r8b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al + LONG $0x48245c88 // mov byte [rsp + 72], bl LONG $0x2e0f4166; WORD $0x6046 // ucomisd xmm0, qword [r14 + 96] - LONG $0xd2950f41 // setne r10b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al + LONG $0x68245c88 // mov byte [rsp + 104], bl LONG $0x2e0f4166; WORD $0x6846 // ucomisd xmm0, qword [r14 + 104] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al + LONG $0x60245c88 // mov byte [rsp + 96], bl LONG $0x2e0f4166; WORD $0x7046 // ucomisd xmm0, qword [r14 + 112] - QUAD $0x000000882494950f // setne byte [rsp + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd5950f41 // setne r13b + WORD $0x0841; BYTE $0xc5 // or r13b, al LONG $0x2e0f4166; WORD $0x7846 // ucomisd xmm0, qword [r14 + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl QUAD $0x000080862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 128] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl QUAD $0x000088862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 136] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl QUAD $0x000090862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 144] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd3 // setne bl + WORD $0xc308 // or bl, al QUAD $0x000098862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 152] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl QUAD $0x0000a0862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 160] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x98248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], cl QUAD $0x0000a8862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 168] - QUAD $0x000000802494950f // setne byte [rsp + 128] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl QUAD 
$0x0000b0862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 176] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd4950f41 // setne r12b + WORD $0x0841; BYTE $0xc4 // or r12b, al QUAD $0x0000b8862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 184] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al + QUAD $0x0000c0862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 192] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x90248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], cl + QUAD $0x0000c8862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 200] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al + QUAD $0x0000d0862e0f4166; BYTE $0x00 // ucomisd xmm0, qword [r14 + 208] + WORD $0x9a0f; BYTE $0xd0 // setp al LONG $0xd6950f41 // setne r14b - QUAD $0x000000c0822e0f66 // ucomisd xmm0, qword [rdx + 192] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - QUAD $0x000000c8822e0f66 // ucomisd xmm0, qword [rdx + 200] - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] - QUAD $0x000000d0822e0f66 // ucomisd xmm0, qword [rdx + 208] - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] + WORD $0x0841; BYTE $0xc6 // or r14b, al QUAD $0x000000d8822e0f66 // ucomisd xmm0, qword [rdx + 216] - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al QUAD $0x000000e0822e0f66 // ucomisd xmm0, qword [rdx + 224] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xc0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 192], cl QUAD $0x000000e8822e0f66 // ucomisd xmm0, qword [rdx + 232] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xa8248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 168], cl QUAD $0x000000f0822e0f66 // ucomisd xmm0, qword [rdx + 240] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al QUAD $0x000000f8822e0f66 // ucomisd xmm0, qword [rdx + 248] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x000000c0248c0244 // add r9b, byte [rsp + 192] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e4c041 // shl r12b, 7 - WORD $0x0841; BYTE $0xdc // or r12b, bl - LONG $0x02e3c041 // shl r11b, 2 - WORD $0x0845; BYTE $0xcb // or r11b, r9b - WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x0000009024b40240 // add sil, byte [rsp + 144] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xdd // or r13b, r11b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xf7 // or dil, sil - QUAD $0x000000b0249cb60f // movzx ebx, byte [rsp + 176] - WORD $0xe3c0; BYTE $0x04 // shl bl, 4 - WORD $0x0844; BYTE $0xeb // or bl, r13b - WORD $0x8941; BYTE $0xd9 // mov r9d, ebx - LONG $0x24748b48; BYTE $0x08 // mov rsi, qword [rsp + 8] - LONG $0x03e0c041 // shl r8b, 3 - WORD $0x0841; BYTE $0xf8 // or r8b, dil - LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] - WORD $0xe3c0; BYTE $0x05 // shl bl, 5 - WORD $0x0844; BYTE $0xcb // or bl, r9b - LONG 
$0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xc2 // or r10b, r8b - LONG $0x05e7c041 // shl r15b, 5 - WORD $0x0845; BYTE $0xd7 // or r15b, r10b - QUAD $0x0000008824bcb60f // movzx edi, byte [rsp + 136] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0x0841; BYTE $0xdc // or r12b, bl - WORD $0x0844; BYTE $0xf9 // or cl, r15b - LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] - WORD $0xdb00 // add bl, bl - LONG $0x78245c02 // add bl, byte [rsp + 120] - WORD $0xdf89 // mov edi, ebx - LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] - WORD $0xe3c0; BYTE $0x02 // shl bl, 2 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0xdf89 // mov edi, ebx - LONG $0x245cb60f; BYTE $0x70 // movzx ebx, byte [rsp + 112] - WORD $0xe3c0; BYTE $0x03 // shl bl, 3 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0xdf89 // mov edi, ebx - LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] - WORD $0xe3c0; BYTE $0x04 // shl bl, 4 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0xdf89 // mov edi, ebx - QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] - WORD $0xe3c0; BYTE $0x05 // shl bl, 5 - WORD $0x0840; BYTE $0xfb // or bl, dil - WORD $0x8844; BYTE $0x26 // mov byte [rsi], r12b - LONG $0x247cb60f; BYTE $0x58 // movzx edi, byte [rsp + 88] - LONG $0x06e7c040 // shl dil, 6 - LONG $0x07e6c041 // shl r14b, 7 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0x4e88; BYTE $0x01 // mov byte [rsi + 1], cl - WORD $0x0841; BYTE $0xde // or r14b, bl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f40 // setne sil + WORD $0x0840; BYTE $0xc6 // or sil, al + WORD $0x0045; BYTE $0xdb // add r11b, r11b + LONG $0x245c0244; BYTE $0x18 // add r11b, byte [rsp + 24] + LONG $0x05e1c041 // shl r9b, 5 + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + WORD $0x0844; BYTE $0xc8 // or al, r9b + WORD $0x8941; BYTE $0xc1 // mov r9d, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xd8 // or al, r11b + WORD $0x8941; BYTE $0xc3 // mov r11d, eax + LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] WORD $0xc900 // add cl, cl - LONG $0x20244c02 // add cl, byte [rsp + 32] - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + LONG $0x30244c02 // add cl, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xc8 // or al, r9b + WORD $0x8941; BYTE $0xc1 // mov r9d, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xd8 // or al, r11b + WORD $0x8941; BYTE $0xc3 // mov r11d, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd8 // or al, r11b + LONG $0x10244488 // mov byte [rsp + 16], al + LONG 
$0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0x8941; BYTE $0xc3 // mov r11d, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + LONG $0x06e5c041 // shl r13b, 6 + WORD $0x0841; BYTE $0xc5 // or r13b, al + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + WORD $0xc900 // add cl, cl + LONG $0x78244c02 // add cl, byte [rsp + 120] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0xcb08 // or bl, cl + QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd908 // or cl, bl WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + QUAD $0x00000098248cb60f // movzx ecx, byte [rsp + 152] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd908 // or cl, bl - WORD $0xcb89 // mov ebx, ecx - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd908 // or cl, bl - LONG $0x245cb60f; BYTE $0x10 // movzx ebx, byte [rsp + 16] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - WORD $0xe0c0; BYTE $0x07 // shl al, 7 - WORD $0xd808 // or al, bl - WORD $0xc808 // or al, cl - LONG $0x02768844 // mov byte [rsi + 2], r14b - WORD $0x4688; BYTE $0x03 // mov byte [rsi + 3], al + LONG $0x244c0a44; BYTE $0x10 // or r9b, byte [rsp + 16] + QUAD $0x000000b0249cb60f // movzx ebx, byte [rsp + 176] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e4c041 // shl r12b, 6 + WORD $0x0841; BYTE $0xdc // or r12b, bl + WORD $0x0845; BYTE $0xdd // or r13b, r11b + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xe0 // or r8b, r12b + WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0045; BYTE $0xff // add r15b, r15b + QUAD $0x0000009024bc0244 // add r15b, byte [rsp + 144] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xfe // or r14b, r15b + LONG $0x03e2c041 // shl r10b, 3 + WORD $0x0845; BYTE $0xf2 // or r10b, r14b + QUAD $0x000000c0248cb60f // movzx ecx, byte [rsp + 192] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xd1 // or cl, r10b + WORD $0x8844; BYTE $0x08 // mov byte [rax], r9b + QUAD $0x000000a8249cb60f // movzx ebx, byte [rsp + 168] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x01688844 // mov byte [rax + 1], r13b + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0840; BYTE $0xce // or sil, cl + LONG $0x02408844 // mov byte [rax + 2], r8b + LONG $0x03708840 // mov byte [rax + 3], sil LONG $0x00b28d4c; WORD $0x0001; BYTE $0x00 // lea r14, [rdx + 256] - LONG $0x04c68348 // add rsi, 4 - LONG $0x24748948; BYTE $0x08 // mov qword [rsp + 8], rsi - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB5_51 + LONG $0x04c08348 // add rax, 4 + LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax + QUAD $0x000000d024848348; BYTE $0xff // add qword [rsp + 208], -1 + JNE LBB5_52 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000a824948b4c // mov r10, qword [rsp + 168] + QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] -LBB5_53: +LBB5_54: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE 
LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB5_193 + JNE LBB5_194 WORD $0xff31 // xor edi, edi - JMP LBB5_195 + JMP LBB5_196 LBB5_2: WORD $0xff83; BYTE $0x02 // cmp edi, 2 - JE LBB5_56 + JE LBB5_57 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB5_199 + JNE LBB5_198 WORD $0x068a // mov al, byte [rsi] - LONG $0x40244488 // mov byte [rsp + 64], al + LONG $0x10244488 // mov byte [rsp + 16], al LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 LONG $0xd7490f4d // cmovns r10, r15 @@ -24458,7 +25609,7 @@ LBB5_2: LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] LBB5_6: - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] WORD $0x3a41; BYTE $0x0e // cmp cl, byte [r14] LONG $0x01768d4d // lea r14, [r14 + 1] WORD $0x950f; BYTE $0xd3 // setne bl @@ -24488,62 +25639,62 @@ LBB5_8: JL LBB5_9 LONG $0x10fa8349 // cmp r10, 16 QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 - JB LBB5_82 + QUAD $0x000001082494894c // mov qword [rsp + 264], r10 + JB LBB5_83 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x05e0c148 // shl rax, 5 WORD $0x014c; BYTE $0xf0 // add rax, r14 LONG $0x24443948; BYTE $0x08 // cmp qword [rsp + 8], rax - JAE LBB5_85 + JAE LBB5_86 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] LONG $0x90048d4a // lea rax, [rax + 4*r10] WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB5_85 + JAE LBB5_86 -LBB5_82: +LBB5_83: WORD $0xc031 // xor eax, eax - QUAD $0x000000d824848948 // mov qword [rsp + 216], rax + QUAD $0x000000e824848948 // mov qword [rsp + 232], rax LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - QUAD $0x0000008024848948 // mov qword [rsp + 128], rax - -LBB5_88: - QUAD $0x000000d824942b4c // sub r10, qword [rsp + 216] - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + LONG $0x24448948; BYTE $0x50 // mov qword [rsp + 80], rax LBB5_89: - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + QUAD $0x000000e824942b4c // sub r10, qword [rsp + 232] + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 + +LBB5_90: + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] LONG $0x1f463a41 // cmp al, byte [r14 + 31] LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x1e463a41 // cmp al, byte [r14 + 30] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x1d463a41 // cmp al, byte [r14 + 29] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x1c463a41 // cmp al, byte [r14 + 28] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x1b463a41 // cmp al, byte [r14 + 27] + LONG $0x1c463a41 // cmp al, byte [r14 + 28] LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x1b463a41 // cmp al, byte [r14 + 27] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x1a463a41 // cmp al, byte [r14 + 26] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] LONG $0x19463a41 // cmp al, byte [r14 + 25] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x17463a41 // cmp al, byte [r14 + 23] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x16463a41 // cmp al, byte [r14 + 22] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] 
LONG $0x15463a41 // cmp al, byte [r14 + 21] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] LONG $0x14463a41 // cmp al, byte [r14 + 20] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] LONG $0x13463a41 // cmp al, byte [r14 + 19] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x12463a41 // cmp al, byte [r14 + 18] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x11463a41 // cmp al, byte [r14 + 17] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x0f463a41 // cmp al, byte [r14 + 15] LONG $0xd2950f41 // setne r10b LONG $0x0e463a41 // cmp al, byte [r14 + 14] - QUAD $0x000000902494950f // setne byte [rsp + 144] + QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x0d463a41 // cmp al, byte [r14 + 13] LONG $0xd5950f41 // setne r13b LONG $0x0c463a41 // cmp al, byte [r14 + 12] @@ -24557,7 +25708,7 @@ LBB5_89: LONG $0x07463a41 // cmp al, byte [r14 + 7] LONG $0xd6950f40 // setne sil LONG $0x06463a41 // cmp al, byte [r14 + 6] - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x05463a41 // cmp al, byte [r14 + 5] LONG $0xd1950f41 // setne r9b LONG $0x04463a41 // cmp al, byte [r14 + 4] @@ -24571,9 +25722,9 @@ LBB5_89: LONG $0x01463a41 // cmp al, byte [r14 + 1] WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x08463a41 // cmp al, byte [r14 + 8] - QUAD $0x000000982494950f // setne byte [rsp + 152] + QUAD $0x000000c02494950f // setne byte [rsp + 192] LONG $0x10463a41 // cmp al, byte [r14 + 16] - QUAD $0x000000b02494950f // setne byte [rsp + 176] + QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x18463a41 // cmp al, byte [r14 + 24] QUAD $0x000000882494950f // setne byte [rsp + 136] WORD $0xc900 // add cl, cl @@ -24586,15 +25737,15 @@ LBB5_89: WORD $0x0841; BYTE $0xf8 // or r8b, dil LONG $0x05e1c041 // shl r9b, 5 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e6c040 // shl sil, 7 WORD $0x0840; BYTE $0xc6 // or sil, al WORD $0x0844; BYTE $0xce // or sil, r9b - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] WORD $0x8840; BYTE $0x30 // mov byte [rax], sil WORD $0x0045; BYTE $0xdb // add r11b, r11b - QUAD $0x00000098249c0244 // add r11b, byte [rsp + 152] + QUAD $0x000000c0249c0244 // add r11b, byte [rsp + 192] WORD $0xe3c0; BYTE $0x02 // shl bl, 2 WORD $0x0844; BYTE $0xdb // or bl, r11b LONG $0x03e7c041 // shl r15b, 3 @@ -24603,35 +25754,35 @@ LBB5_89: WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x05e5c041 // shl r13b, 5 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - QUAD $0x00000090248cb60f // movzx ecx, byte [rsp + 144] + QUAD $0x00000098248cb60f // movzx ecx, byte [rsp + 152] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e2c041 // shl r10b, 7 WORD $0x0841; BYTE $0xca // or r10b, cl WORD $0x0845; BYTE $0xea // or r10b, r13b LONG $0x01508844 // mov byte [rax + 1], r10b - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xc900 // add cl, cl - LONG $0xb0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 
176] + LONG $0x90248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 144] WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x68 // movzx ecx, byte [rsp + 104] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] + LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + LONG $0x245cb60f; BYTE $0x68 // movzx ebx, byte [rsp + 104] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] + LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0xd908 // or cl, bl WORD $0xd108 // or cl, dl @@ -24644,19 +25795,19 @@ LBB5_89: WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x245cb60f; BYTE $0x10 // movzx ebx, byte [rsp + 16] + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] WORD $0xe1c0; BYTE $0x07 // shl cl, 7 @@ -24665,18 +25816,18 @@ LBB5_89: WORD $0x4888; BYTE $0x03 // mov byte [rax + 3], cl LONG $0x20c68349 // add r14, 32 LONG $0x04c08348 // add rax, 4 - QUAD $0x0000008024848948 // mov qword [rsp + 128], rax - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 - JNE LBB5_89 + LONG $0x24448948; BYTE $0x50 // mov qword [rsp + 80], rax + QUAD $0x000000d024848348; BYTE $0xff // add qword [rsp + 208], -1 + JNE LBB5_90 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - JMP LBB5_91 + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + JMP LBB5_92 -LBB5_27: +LBB5_28: WORD $0xff83; BYTE $0x07 // cmp edi, 7 - JE LBB5_144 + JE LBB5_145 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB5_199 + JNE LBB5_198 WORD $0x8b4c; BYTE $0x1e // mov r11, qword [rsi] LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 @@ -24686,11 +25837,11 @@ LBB5_27: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_33 + JE LBB5_34 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LONG $0x24448b4c; BYTE $0x08 // mov r8, qword 
[rsp + 8] -LBB5_31: +LBB5_32: WORD $0x3b4d; BYTE $0x1e // cmp r11, qword [r14] LONG $0x08768d4d // lea r14, [r14 + 8] WORD $0x950f; BYTE $0xd2 // setne dl @@ -24711,52 +25862,52 @@ LBB5_31: LONG $0x303c8841 // mov byte [r8 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_31 + JNE LBB5_32 LONG $0x24448348; WORD $0x0108 // add qword [rsp + 8], 1 -LBB5_33: +LBB5_34: LONG $0x05fac149 // sar r10, 5 LONG $0x20ff8349 // cmp r15, 32 - JL LBB5_37 + JL LBB5_38 QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 QUAD $0x000000a82494894c // mov qword [rsp + 168], r10 -LBB5_35: +LBB5_36: LONG $0xf89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 248] LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0xf09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 240] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0xe89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 232] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0xe09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 224] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0xd89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 216] + LONG $0xe09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 224] LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0xd89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 216] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0xd09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 208] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] LONG $0xc89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 200] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0xb89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 184] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] LONG $0xb09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 176] - QUAD $0x000000802494950f // setne byte [rsp + 128] + LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0xa89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 168] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0xa09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 160] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] LONG $0x989e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 152] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] LONG $0x909e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 144] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x889e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 136] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x785e3b4d // cmp r11, qword [r14 + 120] LONG $0xd0950f41 // setne r8b LONG $0x705e3b4d // cmp r11, qword [r14 + 112] QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x685e3b4d // cmp r11, qword [r14 + 104] - QUAD $0x000000902494950f // setne byte [rsp + 144] + QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x605e3b4d // cmp r11, qword [r14 + 96] LONG $0xd7950f40 // setne dil LONG $0x585e3b4d // 
cmp r11, qword [r14 + 88] @@ -24776,24 +25927,24 @@ LBB5_35: LONG $0x185e3b4d // cmp r11, qword [r14 + 24] LONG $0xd7950f41 // setne r15b LONG $0x105e3b4d // cmp r11, qword [r14 + 16] - LONG $0xd5950f41 // setne r13b + LONG $0xd4950f41 // setne r12b WORD $0x3b4d; BYTE $0x1e // cmp r11, qword [r14] - QUAD $0x000000982494950f // setne byte [rsp + 152] + QUAD $0x000000c02494950f // setne byte [rsp + 192] LONG $0x085e3b4d // cmp r11, qword [r14 + 8] - LONG $0xd4950f41 // setne r12b + LONG $0xd5950f41 // setne r13b LONG $0x405e3b4d // cmp r11, qword [r14 + 64] - QUAD $0x000000c02494950f // setne byte [rsp + 192] - LONG $0x809e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 128] QUAD $0x000000b02494950f // setne byte [rsp + 176] + LONG $0x809e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 128] + QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0xc09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 192] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - WORD $0x0045; BYTE $0xe4 // add r12b, r12b - QUAD $0x0000009824a40244 // add r12b, byte [rsp + 152] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x0045; BYTE $0xed // add r13b, r13b + QUAD $0x000000c024ac0244 // add r13b, byte [rsp + 192] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0845; BYTE $0xec // or r12b, r13b + LONG $0x246c8b4c; BYTE $0x08 // mov r13, qword [rsp + 8] LONG $0x03e7c041 // shl r15b, 3 - WORD $0x0845; BYTE $0xef // or r15b, r13b + WORD $0x0845; BYTE $0xe7 // or r15b, r12b WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0844; BYTE $0xfa // or dl, r15b WORD $0xe1c0; BYTE $0x05 // shl cl, 5 @@ -24802,16 +25953,16 @@ LBB5_35: WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl - LONG $0x24048841 // mov byte [r12], al + LONG $0x00458841 // mov byte [r13], al WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000c024b40240 // add sil, byte [rsp + 192] + QUAD $0x000000b024b40240 // add sil, byte [rsp + 176] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xf1 // or r9b, sil LONG $0x03e2c041 // shl r10b, 3 WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e7c040 // shl dil, 4 WORD $0x0844; BYTE $0xd7 // or dil, r10b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf8 // or al, dil QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] @@ -24819,51 +25970,51 @@ LBB5_35: LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x01458845 // mov byte [r13 + 1], r8b + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al - LONG $0xb0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 176] + LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 
// or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] + LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl - LONG $0x24448841; BYTE $0x02 // mov byte [r12 + 2], al + LONG $0x02458841 // mov byte [r13 + 2], al LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x60244402 // add al, byte [rsp + 96] + LONG $0x70244402 // add al, byte [rsp + 112] WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -24873,30 +26024,30 @@ LBB5_35: WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl - LONG $0x24448841; BYTE $0x03 // mov byte [r12 + 3], al + LONG $0x03458841 // mov byte [r13 + 3], al LONG $0x00c68149; WORD $0x0001; BYTE $0x00 // add r14, 256 - LONG $0x04c48349 // add r12, 4 - LONG $0x2464894c; BYTE $0x08 // mov qword [rsp + 8], r12 + LONG $0x04c58349 // add r13, 4 + LONG $0x246c894c; BYTE $0x08 // mov qword [rsp + 8], r13 QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB5_35 + JNE LBB5_36 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] -LBB5_37: +LBB5_38: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JE LBB5_39 + JE LBB5_40 WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0xff31 // xor edi, edi LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] -LBB5_157: +LBB5_158: WORD $0x3b4d; BYTE $0x1e // cmp r11, qword [r14] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -24924,12 +26075,12 @@ LBB5_157: WORD $0xd330 // xor bl, dl LONG $0x371c8841 // mov byte [r15 + rsi], bl WORD $0x3949; BYTE $0xf9 // cmp r9, rdi - JNE LBB5_157 - JMP LBB5_40 + JNE LBB5_158 + JMP LBB5_41 -LBB5_56: +LBB5_57: WORD $0x068a // mov al, byte [rsi] - LONG $0x28244488 
// mov byte [rsp + 40], al + LONG $0x40244488 // mov byte [rsp + 64], al LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 LONG $0xd7490f4d // cmovns r10, r15 @@ -24938,12 +26089,12 @@ LBB5_56: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_60 + JE LBB5_61 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] -LBB5_58: - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] +LBB5_59: + LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] WORD $0x3a41; BYTE $0x0e // cmp cl, byte [r14] LONG $0x01768d4d // lea r14, [r14 + 1] WORD $0x950f; BYTE $0xd3 // setne bl @@ -24964,71 +26115,71 @@ LBB5_58: LONG $0x323c8840 // mov byte [rdx + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_58 + JNE LBB5_59 LONG $0x24448348; WORD $0x0108 // add qword [rsp + 8], 1 -LBB5_60: +LBB5_61: LONG $0x05fac149 // sar r10, 5 LONG $0x20ff8349 // cmp r15, 32 - JL LBB5_61 + JL LBB5_62 LONG $0x10fa8349 // cmp r10, 16 QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000f82494894c // mov qword [rsp + 248], r10 - JB LBB5_63 + QUAD $0x000001082494894c // mov qword [rsp + 264], r10 + JB LBB5_64 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x05e0c148 // shl rax, 5 WORD $0x014c; BYTE $0xf0 // add rax, r14 LONG $0x24443948; BYTE $0x08 // cmp qword [rsp + 8], rax - JAE LBB5_66 + JAE LBB5_67 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] LONG $0x90048d4a // lea rax, [rax + 4*r10] WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB5_66 + JAE LBB5_67 -LBB5_63: +LBB5_64: WORD $0xc031 // xor eax, eax - QUAD $0x000000d824848948 // mov qword [rsp + 216], rax + QUAD $0x000000e824848948 // mov qword [rsp + 232], rax LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - LONG $0x24448948; BYTE $0x58 // mov qword [rsp + 88], rax - -LBB5_69: - QUAD $0x000000d824942b4c // sub r10, qword [rsp + 216] - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax LBB5_70: - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + QUAD $0x000000e824942b4c // sub r10, qword [rsp + 232] + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 + +LBB5_71: + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] LONG $0x1f463a41 // cmp al, byte [r14 + 31] LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x1e463a41 // cmp al, byte [r14 + 30] LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x1d463a41 // cmp al, byte [r14 + 29] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] - LONG $0x1c463a41 // cmp al, byte [r14 + 28] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x1b463a41 // cmp al, byte [r14 + 27] + LONG $0x1c463a41 // cmp al, byte [r14 + 28] LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x1b463a41 // cmp al, byte [r14 + 27] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x1a463a41 // cmp al, byte [r14 + 26] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] LONG $0x19463a41 // cmp al, byte [r14 + 25] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x17463a41 // cmp al, byte [r14 + 23] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x16463a41 // cmp al, byte [r14 + 22] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] - LONG $0x15463a41 // cmp al, byte [r14 + 21] + LONG $0x16463a41 // cmp al, byte [r14 + 22] LONG $0x2454950f; BYTE $0x48 // 
setne byte [rsp + 72] + LONG $0x15463a41 // cmp al, byte [r14 + 21] + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] LONG $0x14463a41 // cmp al, byte [r14 + 20] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] LONG $0x13463a41 // cmp al, byte [r14 + 19] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x12463a41 // cmp al, byte [r14 + 18] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x11463a41 // cmp al, byte [r14 + 17] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x0f463a41 // cmp al, byte [r14 + 15] LONG $0xd2950f41 // setne r10b LONG $0x0e463a41 // cmp al, byte [r14 + 14] - QUAD $0x000000902494950f // setne byte [rsp + 144] + QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x0d463a41 // cmp al, byte [r14 + 13] LONG $0xd5950f41 // setne r13b LONG $0x0c463a41 // cmp al, byte [r14 + 12] @@ -25042,7 +26193,7 @@ LBB5_70: LONG $0x07463a41 // cmp al, byte [r14 + 7] LONG $0xd6950f40 // setne sil LONG $0x06463a41 // cmp al, byte [r14 + 6] - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x05463a41 // cmp al, byte [r14 + 5] LONG $0xd1950f41 // setne r9b LONG $0x04463a41 // cmp al, byte [r14 + 4] @@ -25056,9 +26207,9 @@ LBB5_70: LONG $0x01463a41 // cmp al, byte [r14 + 1] WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x08463a41 // cmp al, byte [r14 + 8] - QUAD $0x000000982494950f // setne byte [rsp + 152] + QUAD $0x000000c02494950f // setne byte [rsp + 192] LONG $0x10463a41 // cmp al, byte [r14 + 16] - QUAD $0x000000b02494950f // setne byte [rsp + 176] + QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x18463a41 // cmp al, byte [r14 + 24] QUAD $0x000000882494950f // setne byte [rsp + 136] WORD $0xc900 // add cl, cl @@ -25071,15 +26222,15 @@ LBB5_70: WORD $0x0841; BYTE $0xf8 // or r8b, dil LONG $0x05e1c041 // shl r9b, 5 WORD $0x0845; BYTE $0xc1 // or r9b, r8b - QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e6c040 // shl sil, 7 WORD $0x0840; BYTE $0xc6 // or sil, al WORD $0x0844; BYTE $0xce // or sil, r9b - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] WORD $0x8840; BYTE $0x30 // mov byte [rax], sil WORD $0x0045; BYTE $0xdb // add r11b, r11b - QUAD $0x00000098249c0244 // add r11b, byte [rsp + 152] + QUAD $0x000000c0249c0244 // add r11b, byte [rsp + 192] WORD $0xe3c0; BYTE $0x02 // shl bl, 2 WORD $0x0844; BYTE $0xdb // or bl, r11b LONG $0x03e7c041 // shl r15b, 3 @@ -25088,35 +26239,35 @@ LBB5_70: WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x05e5c041 // shl r13b, 5 WORD $0x0845; BYTE $0xe5 // or r13b, r12b - QUAD $0x00000090248cb60f // movzx ecx, byte [rsp + 144] + QUAD $0x00000098248cb60f // movzx ecx, byte [rsp + 152] WORD $0xe1c0; BYTE $0x06 // shl cl, 6 LONG $0x07e2c041 // shl r10b, 7 WORD $0x0841; BYTE $0xca // or r10b, cl WORD $0x0845; BYTE $0xea // or r10b, r13b LONG $0x01508844 // mov byte [rax + 1], r10b - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xc900 // add cl, cl - LONG $0xb0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 176] + LONG $0x90248c02; 
WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 144] WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x68 // movzx ecx, byte [rsp + 104] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] + LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + LONG $0x245cb60f; BYTE $0x48 // movzx ebx, byte [rsp + 72] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - QUAD $0x00000080248cb60f // movzx ecx, byte [rsp + 128] + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0xd908 // or cl, bl WORD $0xd108 // or cl, dl @@ -25129,15 +26280,15 @@ LBB5_70: WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx @@ -25150,14 +26301,14 @@ LBB5_70: WORD $0x4888; BYTE $0x03 // mov byte [rax + 3], cl LONG $0x20c68349 // add r14, 32 LONG $0x04c08348 // add rax, 4 - LONG $0x24448948; BYTE $0x58 // mov qword [rsp + 88], rax - QUAD $0x000000e024848348; BYTE $0xff // add qword [rsp + 224], -1 - JNE LBB5_70 + LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax + QUAD $0x000000d024848348; BYTE $0xff // add qword [rsp + 208], -1 + JNE LBB5_71 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - JMP LBB5_72 + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + JMP LBB5_73 -LBB5_144: +LBB5_145: WORD $0x8b44; BYTE $0x1e // mov r11d, dword [rsi] LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 @@ -25167,11 +26318,11 @@ LBB5_144: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_148 + JE LBB5_149 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] -LBB5_146: +LBB5_147: WORD $0x3b45; BYTE $0x1e // cmp r11d, dword [r14] LONG $0x04768d4d // lea r14, [r14 + 4] WORD $0x950f; BYTE $0xd2 // setne dl @@ -25192,52 +26343,52 @@ LBB5_146: LONG $0x303c8841 // mov byte [r8 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_146 + JNE LBB5_147 LONG $0x24448348; WORD $0x0108 // add qword [rsp + 8], 1 -LBB5_148: +LBB5_149: 
LONG $0x05fac149 // sar r10, 5 LONG $0x20ff8349 // cmp r15, 32 - JL LBB5_152 + JL LBB5_153 QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 QUAD $0x000000a82494894c // mov qword [rsp + 168], r10 -LBB5_150: +LBB5_151: LONG $0x7c5e3b45 // cmp r11d, dword [r14 + 124] LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0x785e3b45 // cmp r11d, dword [r14 + 120] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x745e3b45 // cmp r11d, dword [r14 + 116] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x705e3b45 // cmp r11d, dword [r14 + 112] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x6c5e3b45 // cmp r11d, dword [r14 + 108] + LONG $0x705e3b45 // cmp r11d, dword [r14 + 112] LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x6c5e3b45 // cmp r11d, dword [r14 + 108] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x685e3b45 // cmp r11d, dword [r14 + 104] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] LONG $0x645e3b45 // cmp r11d, dword [r14 + 100] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x5c5e3b45 // cmp r11d, dword [r14 + 92] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] LONG $0x585e3b45 // cmp r11d, dword [r14 + 88] - QUAD $0x000000802494950f // setne byte [rsp + 128] + LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0x545e3b45 // cmp r11d, dword [r14 + 84] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x505e3b45 // cmp r11d, dword [r14 + 80] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] LONG $0x4c5e3b45 // cmp r11d, dword [r14 + 76] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] LONG $0x485e3b45 // cmp r11d, dword [r14 + 72] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x445e3b45 // cmp r11d, dword [r14 + 68] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x3c5e3b45 // cmp r11d, dword [r14 + 60] LONG $0xd0950f41 // setne r8b LONG $0x385e3b45 // cmp r11d, dword [r14 + 56] QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x345e3b45 // cmp r11d, dword [r14 + 52] - QUAD $0x000000902494950f // setne byte [rsp + 144] + QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x305e3b45 // cmp r11d, dword [r14 + 48] LONG $0xd7950f40 // setne dil LONG $0x2c5e3b45 // cmp r11d, dword [r14 + 44] @@ -25257,24 +26408,24 @@ LBB5_150: LONG $0x0c5e3b45 // cmp r11d, dword [r14 + 12] LONG $0xd7950f41 // setne r15b LONG $0x085e3b45 // cmp r11d, dword [r14 + 8] - LONG $0xd5950f41 // setne r13b + LONG $0xd4950f41 // setne r12b WORD $0x3b45; BYTE $0x1e // cmp r11d, dword [r14] - QUAD $0x000000982494950f // setne byte [rsp + 152] + QUAD $0x000000c02494950f // setne byte [rsp + 192] LONG $0x045e3b45 // cmp r11d, dword [r14 + 4] - LONG $0xd4950f41 // setne r12b + LONG $0xd5950f41 // setne r13b LONG $0x205e3b45 // cmp r11d, dword [r14 + 32] - QUAD $0x000000c02494950f // setne byte [rsp + 192] - LONG $0x405e3b45 // cmp r11d, dword [r14 + 64] QUAD $0x000000b02494950f // setne byte [rsp + 176] + LONG $0x405e3b45 // cmp r11d, dword [r14 + 64] + QUAD $0x000000902494950f // 
setne byte [rsp + 144] LONG $0x605e3b45 // cmp r11d, dword [r14 + 96] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - WORD $0x0045; BYTE $0xe4 // add r12b, r12b - QUAD $0x0000009824a40244 // add r12b, byte [rsp + 152] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x0045; BYTE $0xed // add r13b, r13b + QUAD $0x000000c024ac0244 // add r13b, byte [rsp + 192] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0845; BYTE $0xec // or r12b, r13b + LONG $0x246c8b4c; BYTE $0x08 // mov r13, qword [rsp + 8] LONG $0x03e7c041 // shl r15b, 3 - WORD $0x0845; BYTE $0xef // or r15b, r13b + WORD $0x0845; BYTE $0xe7 // or r15b, r12b WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0844; BYTE $0xfa // or dl, r15b WORD $0xe1c0; BYTE $0x05 // shl cl, 5 @@ -25283,16 +26434,16 @@ LBB5_150: WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl - LONG $0x24048841 // mov byte [r12], al + LONG $0x00458841 // mov byte [r13], al WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000c024b40240 // add sil, byte [rsp + 192] + QUAD $0x000000b024b40240 // add sil, byte [rsp + 176] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xf1 // or r9b, sil LONG $0x03e2c041 // shl r10b, 3 WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e7c040 // shl dil, 4 WORD $0x0844; BYTE $0xd7 // or dil, r10b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf8 // or al, dil QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] @@ -25300,51 +26451,51 @@ LBB5_150: LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x01458845 // mov byte [r13 + 1], r8b + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al - LONG $0xb0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 176] + LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] + LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl - LONG $0x24448841; BYTE 
$0x02 // mov byte [r12 + 2], al + LONG $0x02458841 // mov byte [r13 + 2], al LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x60244402 // add al, byte [rsp + 96] + LONG $0x70244402 // add al, byte [rsp + 112] WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -25354,30 +26505,30 @@ LBB5_150: WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl - LONG $0x24448841; BYTE $0x03 // mov byte [r12 + 3], al + LONG $0x03458841 // mov byte [r13 + 3], al LONG $0x80ee8349 // sub r14, -128 - LONG $0x04c48349 // add r12, 4 - LONG $0x2464894c; BYTE $0x08 // mov qword [rsp + 8], r12 + LONG $0x04c58349 // add r13, 4 + LONG $0x246c894c; BYTE $0x08 // mov qword [rsp + 8], r13 QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB5_150 + JNE LBB5_151 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] -LBB5_152: +LBB5_153: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB5_154 + JNE LBB5_155 LBB5_23: WORD $0xff31 // xor edi, edi JMP LBB5_24 -LBB5_99: +LBB5_100: LONG $0x1eb70f44 // movzx r11d, word [rsi] LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 @@ -25387,11 +26538,11 @@ LBB5_99: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_103 + JE LBB5_104 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] -LBB5_101: +LBB5_102: LONG $0x1e3b4566 // cmp r11w, word [r14] LONG $0x02768d4d // lea r14, [r14 + 2] WORD $0x950f; BYTE $0xd3 // setne bl @@ -25412,72 +26563,72 @@ LBB5_101: LONG $0x323c8840 // mov byte [rdx + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_101 + JNE LBB5_102 LONG $0x24448348; WORD $0x0108 // add qword [rsp + 8], 1 -LBB5_103: +LBB5_104: LONG $0x05fac149 // sar r10, 5 LONG $0x20ff8349 // cmp r15, 32 - JL LBB5_104 + JL LBB5_105 LONG $0x08fa8349 // cmp r10, 8 LONG $0x245c8944; BYTE $0x10 // mov dword [rsp + 16], r11d QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 - JB LBB5_106 + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 + JB LBB5_107 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x06e0c148 // shl rax, 6 WORD $0x014c; BYTE $0xf0 // add rax, r14 LONG $0x24443948; BYTE $0x08 // cmp qword [rsp + 8], rax - JAE LBB5_109 + JAE LBB5_110 
LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] LONG $0x90048d4a // lea rax, [rax + 4*r10] WORD $0x394c; BYTE $0xf0 // cmp rax, r14 - JBE LBB5_109 + JBE LBB5_110 -LBB5_106: +LBB5_107: WORD $0xc031 // xor eax, eax - LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax + LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] -LBB5_112: - LONG $0x24542b4c; BYTE $0x18 // sub r10, qword [rsp + 24] +LBB5_113: + LONG $0x24542b4c; BYTE $0x28 // sub r10, qword [rsp + 40] QUAD $0x000000a82494894c // mov qword [rsp + 168], r10 -LBB5_113: +LBB5_114: LONG $0x5e3b4566; BYTE $0x3e // cmp r11w, word [r14 + 62] LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x5e3b4566; BYTE $0x3c // cmp r11w, word [r14 + 60] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x5e3b4566; BYTE $0x3a // cmp r11w, word [r14 + 58] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x5e3b4566; BYTE $0x38 // cmp r11w, word [r14 + 56] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x5e3b4566; BYTE $0x36 // cmp r11w, word [r14 + 54] + LONG $0x5e3b4566; BYTE $0x38 // cmp r11w, word [r14 + 56] LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x5e3b4566; BYTE $0x36 // cmp r11w, word [r14 + 54] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x5e3b4566; BYTE $0x34 // cmp r11w, word [r14 + 52] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] LONG $0x5e3b4566; BYTE $0x32 // cmp r11w, word [r14 + 50] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x5e3b4566; BYTE $0x2e // cmp r11w, word [r14 + 46] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x5e3b4566; BYTE $0x2c // cmp r11w, word [r14 + 44] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + LONG $0x5e3b4566; BYTE $0x2c // cmp r11w, word [r14 + 44] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x5e3b4566; BYTE $0x2a // cmp r11w, word [r14 + 42] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x5e3b4566; BYTE $0x28 // cmp r11w, word [r14 + 40] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x5e3b4566; BYTE $0x28 // cmp r11w, word [r14 + 40] + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] LONG $0x5e3b4566; BYTE $0x26 // cmp r11w, word [r14 + 38] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] LONG $0x5e3b4566; BYTE $0x24 // cmp r11w, word [r14 + 36] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x5e3b4566; BYTE $0x22 // cmp r11w, word [r14 + 34] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x5e3b4566; BYTE $0x1e // cmp r11w, word [r14 + 30] LONG $0xd1950f41 // setne r9b LONG $0x5e3b4566; BYTE $0x1c // cmp r11w, word [r14 + 28] QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x5e3b4566; BYTE $0x1a // cmp r11w, word [r14 + 26] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x5e3b4566; BYTE $0x18 // cmp r11w, word [r14 + 24] LONG $0xd5950f41 // setne r13b LONG $0x5e3b4566; BYTE $0x16 // cmp r11w, word [r14 + 22] @@ -25492,7 +26643,7 @@ LBB5_113: WORD $0x950f; BYTE $0xd2 // setne dl LONG $0x1024448b // mov eax, dword [rsp + 16] LONG $0x463b4166; BYTE $0x0c // cmp ax, word [r14 + 12] - QUAD $0x000000b02494950f // setne byte [rsp + 176] + QUAD $0x000000902494950f // 
setne byte [rsp + 144] LONG $0x1024448b // mov eax, dword [rsp + 16] LONG $0x463b4166; BYTE $0x0a // cmp ax, word [r14 + 10] LONG $0xd0950f41 // setne r8b @@ -25507,7 +26658,7 @@ LBB5_113: WORD $0x950f; BYTE $0xd1 // setne cl LONG $0x1024448b // mov eax, dword [rsp + 16] LONG $0x063b4166 // cmp ax, word [r14] - QUAD $0x000000982494950f // setne byte [rsp + 152] + QUAD $0x000000c02494950f // setne byte [rsp + 192] LONG $0x1024448b // mov eax, dword [rsp + 16] LONG $0x463b4166; BYTE $0x02 // cmp ax, word [r14 + 2] WORD $0x950f; BYTE $0xd0 // setne al @@ -25515,15 +26666,15 @@ LBB5_113: LONG $0x24648b44; BYTE $0x10 // mov r12d, dword [rsp + 16] LONG $0x663b4566; BYTE $0x10 // cmp r12w, word [r14 + 16] WORD $0x8949; BYTE $0xdc // mov r12, rbx - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x10245c8b // mov ebx, dword [rsp + 16] LONG $0x5e3b4166; BYTE $0x20 // cmp bx, word [r14 + 32] - QUAD $0x000000902494950f // setne byte [rsp + 144] + QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x10245c8b // mov ebx, dword [rsp + 16] LONG $0x5e3b4166; BYTE $0x30 // cmp bx, word [r14 + 48] WORD $0x950f; BYTE $0xd3 // setne bl WORD $0xc000 // add al, al - LONG $0x98248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 152] + LONG $0xc0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 192] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xc108 // or cl, al LONG $0x03e6c040 // shl sil, 3 @@ -25532,14 +26683,14 @@ LBB5_113: WORD $0x0840; BYTE $0xf7 // or dil, sil LONG $0x05e0c041 // shl r8b, 5 WORD $0x0841; BYTE $0xf8 // or r8b, dil - QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe2c0; BYTE $0x07 // shl dl, 7 WORD $0xc208 // or dl, al WORD $0x0844; BYTE $0xc2 // or dl, r8b LONG $0x24148841 // mov byte [r12], dl WORD $0x0045; BYTE $0xd2 // add r10b, r10b - QUAD $0x000000c024940244 // add r10b, byte [rsp + 192] + QUAD $0x000000b024940244 // add r10b, byte [rsp + 176] LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xd3 // or r11b, r10b LONG $0x03e7c041 // shl r15b, 3 @@ -25547,7 +26698,7 @@ LBB5_113: LONG $0x245c8b44; BYTE $0x10 // mov r11d, dword [rsp + 16] LONG $0x04e5c041 // shl r13b, 4 WORD $0x0845; BYTE $0xfd // or r13b, r15b - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xe8 // or al, r13b QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] @@ -25556,29 +26707,29 @@ LBB5_113: WORD $0x0841; BYTE $0xc9 // or r9b, cl WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x244c8845; BYTE $0x01 // mov byte [r12 + 1], r9b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al - LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] + LONG $0x98248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 152] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE 
$0x48 // movzx eax, byte [rsp + 72] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl @@ -25591,15 +26742,15 @@ LBB5_113: WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -25613,12 +26764,12 @@ LBB5_113: LONG $0x40c68349 // add r14, 64 LONG $0x04c48349 // add r12, 4 QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB5_113 + JNE LBB5_114 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] - JMP LBB5_115 + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + JMP LBB5_116 -LBB5_122: +LBB5_123: LONG $0x1eb70f44 // movzx r11d, word [rsi] LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 @@ -25628,11 +26779,11 @@ LBB5_122: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_126 + JE LBB5_127 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] -LBB5_124: +LBB5_125: LONG $0x1e3b4566 // cmp r11w, word [r14] LONG $0x02768d4d // lea r14, [r14 + 2] WORD $0x950f; BYTE $0xd3 // setne bl @@ -25653,72 +26804,72 @@ LBB5_124: LONG $0x323c8840 // mov byte [rdx + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_124 + JNE LBB5_125 LONG $0x24448348; WORD $0x0108 // add qword [rsp + 8], 1 -LBB5_126: +LBB5_127: LONG $0x05fac149 // sar r10, 5 LONG $0x20ff8349 // cmp r15, 32 - JL LBB5_127 + JL LBB5_128 LONG $0x08fa8349 // cmp r10, 8 LONG $0x245c8944; BYTE $0x10 // mov dword [rsp + 16], r11d QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 - JB LBB5_129 + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 + JB LBB5_130 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x06e0c148 // shl rax, 6 WORD $0x014c; BYTE $0xf0 // add rax, r14 LONG $0x24443948; BYTE $0x08 // cmp qword [rsp + 8], rax - JAE LBB5_132 + JAE LBB5_133 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] LONG $0x90048d4a // lea rax, [rax + 4*r10] WORD $0x394c; BYTE $0xf0 // cmp rax, r14 - JBE LBB5_132 + JBE LBB5_133 -LBB5_129: +LBB5_130: WORD $0xc031 // xor 
eax, eax - LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax + LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] -LBB5_135: - LONG $0x24542b4c; BYTE $0x18 // sub r10, qword [rsp + 24] +LBB5_136: + LONG $0x24542b4c; BYTE $0x28 // sub r10, qword [rsp + 40] QUAD $0x000000a82494894c // mov qword [rsp + 168], r10 -LBB5_136: +LBB5_137: LONG $0x5e3b4566; BYTE $0x3e // cmp r11w, word [r14 + 62] LONG $0x2454950f; BYTE $0x08 // setne byte [rsp + 8] LONG $0x5e3b4566; BYTE $0x3c // cmp r11w, word [r14 + 60] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0x5e3b4566; BYTE $0x3a // cmp r11w, word [r14 + 58] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0x5e3b4566; BYTE $0x38 // cmp r11w, word [r14 + 56] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0x5e3b4566; BYTE $0x36 // cmp r11w, word [r14 + 54] + LONG $0x5e3b4566; BYTE $0x38 // cmp r11w, word [r14 + 56] LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0x5e3b4566; BYTE $0x36 // cmp r11w, word [r14 + 54] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0x5e3b4566; BYTE $0x34 // cmp r11w, word [r14 + 52] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] LONG $0x5e3b4566; BYTE $0x32 // cmp r11w, word [r14 + 50] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0x5e3b4566; BYTE $0x2e // cmp r11w, word [r14 + 46] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] - LONG $0x5e3b4566; BYTE $0x2c // cmp r11w, word [r14 + 44] LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + LONG $0x5e3b4566; BYTE $0x2c // cmp r11w, word [r14 + 44] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0x5e3b4566; BYTE $0x2a // cmp r11w, word [r14 + 42] - QUAD $0x000000802494950f // setne byte [rsp + 128] - LONG $0x5e3b4566; BYTE $0x28 // cmp r11w, word [r14 + 40] LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x5e3b4566; BYTE $0x28 // cmp r11w, word [r14 + 40] + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] LONG $0x5e3b4566; BYTE $0x26 // cmp r11w, word [r14 + 38] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] LONG $0x5e3b4566; BYTE $0x24 // cmp r11w, word [r14 + 36] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x5e3b4566; BYTE $0x22 // cmp r11w, word [r14 + 34] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x5e3b4566; BYTE $0x1e // cmp r11w, word [r14 + 30] LONG $0xd1950f41 // setne r9b LONG $0x5e3b4566; BYTE $0x1c // cmp r11w, word [r14 + 28] QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x5e3b4566; BYTE $0x1a // cmp r11w, word [r14 + 26] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] LONG $0x5e3b4566; BYTE $0x18 // cmp r11w, word [r14 + 24] LONG $0xd5950f41 // setne r13b LONG $0x5e3b4566; BYTE $0x16 // cmp r11w, word [r14 + 22] @@ -25733,7 +26884,7 @@ LBB5_136: WORD $0x950f; BYTE $0xd2 // setne dl LONG $0x1024448b // mov eax, dword [rsp + 16] LONG $0x463b4166; BYTE $0x0c // cmp ax, word [r14 + 12] - QUAD $0x000000b02494950f // setne byte [rsp + 176] + QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0x1024448b // mov eax, dword [rsp + 16] LONG $0x463b4166; BYTE $0x0a // cmp ax, word [r14 + 10] LONG $0xd0950f41 // setne r8b @@ -25748,7 +26899,7 @@ LBB5_136: WORD $0x950f; BYTE $0xd1 // 
setne cl LONG $0x1024448b // mov eax, dword [rsp + 16] LONG $0x063b4166 // cmp ax, word [r14] - QUAD $0x000000982494950f // setne byte [rsp + 152] + QUAD $0x000000c02494950f // setne byte [rsp + 192] LONG $0x1024448b // mov eax, dword [rsp + 16] LONG $0x463b4166; BYTE $0x02 // cmp ax, word [r14 + 2] WORD $0x950f; BYTE $0xd0 // setne al @@ -25756,15 +26907,15 @@ LBB5_136: LONG $0x24648b44; BYTE $0x10 // mov r12d, dword [rsp + 16] LONG $0x663b4566; BYTE $0x10 // cmp r12w, word [r14 + 16] WORD $0x8949; BYTE $0xdc // mov r12, rbx - QUAD $0x000000c02494950f // setne byte [rsp + 192] + QUAD $0x000000b02494950f // setne byte [rsp + 176] LONG $0x10245c8b // mov ebx, dword [rsp + 16] LONG $0x5e3b4166; BYTE $0x20 // cmp bx, word [r14 + 32] - QUAD $0x000000902494950f // setne byte [rsp + 144] + QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x10245c8b // mov ebx, dword [rsp + 16] LONG $0x5e3b4166; BYTE $0x30 // cmp bx, word [r14 + 48] WORD $0x950f; BYTE $0xd3 // setne bl WORD $0xc000 // add al, al - LONG $0x98248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 152] + LONG $0xc0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 192] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xc108 // or cl, al LONG $0x03e6c040 // shl sil, 3 @@ -25773,14 +26924,14 @@ LBB5_136: WORD $0x0840; BYTE $0xf7 // or dil, sil LONG $0x05e0c041 // shl r8b, 5 WORD $0x0841; BYTE $0xf8 // or r8b, dil - QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe2c0; BYTE $0x07 // shl dl, 7 WORD $0xc208 // or dl, al WORD $0x0844; BYTE $0xc2 // or dl, r8b LONG $0x24148841 // mov byte [r12], dl WORD $0x0045; BYTE $0xd2 // add r10b, r10b - QUAD $0x000000c024940244 // add r10b, byte [rsp + 192] + QUAD $0x000000b024940244 // add r10b, byte [rsp + 176] LONG $0x02e3c041 // shl r11b, 2 WORD $0x0845; BYTE $0xd3 // or r11b, r10b LONG $0x03e7c041 // shl r15b, 3 @@ -25788,7 +26939,7 @@ LBB5_136: LONG $0x245c8b44; BYTE $0x10 // mov r11d, dword [rsp + 16] LONG $0x04e5c041 // shl r13b, 4 WORD $0x0845; BYTE $0xfd // or r13b, r15b - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0844; BYTE $0xe8 // or al, r13b QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] @@ -25797,29 +26948,29 @@ LBB5_136: WORD $0x0841; BYTE $0xc9 // or r9b, cl WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x244c8845; BYTE $0x01 // mov byte [r12 + 1], r9b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al - LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] + LONG $0x98248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 152] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000000802484b60f // 
movzx eax, byte [rsp + 128] + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl @@ -25832,15 +26983,15 @@ LBB5_136: WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -25854,12 +27005,12 @@ LBB5_136: LONG $0x40c68349 // add r14, 64 LONG $0x04c48349 // add r12, 4 QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB5_136 + JNE LBB5_137 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] - JMP LBB5_138 + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + JMP LBB5_139 -LBB5_158: +LBB5_159: WORD $0x8b4c; BYTE $0x1e // mov r11, qword [rsi] LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 @@ -25869,11 +27020,11 @@ LBB5_158: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_162 + JE LBB5_163 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] -LBB5_160: +LBB5_161: WORD $0x3b4d; BYTE $0x1e // cmp r11, qword [r14] LONG $0x08768d4d // lea r14, [r14 + 8] WORD $0x950f; BYTE $0xd2 // setne dl @@ -25894,52 +27045,52 @@ LBB5_160: LONG $0x303c8841 // mov byte [r8 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_160 + JNE LBB5_161 LONG $0x24448348; WORD $0x0108 // add qword [rsp + 8], 1 -LBB5_162: +LBB5_163: LONG $0x05fac149 // sar r10, 5 LONG $0x20ff8349 // cmp r15, 32 - JL LBB5_166 + JL LBB5_167 QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000e02494894c // mov qword [rsp + 224], r10 + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 QUAD $0x000000a82494894c // mov qword [rsp + 168], r10 -LBB5_164: +LBB5_165: LONG $0xf89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 248] LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] LONG $0xf09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 240] LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] LONG $0xe89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 232] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] - LONG $0xe09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 224] LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] - LONG $0xd89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 216] + LONG $0xe09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 224] 
LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + LONG $0xd89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 216] + LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] LONG $0xd09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 208] LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] LONG $0xc89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 200] LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] LONG $0xb89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 184] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] LONG $0xb09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 176] - QUAD $0x000000802494950f // setne byte [rsp + 128] + LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] LONG $0xa89e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 168] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] LONG $0xa09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 160] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] LONG $0x989e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 152] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] LONG $0x909e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 144] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + QUAD $0x000000802494950f // setne byte [rsp + 128] LONG $0x889e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 136] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] LONG $0x785e3b4d // cmp r11, qword [r14 + 120] LONG $0xd0950f41 // setne r8b LONG $0x705e3b4d // cmp r11, qword [r14 + 112] QUAD $0x000000882494950f // setne byte [rsp + 136] LONG $0x685e3b4d // cmp r11, qword [r14 + 104] - QUAD $0x000000902494950f // setne byte [rsp + 144] + QUAD $0x000000982494950f // setne byte [rsp + 152] LONG $0x605e3b4d // cmp r11, qword [r14 + 96] LONG $0xd7950f40 // setne dil LONG $0x585e3b4d // cmp r11, qword [r14 + 88] @@ -25959,24 +27110,24 @@ LBB5_164: LONG $0x185e3b4d // cmp r11, qword [r14 + 24] LONG $0xd7950f41 // setne r15b LONG $0x105e3b4d // cmp r11, qword [r14 + 16] - LONG $0xd5950f41 // setne r13b + LONG $0xd4950f41 // setne r12b WORD $0x3b4d; BYTE $0x1e // cmp r11, qword [r14] - QUAD $0x000000982494950f // setne byte [rsp + 152] + QUAD $0x000000c02494950f // setne byte [rsp + 192] LONG $0x085e3b4d // cmp r11, qword [r14 + 8] - LONG $0xd4950f41 // setne r12b + LONG $0xd5950f41 // setne r13b LONG $0x405e3b4d // cmp r11, qword [r14 + 64] - QUAD $0x000000c02494950f // setne byte [rsp + 192] - LONG $0x809e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 128] QUAD $0x000000b02494950f // setne byte [rsp + 176] + LONG $0x809e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 128] + QUAD $0x000000902494950f // setne byte [rsp + 144] LONG $0xc09e3b4d; WORD $0x0000; BYTE $0x00 // cmp r11, qword [r14 + 192] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] - WORD $0x0045; BYTE $0xe4 // add r12b, r12b - QUAD $0x0000009824a40244 // add r12b, byte [rsp + 152] - LONG $0x02e5c041 // shl r13b, 2 - WORD $0x0845; BYTE $0xe5 // or r13b, r12b - LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] + LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x0045; BYTE $0xed // add r13b, r13b + QUAD $0x000000c024ac0244 // add r13b, byte [rsp + 192] 
+ LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0845; BYTE $0xec // or r12b, r13b + LONG $0x246c8b4c; BYTE $0x08 // mov r13, qword [rsp + 8] LONG $0x03e7c041 // shl r15b, 3 - WORD $0x0845; BYTE $0xef // or r15b, r13b + WORD $0x0845; BYTE $0xe7 // or r15b, r12b WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0844; BYTE $0xfa // or dl, r15b WORD $0xe1c0; BYTE $0x05 // shl cl, 5 @@ -25985,16 +27136,16 @@ LBB5_164: WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd808 // or al, bl WORD $0xc808 // or al, cl - LONG $0x24048841 // mov byte [r12], al + LONG $0x00458841 // mov byte [r13], al WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000c024b40240 // add sil, byte [rsp + 192] + QUAD $0x000000b024b40240 // add sil, byte [rsp + 176] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xf1 // or r9b, sil LONG $0x03e2c041 // shl r10b, 3 WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x04e7c040 // shl dil, 4 WORD $0x0844; BYTE $0xd7 // or dil, r10b - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + QUAD $0x000000982484b60f // movzx eax, byte [rsp + 152] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf8 // or al, dil QUAD $0x00000088248cb60f // movzx ecx, byte [rsp + 136] @@ -26002,51 +27153,51 @@ LBB5_164: LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xc8 // or r8b, cl WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x24448845; BYTE $0x01 // mov byte [r12 + 1], r8b - LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + LONG $0x01458845 // mov byte [r13 + 1], r8b + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] WORD $0xc000 // add al, al - LONG $0xb0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 176] + LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] + LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl - LONG $0x24448841; BYTE $0x02 // mov byte [r12 + 2], al + LONG $0x02458841 // mov byte [r13 + 2], al LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xc000 // add al, al - LONG $0x60244402 // add al, byte [rsp + 96] + LONG $0x70244402 // add al, byte [rsp + 112] WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE 
$0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xc808 // or al, cl WORD $0xc189 // mov ecx, eax @@ -26056,30 +27207,30 @@ LBB5_164: WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl WORD $0xc808 // or al, cl - LONG $0x24448841; BYTE $0x03 // mov byte [r12 + 3], al + LONG $0x03458841 // mov byte [r13 + 3], al LONG $0x00c68149; WORD $0x0001; BYTE $0x00 // add r14, 256 - LONG $0x04c48349 // add r12, 4 - LONG $0x2464894c; BYTE $0x08 // mov qword [rsp + 8], r12 + LONG $0x04c58349 // add r13, 4 + LONG $0x246c894c; BYTE $0x08 // mov qword [rsp + 8], r13 QUAD $0x000000a824848348; BYTE $0xff // add qword [rsp + 168], -1 - JNE LBB5_164 + JNE LBB5_165 QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] -LBB5_166: +LBB5_167: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB5_168 + JNE LBB5_169 -LBB5_39: +LBB5_40: WORD $0xff31 // xor edi, edi - JMP LBB5_40 + JMP LBB5_41 -LBB5_170: +LBB5_171: LONG $0x1f578d4d // lea r10, [r15 + 31] WORD $0x854d; BYTE $0xff // test r15, r15 LONG $0xd7490f4d // cmovns r10, r15 @@ -26089,14 +27240,16 @@ LBB5_170: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x06100ff3 // movss xmm0, dword [rsi] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB5_174 + JE LBB5_175 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] -LBB5_172: +LBB5_173: LONG $0x062e0f41 // ucomiss xmm0, dword [r14] LONG $0x04768d4d // lea r14, [r14 + 4] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xca08 // or dl, cl WORD $0xdaf6 // neg dl LONG $0x07708d48 // lea rsi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax @@ -26114,245 +27267,336 @@ LBB5_172: LONG $0x303c8841 // mov byte [r8 + rsi], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB5_172 + JNE LBB5_173 LONG $0x24448348; WORD $0x0108 // add qword [rsp + 8], 1 -LBB5_174: +LBB5_175: LONG $0x05fac149 // sar r10, 5 LONG $0x20ff8349 // cmp r15, 32 - JL LBB5_175 + JL LBB5_176 LONG $0x04fa8349 // cmp r10, 4 - JB LBB5_177 + JB LBB5_178 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x07e0c148 // shl rax, 7 WORD $0x014c; BYTE $0xf0 // add rax, r14 LONG $0x24443948; BYTE $0x08 // cmp qword [rsp + 8], rax - JAE LBB5_180 + JAE LBB5_181 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] LONG $0x90048d4a // lea rax, [rax + 4*r10] WORD $0x394c; BYTE $0xf0 // cmp rax, r14 - JBE LBB5_180 + JBE LBB5_181 -LBB5_177: +LBB5_178: WORD $0x3145; BYTE $0xc0 // xor r8d, r8d WORD $0x894c; BYTE $0xf3 // mov rbx, r14 - LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] + LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] -LBB5_183: - LONG $0x245c894c; BYTE $0x08 // mov qword [rsp + 8], r11 +LBB5_184: + LONG $0x2464894c; BYTE $0x10 // mov qword [rsp + 
16], r12 QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 - QUAD $0x000000a82494894c // mov qword [rsp + 168], r10 + QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 WORD $0x294d; BYTE $0xc2 // sub r10, r8 - QUAD $0x000000982494894c // mov qword [rsp + 152], r10 + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 -LBB5_184: +LBB5_185: WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - QUAD $0x000000c02494950f // setne byte [rsp + 192] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + WORD $0x8941; BYTE $0xcd // mov r13d, ecx LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] - LONG $0xd0950f41 // setne r8b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al LONG $0x08432e0f // ucomiss xmm0, dword [rbx + 8] - LONG $0xd6950f41 // setne r14b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x18244c88 // mov byte [rsp + 24], cl LONG $0x0c432e0f // ucomiss xmm0, dword [rbx + 12] - LONG $0xd5950f41 // setne r13b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x20244c88 // mov byte [rsp + 32], cl LONG $0x10432e0f // ucomiss xmm0, dword [rbx + 16] - LONG $0x2454950f; BYTE $0x68 // setne byte [rsp + 104] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x40244c88 // mov byte [rsp + 64], cl LONG $0x14432e0f // ucomiss xmm0, dword [rbx + 20] - LONG $0x2454950f; BYTE $0x50 // setne byte [rsp + 80] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x38244c88 // mov byte [rsp + 56], cl LONG $0x18432e0f // ucomiss xmm0, dword [rbx + 24] - WORD $0x950f; BYTE $0xd0 // setne al + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x30244c88 // mov byte [rsp + 48], cl LONG $0x1c432e0f // ucomiss xmm0, dword [rbx + 28] - LONG $0xd3950f41 // setne r11b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x08244c88 // mov byte [rsp + 8], cl LONG $0x20432e0f // ucomiss xmm0, dword [rbx + 32] - QUAD $0x000000902494950f // setne byte [rsp + 144] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x28244c88 // mov byte [rsp + 40], cl LONG $0x24432e0f // ucomiss xmm0, dword [rbx + 36] - WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x50244c88 // mov byte [rsp + 80], cl LONG $0x28432e0f // ucomiss xmm0, dword [rbx + 40] - LONG $0xd6950f40 // setne sil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x48244c88 // mov byte [rsp + 72], cl LONG $0x2c432e0f // ucomiss xmm0, dword [rbx + 44] - LONG $0xd7950f40 // setne dil + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x68244c88 // mov byte [rsp + 104], cl LONG $0x30432e0f // ucomiss xmm0, dword [rbx + 48] - LONG $0xd2950f41 // setne r10b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x60244c88 // mov byte [rsp + 96], cl LONG $0x34432e0f // ucomiss xmm0, dword [rbx + 52] - LONG $0xd4950f41 // setne r12b + WORD 
$0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x80248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 128], cl LONG $0x38432e0f // ucomiss xmm0, dword [rbx + 56] - QUAD $0x000000882494950f // setne byte [rsp + 136] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x70244c88 // mov byte [rsp + 112], cl LONG $0x3c432e0f // ucomiss xmm0, dword [rbx + 60] - LONG $0xd1950f41 // setne r9b + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x78244c88 // mov byte [rsp + 120], cl LONG $0x40432e0f // ucomiss xmm0, dword [rbx + 64] - LONG $0x2454950f; BYTE $0x78 // setne byte [rsp + 120] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x58244c88 // mov byte [rsp + 88], cl LONG $0x44432e0f // ucomiss xmm0, dword [rbx + 68] - QUAD $0x000000b02494950f // setne byte [rsp + 176] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd4950f41 // setne r12b + WORD $0x0841; BYTE $0xc4 // or r12b, al LONG $0x48432e0f // ucomiss xmm0, dword [rbx + 72] - LONG $0x2454950f; BYTE $0x60 // setne byte [rsp + 96] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x88248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 136], cl LONG $0x4c432e0f // ucomiss xmm0, dword [rbx + 76] - LONG $0x2454950f; BYTE $0x70 // setne byte [rsp + 112] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x98248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 152], cl LONG $0x50432e0f // ucomiss xmm0, dword [rbx + 80] - LONG $0x2454950f; BYTE $0x48 // setne byte [rsp + 72] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0x90248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 144], cl LONG $0x54432e0f // ucomiss xmm0, dword [rbx + 84] - QUAD $0x000000802494950f // setne byte [rsp + 128] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xc0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 192], cl LONG $0x58432e0f // ucomiss xmm0, dword [rbx + 88] - LONG $0x2454950f; BYTE $0x58 // setne byte [rsp + 88] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f41 // setne r14b + WORD $0x0841; BYTE $0xc6 // or r14b, al LONG $0x5c432e0f // ucomiss xmm0, dword [rbx + 92] - LONG $0xd7950f41 // setne r15b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd0950f41 // setne r8b + WORD $0x0841; BYTE $0xc0 // or r8b, al LONG $0x60432e0f // ucomiss xmm0, dword [rbx + 96] - LONG $0x2454950f; BYTE $0x20 // setne byte [rsp + 32] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xb0248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 176], cl LONG $0x64432e0f // ucomiss xmm0, dword [rbx + 100] - LONG $0x2454950f; BYTE $0x30 // setne byte [rsp + 48] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd3950f41 // setne r11b + WORD $0x0841; BYTE $0xc3 // or r11b, al LONG $0x68432e0f // ucomiss xmm0, dword [rbx + 104] - LONG $0x2454950f; BYTE $0x38 // setne byte [rsp + 56] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd2950f41 // setne r10b + WORD $0x0841; BYTE $0xc2 // or r10b, al LONG $0x6c432e0f // ucomiss xmm0, dword [rbx + 108] - LONG $0x2454950f; BYTE $0x18 // setne byte [rsp + 24] + WORD $0x9a0f; BYTE $0xd0 
// setp al + LONG $0xd1950f41 // setne r9b + WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x70432e0f // ucomiss xmm0, dword [rbx + 112] - LONG $0x2454950f; BYTE $0x28 // setne byte [rsp + 40] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f41 // setne r15b + WORD $0x0841; BYTE $0xc7 // or r15b, al LONG $0x74432e0f // ucomiss xmm0, dword [rbx + 116] - LONG $0x2454950f; BYTE $0x40 // setne byte [rsp + 64] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd1 // setne cl + WORD $0xc108 // or cl, al + LONG $0xa8248c88; WORD $0x0000; BYTE $0x00 // mov byte [rsp + 168], cl LONG $0x78432e0f // ucomiss xmm0, dword [rbx + 120] - LONG $0x2454950f; BYTE $0x10 // setne byte [rsp + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd7950f40 // setne dil + WORD $0x0840; BYTE $0xc7 // or dil, al LONG $0x7c432e0f // ucomiss xmm0, dword [rbx + 124] - WORD $0x950f; BYTE $0xd1 // setne cl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000c024840244 // add r8b, byte [rsp + 192] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xc6 // or r14b, r8b + WORD $0x9a0f; BYTE $0xd0 // setp al + LONG $0xd6950f40 // setne sil + WORD $0x0840; BYTE $0xc6 // or sil, al WORD $0xd200 // add dl, dl - LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e6c040 // shl sil, 2 - WORD $0x0840; BYTE $0xd6 // or sil, dl - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd0 // mov r8d, edx - LONG $0x03e7c040 // shl dil, 3 - WORD $0x0840; BYTE $0xf7 // or dil, sil - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xc2 // or dl, r8b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x0000008824b4b60f // movzx esi, byte [rsp + 136] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xf1 // or r9b, sil - WORD $0x0841; BYTE $0xd3 // or r11b, dl - WORD $0x0845; BYTE $0xe1 // or r9b, r12b - QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] - WORD $0xc000 // add al, al - LONG $0x78244402 // add al, byte [rsp + 120] - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] + WORD $0x8941; BYTE $0xd5 // mov r13d, edx + LONG $0x2454b60f; BYTE $0x38 // movzx edx, byte [rsp + 56] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xf2 // or dl, sil - LONG $0x24748b48; BYTE $0x08 // mov rsi, qword [rsp + 8] - WORD $0x8844; BYTE $0x1e // mov byte [rsi], r11b - LONG $0x247cb60f; BYTE $0x58 // movzx edi, byte [rsp + 88] - LONG $0x06e7c040 // shl dil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x014e8844 // mov 
byte [rsi + 1], r9b - WORD $0x0841; BYTE $0xd7 // or r15b, dl LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xc000 // add al, al - LONG $0x20244402 // add al, byte [rsp + 32] - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xd008 // or al, dl WORD $0xc289 // mov edx, eax LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] + WORD $0xc900 // add cl, cl + LONG $0x28244c02 // add cl, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax + LONG $0x08244488 // mov byte [rsp + 8], al + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0x8941; BYTE $0xc5 // mov r13d, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe8 // or al, r13b + LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + WORD $0xe2c0; BYTE $0x04 // shl dl, 4 + WORD $0xca08 // or dl, cl + QUAD $0x00008024acb60f44; BYTE $0x00 // movzx r13d, byte [rsp + 128] + LONG $0x05e5c041 // shl r13b, 5 + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0x0844; BYTE $0xe9 // or cl, r13b + LONG $0x6cb60f44; WORD $0x7824 // movzx r13d, byte [rsp + 120] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xcd // or r13b, cl + WORD $0x0045; BYTE $0xe4 // add r12b, r12b + LONG $0x24640244; BYTE $0x58 // add r12b, byte [rsp + 88] + WORD $0x8944; BYTE $0xe1 // mov ecx, r12d + QUAD $0x00008824a4b60f44; BYTE $0x00 // movzx r12d, byte [rsp + 136] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xcc // or r12b, cl + QUAD $0x00000098248cb60f // movzx ecx, byte [rsp + 152] + WORD $0xe1c0; BYTE $0x03 // shl cl, 3 + WORD $0x0844; BYTE $0xe1 // or cl, r12b + WORD $0x8941; BYTE $0xcc // mov r12d, ecx + QUAD $0x00000090248cb60f // movzx ecx, byte [rsp + 144] + WORD $0xe1c0; BYTE $0x04 // shl cl, 4 + WORD $0x0844; BYTE $0xe1 // or cl, r12b + LONG $0x64b60f44; WORD $0x0824 // movzx r12d, byte [rsp + 8] + WORD $0x0841; BYTE $0xc4 // or r12b, al + QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xd008 // or al, dl - LONG $0x2454b60f; BYTE $0x10 // movzx edx, byte [rsp + 16] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0xd108 // or cl, dl - WORD $0xc108 // or cl, al - LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl - LONG $0x80c38148; WORD $0x0000; BYTE $0x00 // add rbx, 128 - LONG $0x04c68348 // add rsi, 4 
- LONG $0x24748948; BYTE $0x08 // mov qword [rsp + 8], rsi - QUAD $0x0000009824848348; BYTE $0xff // add qword [rsp + 152], -1 - JNE LBB5_184 - LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] - QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - QUAD $0x000000a824948b4c // mov r10, qword [rsp + 168] - JMP LBB5_186 + LONG $0x06e6c041 // shl r14b, 6 + WORD $0x0841; BYTE $0xc6 // or r14b, al + WORD $0x0841; BYTE $0xd5 // or r13b, dl + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0845; BYTE $0xf0 // or r8b, r14b + WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x000000b0249c0244 // add r11b, byte [rsp + 176] + LONG $0x02e2c041 // shl r10b, 2 + WORD $0x0845; BYTE $0xda // or r10b, r11b + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xd1 // or r9b, r10b + LONG $0x04e7c041 // shl r15b, 4 + WORD $0x0845; BYTE $0xcf // or r15b, r9b + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + WORD $0x8844; BYTE $0x20 // mov byte [rax], r12b + QUAD $0x000000a8248cb60f // movzx ecx, byte [rsp + 168] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + LONG $0x06e7c040 // shl dil, 6 + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x01688844 // mov byte [rax + 1], r13b + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0844; BYTE $0xfe // or sil, r15b + LONG $0x02408844 // mov byte [rax + 2], r8b + LONG $0x03708840 // mov byte [rax + 3], sil + LONG $0x80c38148; WORD $0x0000; BYTE $0x00 // add rbx, 128 + LONG $0x04c08348 // add rax, 4 + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax + QUAD $0x000000d024848348; BYTE $0xff // add qword [rsp + 208], -1 + JNE LBB5_185 + LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] + QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] + QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] + JMP LBB5_187 LBB5_9: LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - QUAD $0x0000008024848948 // mov qword [rsp + 128], rax + LONG $0x24448948; BYTE $0x50 // mov qword [rsp + 80], rax -LBB5_91: +LBB5_92: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB5_94 + JNE LBB5_95 WORD $0xf631 // xor esi, esi - JMP LBB5_97 + JMP LBB5_98 -LBB5_61: +LBB5_62: LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - LONG $0x24448948; BYTE $0x58 // mov qword [rsp + 88], rax + LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax -LBB5_72: +LBB5_73: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB5_75 + JNE LBB5_76 WORD $0xf631 // xor esi, esi - JMP LBB5_78 + JMP LBB5_79 -LBB5_104: +LBB5_105: LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] -LBB5_115: +LBB5_116: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JE LBB5_117 + JE LBB5_118 WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0xf631 // xor esi, esi -LBB5_121: +LBB5_122: LONG $0x1e3b4566 // cmp r11w, word [r14] WORD $0x950f; BYTE $0xd2 // 
setne dl WORD $0xdaf6 // neg dl @@ -26380,49 +27624,49 @@ LBB5_121: WORD $0xd830 // xor al, bl LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x3949; BYTE $0xf1 // cmp r9, rsi - JNE LBB5_121 - JMP LBB5_118 + JNE LBB5_122 + JMP LBB5_119 -LBB5_127: +LBB5_128: LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] -LBB5_138: +LBB5_139: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB5_140 + JNE LBB5_141 -LBB5_117: +LBB5_118: WORD $0xf631 // xor esi, esi - JMP LBB5_118 + JMP LBB5_119 -LBB5_175: - LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] +LBB5_176: + LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] WORD $0x894c; BYTE $0xf3 // mov rbx, r14 -LBB5_186: +LBB5_187: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xfa // cmp r10, r15 - JGE LBB5_199 + JGE LBB5_198 WORD $0x894d; BYTE $0xf8 // mov r8, r15 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xfa // add r10, r15 - JNE LBB5_191 + JNE LBB5_192 WORD $0xf631 // xor esi, esi - JMP LBB5_189 + JMP LBB5_190 -LBB5_154: +LBB5_155: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0xff31 // xor edi, edi LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] -LBB5_155: +LBB5_156: WORD $0x3b45; BYTE $0x1e // cmp r11d, dword [r14] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -26450,23 +27694,23 @@ LBB5_155: WORD $0xd330 // xor bl, dl LONG $0x371c8841 // mov byte [r15 + rsi], bl WORD $0x3949; BYTE $0xf9 // cmp r9, rdi - JNE LBB5_155 + JNE LBB5_156 LBB5_24: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_199 + JE LBB5_198 WORD $0x3b45; BYTE $0x1e // cmp r11d, dword [r14] - JMP LBB5_197 - -LBB5_94: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0xf631 // xor esi, esi - QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] + JMP LBB5_26 LBB5_95: + WORD $0x894d; BYTE $0xc2 // mov r10, r8 + LONG $0xfee28349 // and r10, -2 + WORD $0xf631 // xor esi, esi + LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] + +LBB5_96: WORD $0x8948; BYTE $0xf0 // mov rax, rsi - LONG $0x2474b60f; BYTE $0x40 // movzx esi, byte [rsp + 64] + LONG $0x2474b60f; BYTE $0x10 // movzx esi, byte [rsp + 16] LONG $0x06343a41 // cmp sil, byte [r14 + rax] WORD $0x950f; BYTE $0xd3 // setne bl WORD $0xdbf6 // neg bl @@ -26493,30 +27737,30 @@ LBB5_95: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB5_95 + JNE LBB5_96 WORD $0x0149; BYTE $0xf6 // add r14, rsi -LBB5_97: - LONG $0x01c0f641 // test r8b, 1 - JE LBB5_199 - LONG $0x4024448a // mov al, byte [rsp + 64] - WORD $0x3a41; BYTE $0x06 // cmp al, byte [r14] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xf2 // mov rdx, rsi - LONG $0x03eac148 // shr rdx, 3 - QUAD $0x0000008024848b4c // mov r8, qword [rsp + 128] - JMP LBB5_80 +LBB5_98: + LONG $0x01c0f641 // test r8b, 1 + JE LBB5_198 + LONG $0x1024448a // mov al, byte [rsp + 16] + WORD $0x3a41; BYTE $0x06 // cmp al, byte [r14] + WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xd8f6 // neg al + WORD $0x8948; BYTE $0xf2 // mov rdx, rsi + LONG $0x03eac148 // shr rdx, 3 + LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] + JMP LBB5_81 -LBB5_75: +LBB5_76: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 
// and r10, -2 WORD $0xf631 // xor esi, esi - LONG $0x245c8b4c; BYTE $0x58 // mov r11, qword [rsp + 88] + LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] -LBB5_76: +LBB5_77: WORD $0x8948; BYTE $0xf0 // mov rax, rsi - LONG $0x2474b60f; BYTE $0x28 // movzx esi, byte [rsp + 40] + LONG $0x2474b60f; BYTE $0x40 // movzx esi, byte [rsp + 64] LONG $0x06343a41 // cmp sil, byte [r14 + rax] WORD $0x950f; BYTE $0xd3 // setne bl WORD $0xdbf6 // neg bl @@ -26543,21 +27787,21 @@ LBB5_76: WORD $0xd030 // xor al, dl LONG $0x3b048841 // mov byte [r11 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB5_76 + JNE LBB5_77 WORD $0x0149; BYTE $0xf6 // add r14, rsi -LBB5_78: +LBB5_79: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_199 - LONG $0x2824448a // mov al, byte [rsp + 40] + JE LBB5_198 + LONG $0x4024448a // mov al, byte [rsp + 64] WORD $0x3a41; BYTE $0x06 // cmp al, byte [r14] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xf2 // mov rdx, rsi LONG $0x03eac148 // shr rdx, 3 - LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] + LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] -LBB5_80: +LBB5_81: LONG $0x103c8a41 // mov dil, byte [r8 + rdx] LONG $0x07e68040 // and sil, 7 WORD $0x01b3 // mov bl, 1 @@ -26566,57 +27810,78 @@ LBB5_80: WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil + LONG $0x101c8841 // mov byte [r8 + rdx], bl JMP LBB5_198 -LBB5_193: +LBB5_194: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0xff31 // xor edi, edi LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] -LBB5_194: +LBB5_195: LONG $0x2e0f4166; BYTE $0x06 // ucomisd xmm0, qword [r14] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd0 // setne al + WORD $0xc808 // or al, cl WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfe // mov rsi, rdi LONG $0x03eec148 // shr rsi, 3 - LONG $0x14b60f45; BYTE $0x33 // movzx r10d, byte [r11 + rsi] - WORD $0x3044; BYTE $0xd0 // xor al, r10b + LONG $0x14b60f41; BYTE $0x33 // movzx edx, byte [r11 + rsi] WORD $0xf989 // mov ecx, edi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b2 // mov dl, 1 - WORD $0xe2d2 // shl dl, cl - WORD $0xc220 // and dl, al - WORD $0x3044; BYTE $0xd2 // xor dl, r10b - LONG $0x33148841 // mov byte [r11 + rsi], dl - LONG $0x02c78348 // add rdi, 2 - LONG $0x2e0f4166; WORD $0x0846 // ucomisd xmm0, qword [r14 + 8] - LONG $0x10768d4d // lea r14, [r14 + 16] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0xd030 // xor al, dl - WORD $0xc980; BYTE $0x01 // or cl, 1 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl + WORD $0xd030 // xor al, dl WORD $0xc320 // and bl, al WORD $0xd330 // xor bl, dl LONG $0x331c8841 // mov byte [r11 + rsi], bl + LONG $0x02c78348 // add rdi, 2 + LONG $0x2e0f4166; WORD $0x0846 // ucomisd xmm0, qword [r14 + 8] + LONG $0x10768d4d // lea r14, [r14 + 16] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al + WORD $0xdaf6 // neg dl + WORD $0xda30 // xor dl, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0xd020 // and al, dl + WORD $0xd830 // xor al, bl + LONG $0x33048841 // mov byte [r11 + rsi], al WORD $0x3949; BYTE $0xf9 // cmp r9, rdi - JNE LBB5_194 + JNE LBB5_195 -LBB5_195: +LBB5_196: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_199 + JE LBB5_198 LONG $0x2e0f4166; BYTE $0x06 // ucomisd xmm0, qword [r14] - JMP LBB5_197 + WORD $0x9a0f; BYTE $0xd0 // setp al + 
WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al + WORD $0xdaf6 // neg dl + WORD $0x8948; BYTE $0xf8 // mov rax, rdi + LONG $0x03e8c148 // shr rax, 3 + LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] + LONG $0x00348a41 // mov sil, byte [r8 + rax] + LONG $0x07e78040 // and dil, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0xf989 // mov ecx, edi + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf2 // xor dl, sil + WORD $0xd320 // and bl, dl + WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x001c8841 // mov byte [r8 + rax], bl + JMP LBB5_198 -LBB5_168: +LBB5_169: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0xff31 // xor edi, edi LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] -LBB5_169: +LBB5_170: WORD $0x3b4d; BYTE $0x1e // cmp r11, qword [r14] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -26644,14 +27909,14 @@ LBB5_169: WORD $0xd330 // xor bl, dl LONG $0x371c8841 // mov byte [r15 + rsi], bl WORD $0x3949; BYTE $0xf9 // cmp r9, rdi - JNE LBB5_169 + JNE LBB5_170 -LBB5_40: +LBB5_41: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_199 + JE LBB5_198 WORD $0x3b4d; BYTE $0x1e // cmp r11, qword [r14] -LBB5_197: +LBB5_26: WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al WORD $0x8948; BYTE $0xfa // mov rdx, rdi @@ -26665,17 +27930,15 @@ LBB5_197: WORD $0x3040; BYTE $0xf0 // xor al, sil WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xf3 // xor bl, sil + LONG $0x101c8841 // mov byte [r8 + rdx], bl + JMP LBB5_198 -LBB5_198: - LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB5_199 - -LBB5_140: +LBB5_141: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0xf631 // xor esi, esi -LBB5_141: +LBB5_142: LONG $0x1e3b4566 // cmp r11w, word [r14] WORD $0x950f; BYTE $0xd2 // setne dl WORD $0xdaf6 // neg dl @@ -26703,11 +27966,11 @@ LBB5_141: WORD $0xd830 // xor al, bl LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x3949; BYTE $0xf1 // cmp r9, rsi - JNE LBB5_141 + JNE LBB5_142 -LBB5_118: +LBB5_119: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_199 + JE LBB5_198 LONG $0x1e3b4566 // cmp r11w, word [r14] WORD $0x950f; BYTE $0xd0 // setne al WORD $0xd8f6 // neg al @@ -26722,97 +27985,103 @@ LBB5_118: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x141c8841 // mov byte [r12 + rdx], bl - JMP LBB5_199 + JMP LBB5_198 -LBB5_191: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 +LBB5_192: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 WORD $0xf631 // xor esi, esi - WORD $0x894d; BYTE $0xde // mov r14, r11 + WORD $0x894d; BYTE $0xe6 // mov r14, r12 -LBB5_192: +LBB5_193: WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] + WORD $0x9a0f; BYTE $0xd1 // setp cl WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xca08 // or dl, cl WORD $0xdaf6 // neg dl WORD $0x8948; BYTE $0xf7 // mov rdi, rsi LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xca // xor dl, r9b + LONG $0x14b60f45; BYTE $0x3e // movzx r10d, byte [r14 + rdi] WORD $0xf189 // mov ecx, esi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd020 // and al, dl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3e048841 // mov byte [r14 + rdi], al + WORD $0xb341; BYTE $0x01 // mov r11b, 1 + WORD $0xd241; BYTE $0xe3 // shl r11b, cl + WORD $0x3044; BYTE $0xd2 // xor dl, r10b + WORD $0x2041; BYTE $0xd3 // and r11b, dl + WORD $0x3045; BYTE $0xd3 // xor r11b, r10b + 
LONG $0x3e1c8845 // mov byte [r14 + rdi], r11b LONG $0x02c68348 // add rsi, 2 LONG $0x04432e0f // ucomiss xmm0, dword [rbx + 4] LONG $0x085b8d48 // lea rbx, [rbx + 8] - LONG $0xd1950f41 // setne r9b - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xc1 // xor r9b, al + LONG $0xd29a0f41 // setp r10b + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0x0844; BYTE $0xd2 // or dl, r10b + WORD $0xdaf6 // neg dl + WORD $0x3044; BYTE $0xda // xor dl, r11b WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b2 // mov dl, 1 - WORD $0xe2d2 // shl dl, cl - WORD $0x2044; BYTE $0xca // and dl, r9b - WORD $0xc230 // xor dl, al - LONG $0x3e148841 // mov byte [r14 + rdi], dl - WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB5_192 + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0xd020 // and al, dl + WORD $0x3044; BYTE $0xd8 // xor al, r11b + LONG $0x3e048841 // mov byte [r14 + rdi], al + WORD $0x3949; BYTE $0xf1 // cmp r9, rsi + JNE LBB5_193 -LBB5_189: +LBB5_190: LONG $0x01c0f641 // test r8b, 1 - JE LBB5_199 + JE LBB5_198 WORD $0x2e0f; BYTE $0x03 // ucomiss xmm0, dword [rbx] - WORD $0x950f; BYTE $0xd0 // setne al - WORD $0xd8f6 // neg al - WORD $0x8948; BYTE $0xf2 // mov rdx, rsi - LONG $0x03eac148 // shr rdx, 3 - LONG $0x133c8a41 // mov dil, byte [r11 + rdx] + WORD $0x9a0f; BYTE $0xd0 // setp al + WORD $0x950f; BYTE $0xd2 // setne dl + WORD $0xc208 // or dl, al + WORD $0xdaf6 // neg dl + WORD $0x8948; BYTE $0xf0 // mov rax, rsi + LONG $0x03e8c148 // shr rax, 3 + LONG $0x043c8a41 // mov dil, byte [r12 + rax] LONG $0x07e68040 // and sil, 7 WORD $0x01b3 // mov bl, 1 WORD $0xf189 // mov ecx, esi WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf8 // xor al, dil - WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xfa // xor dl, dil + WORD $0xd320 // and bl, dl WORD $0x3040; BYTE $0xfb // xor bl, dil - LONG $0x131c8841 // mov byte [r11 + rdx], bl + LONG $0x041c8841 // mov byte [r12 + rax], bl -LBB5_199: +LBB5_198: MOVQ 288(SP), SP RET -LBB5_85: +LBB5_86: LONG $0xf0e28349 // and r10, -16 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x05e0c148 // shl rax, 5 WORD $0x014c; BYTE $0xf0 // add rax, r14 QUAD $0x0000011024848948 // mov qword [rsp + 272], rax - QUAD $0x000000d82494894c // mov qword [rsp + 216], r10 + QUAD $0x000000e82494894c // mov qword [rsp + 232], r10 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] LONG $0x90048d4a // lea rax, [rax + 4*r10] - QUAD $0x0000008024848948 // mov qword [rsp + 128], rax - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] + LONG $0x24448948; BYTE $0x50 // mov qword [rsp + 80], rax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 WORD $0xc031 // xor eax, eax -LBB5_86: +LBB5_87: QUAD $0x000000a824848948 // mov qword [rsp + 168], rax LONG $0x05e0c148 // shl rax, 5 - WORD $0x8949; BYTE $0xc1 // mov r9, rax - WORD $0x8948; BYTE $0xc3 // mov rbx, rax - WORD $0x8949; BYTE $0xc7 // mov r15, rax WORD $0x8948; BYTE $0xc2 // mov rdx, rax - WORD $0x8949; BYTE $0xc5 // mov r13, rax - WORD $0x8949; BYTE $0xc0 // mov r8, rax WORD $0x8949; BYTE $0xc4 // mov r12, rax + WORD $0x8949; BYTE $0xc0 // mov r8, rax WORD $0x8949; BYTE $0xc2 // mov r10, rax + WORD $0x8949; BYTE $0xc7 // mov r15, rax + WORD $0x8949; BYTE $0xc1 // mov r9, rax WORD $0x8949; BYTE $0xc3 // mov r11, rax + WORD $0x8948; BYTE $0xc3 // mov rbx, rax WORD $0x8948; BYTE $0xc6 // mov 
rsi, rax - LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax + WORD $0x8949; BYTE $0xc5 // mov r13, rax LONG $0x0cb60f41; BYTE $0x06 // movzx ecx, byte [r14 + rax] LONG $0xe16e0f66 // movd xmm4, ecx LONG $0x4cb60f41; WORD $0x0106 // movzx ecx, byte [r14 + rax + 1] @@ -26831,7 +28100,7 @@ LBB5_86: LONG $0x6e0f4466; BYTE $0xf1 // movd xmm14, ecx LONG $0x4cb60f41; WORD $0x0806 // movzx ecx, byte [r14 + rax + 8] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00010024847f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm0 + QUAD $0x0000f024847f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm0 LONG $0x4cb60f41; WORD $0x0906 // movzx ecx, byte [r14 + rax + 9] LONG $0x6e0f4466; BYTE $0xd9 // movd xmm11, ecx LONG $0x4cb60f41; WORD $0x0a06 // movzx ecx, byte [r14 + rax + 10] @@ -26840,7 +28109,7 @@ LBB5_86: LONG $0x6e0f4466; BYTE $0xe9 // movd xmm13, ecx LONG $0x4cb60f41; WORD $0x0c06 // movzx ecx, byte [r14 + rax + 12] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x0000e024847f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm0 + QUAD $0x0000d024847f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm0 LONG $0x4cb60f41; WORD $0x0d06 // movzx ecx, byte [r14 + rax + 13] LONG $0xf16e0f66 // movd xmm6, ecx LONG $0x4cb60f41; WORD $0x0e06 // movzx ecx, byte [r14 + rax + 14] @@ -26849,155 +28118,150 @@ LBB5_86: LONG $0xc16e0f66 // movd xmm0, ecx QUAD $0x0000c024847f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm0 WORD $0x8948; BYTE $0xc1 // mov rcx, rax - LONG $0x24448948; BYTE $0x50 // mov qword [rsp + 80], rax + QUAD $0x0000008024848948 // mov qword [rsp + 128], rax WORD $0x8948; BYTE $0xc7 // mov rdi, rax LONG $0x20cf8348 // or rdi, 32 - LONG $0x247c8948; BYTE $0x18 // mov qword [rsp + 24], rdi - LONG $0x40c98349 // or r9, 64 - LONG $0x244c894c; BYTE $0x48 // mov qword [rsp + 72], r9 - LONG $0x60cb8348 // or rbx, 96 - LONG $0x245c8948; BYTE $0x20 // mov qword [rsp + 32], rbx - LONG $0x80cf8149; WORD $0x0000; BYTE $0x00 // or r15, 128 - LONG $0x247c894c; BYTE $0x30 // mov qword [rsp + 48], r15 - LONG $0xa0ca8148; WORD $0x0000; BYTE $0x00 // or rdx, 160 - LONG $0xc0cd8149; WORD $0x0000; BYTE $0x00 // or r13, 192 - LONG $0xe0c88149; WORD $0x0000; BYTE $0x00 // or r8, 224 - LONG $0x00cc8149; WORD $0x0001; BYTE $0x00 // or r12, 256 - LONG $0x20ca8149; WORD $0x0001; BYTE $0x00 // or r10, 288 - LONG $0x40cb8149; WORD $0x0001; BYTE $0x00 // or r11, 320 + LONG $0x40ca8348 // or rdx, 64 + LONG $0x24548948; BYTE $0x28 // mov qword [rsp + 40], rdx + LONG $0x60cc8349 // or r12, 96 + LONG $0x80c88149; WORD $0x0000; BYTE $0x00 // or r8, 128 + LONG $0xa0ca8149; WORD $0x0000; BYTE $0x00 // or r10, 160 + LONG $0xc0cf8149; WORD $0x0000; BYTE $0x00 // or r15, 192 + LONG $0xe0c98149; WORD $0x0000; BYTE $0x00 // or r9, 224 + LONG $0x00cb8149; WORD $0x0001; BYTE $0x00 // or r11, 256 + LONG $0x20cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 288 + LONG $0x40ce8148; WORD $0x0001; BYTE $0x00 // or rsi, 320 + LONG $0x24748948; BYTE $0x68 // mov qword [rsp + 104], rsi + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] LONG $0x60ce8148; WORD $0x0001; BYTE $0x00 // or rsi, 352 - LONG $0x24748948; BYTE $0x58 // mov qword [rsp + 88], rsi - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] - LONG $0x80ce8148; WORD $0x0001; BYTE $0x00 // or rsi, 384 - LONG $0x24748948; BYTE $0x38 // mov qword [rsp + 56], rsi + LONG $0x24748948; BYTE $0x48 // mov qword [rsp + 72], rsi + LONG $0x80cd8149; WORD $0x0001; BYTE $0x00 // or r13, 384 + LONG $0x246c894c; BYTE $0x70 // 
mov qword [rsp + 112], r13 LONG $0x01a00d48; WORD $0x0000 // or rax, 416 - LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax + LONG $0x24448948; BYTE $0x40 // mov qword [rsp + 64], rax + WORD $0x8949; BYTE $0xcd // mov r13, rcx + LONG $0xc0cd8149; WORD $0x0001; BYTE $0x00 // or r13, 448 + LONG $0x246c894c; BYTE $0x38 // mov qword [rsp + 56], r13 LONG $0xe0c98148; WORD $0x0001; BYTE $0x00 // or rcx, 480 - LONG $0x244c8948; BYTE $0x10 // mov qword [rsp + 16], rcx + LONG $0x244c8948; BYTE $0x20 // mov qword [rsp + 32], rcx + WORD $0x8948; BYTE $0xf9 // mov rcx, rdi QUAD $0x013e24203a0f4166 // pinsrb xmm4, byte [r14 + rdi], 1 - QUAD $0x020e24203a0f4366 // pinsrb xmm4, byte [r14 + r9], 2 - QUAD $0x031e24203a0f4166 // pinsrb xmm4, byte [r14 + rbx], 3 - QUAD $0x043e24203a0f4366 // pinsrb xmm4, byte [r14 + r15], 4 - WORD $0x8948; BYTE $0xd7 // mov rdi, rdx - QUAD $0x051624203a0f4166 // pinsrb xmm4, byte [r14 + rdx], 5 - WORD $0x894c; BYTE $0xea // mov rdx, r13 - QUAD $0x0000009824ac894c // mov qword [rsp + 152], r13 - QUAD $0x062e24203a0f4366 // pinsrb xmm4, byte [r14 + r13], 6 - WORD $0x894d; BYTE $0xc5 // mov r13, r8 - QUAD $0x070624203a0f4366 // pinsrb xmm4, byte [r14 + r8], 7 - WORD $0x894d; BYTE $0xe0 // mov r8, r12 - QUAD $0x082624203a0f4366 // pinsrb xmm4, byte [r14 + r12], 8 - QUAD $0x091624203a0f4366 // pinsrb xmm4, byte [r14 + r10], 9 - LONG $0x245c894c; BYTE $0x70 // mov qword [rsp + 112], r11 - QUAD $0x0a1e24203a0f4366 // pinsrb xmm4, byte [r14 + r11], 10 - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x0b0624203a0f4166 // pinsrb xmm4, byte [r14 + rax], 11 - QUAD $0x0c3624203a0f4166 // pinsrb xmm4, byte [r14 + rsi], 12 - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x0d0e24203a0f4166 // pinsrb xmm4, byte [r14 + rcx], 13 - LONG $0x24648b4c; BYTE $0x28 // mov r12, qword [rsp + 40] - QUAD $0x0e2624203a0f4366 // pinsrb xmm4, byte [r14 + r12], 14 - LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] - QUAD $0x0f1e24203a0f4166 // pinsrb xmm4, byte [r14 + rbx], 15 - LONG $0x247c8b4c; BYTE $0x18 // mov r15, qword [rsp + 24] - QUAD $0x013e5c203a0f4366; BYTE $0x01 // pinsrb xmm3, byte [r14 + r15 + 1], 1 - QUAD $0x010e5c203a0f4366; BYTE $0x02 // pinsrb xmm3, byte [r14 + r9 + 1], 2 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x011e5c203a0f4166; BYTE $0x03 // pinsrb xmm3, byte [r14 + rbx + 1], 3 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] - QUAD $0x010e5c203a0f4366; BYTE $0x04 // pinsrb xmm3, byte [r14 + r9 + 1], 4 - QUAD $0x013e5c203a0f4166; BYTE $0x05 // pinsrb xmm3, byte [r14 + rdi + 1], 5 - LONG $0x247c8948; BYTE $0x60 // mov qword [rsp + 96], rdi - QUAD $0x01165c203a0f4166; BYTE $0x06 // pinsrb xmm3, byte [r14 + rdx + 1], 6 - QUAD $0x012e5c203a0f4366; BYTE $0x07 // pinsrb xmm3, byte [r14 + r13 + 1], 7 - WORD $0x894c; BYTE $0xeb // mov rbx, r13 - QUAD $0x01065c203a0f4366; BYTE $0x08 // pinsrb xmm3, byte [r14 + r8 + 1], 8 - WORD $0x894d; BYTE $0xc5 // mov r13, r8 - QUAD $0x01165c203a0f4366; BYTE $0x09 // pinsrb xmm3, byte [r14 + r10 + 1], 9 - WORD $0x894c; BYTE $0xd2 // mov rdx, r10 - QUAD $0x000000902494894c // mov qword [rsp + 144], r10 - QUAD $0x011e5c203a0f4366; BYTE $0x0a // pinsrb xmm3, byte [r14 + r11 + 1], 10 - QUAD $0x01065c203a0f4166; BYTE $0x0b // pinsrb xmm3, byte [r14 + rax + 1], 11 - QUAD $0x01365c203a0f4166; BYTE $0x0c // pinsrb xmm3, 
byte [r14 + rsi + 1], 12 - QUAD $0x010e5c203a0f4166; BYTE $0x0d // pinsrb xmm3, byte [r14 + rcx + 1], 13 - QUAD $0x01265c203a0f4366; BYTE $0x0e // pinsrb xmm3, byte [r14 + r12 + 1], 14 + QUAD $0x021624203a0f4166 // pinsrb xmm4, byte [r14 + rdx], 2 + LONG $0x2464894c; BYTE $0x78 // mov qword [rsp + 120], r12 + QUAD $0x032624203a0f4366 // pinsrb xmm4, byte [r14 + r12], 3 + QUAD $0x040624203a0f4366 // pinsrb xmm4, byte [r14 + r8], 4 + LONG $0x2454894c; BYTE $0x60 // mov qword [rsp + 96], r10 + QUAD $0x051624203a0f4366 // pinsrb xmm4, byte [r14 + r10], 5 + QUAD $0x0000008824bc894c // mov qword [rsp + 136], r15 + QUAD $0x063e24203a0f4366 // pinsrb xmm4, byte [r14 + r15], 6 + QUAD $0x070e24203a0f4366 // pinsrb xmm4, byte [r14 + r9], 7 + QUAD $0x081e24203a0f4366 // pinsrb xmm4, byte [r14 + r11], 8 + LONG $0x245c8948; BYTE $0x58 // mov qword [rsp + 88], rbx + QUAD $0x091e24203a0f4166 // pinsrb xmm4, byte [r14 + rbx], 9 + LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + QUAD $0x0a1e24203a0f4166 // pinsrb xmm4, byte [r14 + rbx], 10 + QUAD $0x0b3624203a0f4166 // pinsrb xmm4, byte [r14 + rsi], 11 + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x0c3e24203a0f4166 // pinsrb xmm4, byte [r14 + rdi], 12 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0d0624203a0f4166 // pinsrb xmm4, byte [r14 + rax], 13 + QUAD $0x0e2e24203a0f4366 // pinsrb xmm4, byte [r14 + r13], 14 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0f0624203a0f4166 // pinsrb xmm4, byte [r14 + rax], 15 + QUAD $0x010e5c203a0f4166; BYTE $0x01 // pinsrb xmm3, byte [r14 + rcx + 1], 1 + QUAD $0x01165c203a0f4166; BYTE $0x02 // pinsrb xmm3, byte [r14 + rdx + 1], 2 + QUAD $0x01265c203a0f4366; BYTE $0x03 // pinsrb xmm3, byte [r14 + r12 + 1], 3 + QUAD $0x01065c203a0f4366; BYTE $0x04 // pinsrb xmm3, byte [r14 + r8 + 1], 4 + WORD $0x894c; BYTE $0xc2 // mov rdx, r8 + QUAD $0x01165c203a0f4366; BYTE $0x05 // pinsrb xmm3, byte [r14 + r10 + 1], 5 + QUAD $0x013e5c203a0f4366; BYTE $0x06 // pinsrb xmm3, byte [r14 + r15 + 1], 6 + QUAD $0x010e5c203a0f4366; BYTE $0x07 // pinsrb xmm3, byte [r14 + r9 + 1], 7 + WORD $0x894d; BYTE $0xc8 // mov r8, r9 + QUAD $0x011e5c203a0f4366; BYTE $0x08 // pinsrb xmm3, byte [r14 + r11 + 1], 8 + WORD $0x894d; BYTE $0xda // mov r10, r11 + LONG $0x24648b4c; BYTE $0x58 // mov r12, qword [rsp + 88] + QUAD $0x01265c203a0f4366; BYTE $0x09 // pinsrb xmm3, byte [r14 + r12 + 1], 9 + QUAD $0x011e5c203a0f4166; BYTE $0x0a // pinsrb xmm3, byte [r14 + rbx + 1], 10 + QUAD $0x01365c203a0f4166; BYTE $0x0b // pinsrb xmm3, byte [r14 + rsi + 1], 11 + QUAD $0x013e5c203a0f4166; BYTE $0x0c // pinsrb xmm3, byte [r14 + rdi + 1], 12 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x01065c203a0f4166; BYTE $0x0d // pinsrb xmm3, byte [r14 + rax + 1], 13 + QUAD $0x012e5c203a0f4366; BYTE $0x0e // pinsrb xmm3, byte [r14 + r13 + 1], 14 QUAD $0x0000b0248c6f0f66; BYTE $0x00 // movdqa xmm1, oword [rsp + 176] LONG $0xe1740f66 // pcmpeqb xmm4, xmm1 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] - QUAD $0x01065c203a0f4166; BYTE $0x0f // pinsrb xmm3, byte [r14 + rax + 1], 15 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x012e5c203a0f4366; BYTE $0x0f // pinsrb xmm3, byte [r14 + r13 + 1], 15 LONG $0xd9740f66 // pcmpeqb xmm3, xmm1 QUAD $0x00000100856f0f66 // movdqa xmm0, oword 256[rbp] /* [rip + .LCPI5_16] */ LONG $0xd8df0f66 // pandn xmm3, xmm0 LONG $0xdcfc0f66 // paddb xmm3, xmm4 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + 
QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] LONG $0x74b60f41; WORD $0x1006 // movzx esi, byte [r14 + rax + 16] LONG $0x6e0f4466; BYTE $0xd6 // movd xmm10, esi - LONG $0x24648b4c; BYTE $0x18 // mov r12, qword [rsp + 24] - QUAD $0x02266c203a0f4366; BYTE $0x01 // pinsrb xmm5, byte [r14 + r12 + 2], 1 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + QUAD $0x020e6c203a0f4166; BYTE $0x01 // pinsrb xmm5, byte [r14 + rcx + 2], 1 + WORD $0x8948; BYTE $0xc8 // mov rax, rcx + LONG $0x244c8948; BYTE $0x18 // mov qword [rsp + 24], rcx + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] QUAD $0x020e6c203a0f4166; BYTE $0x02 // pinsrb xmm5, byte [r14 + rcx + 2], 2 - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x247c8b4c; BYTE $0x78 // mov r15, qword [rsp + 120] QUAD $0x023e6c203a0f4366; BYTE $0x03 // pinsrb xmm5, byte [r14 + r15 + 2], 3 - WORD $0x894d; BYTE $0xcb // mov r11, r9 - QUAD $0x020e6c203a0f4366; BYTE $0x04 // pinsrb xmm5, byte [r14 + r9 + 2], 4 - QUAD $0x023e6c203a0f4166; BYTE $0x05 // pinsrb xmm5, byte [r14 + rdi + 2], 5 - QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] - QUAD $0x02166c203a0f4366; BYTE $0x06 // pinsrb xmm5, byte [r14 + r10 + 2], 6 - WORD $0x8949; BYTE $0xd8 // mov r8, rbx - QUAD $0x021e6c203a0f4166; BYTE $0x07 // pinsrb xmm5, byte [r14 + rbx + 2], 7 - LONG $0x246c894c; BYTE $0x68 // mov qword [rsp + 104], r13 - QUAD $0x022e6c203a0f4366; BYTE $0x08 // pinsrb xmm5, byte [r14 + r13 + 2], 8 - QUAD $0x02166c203a0f4166; BYTE $0x09 // pinsrb xmm5, byte [r14 + rdx + 2], 9 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] - QUAD $0x023e6c203a0f4166; BYTE $0x0a // pinsrb xmm5, byte [r14 + rdi + 2], 10 - LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] - QUAD $0x02366c203a0f4166; BYTE $0x0b // pinsrb xmm5, byte [r14 + rsi + 2], 11 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] - QUAD $0x02066c203a0f4166; BYTE $0x0c // pinsrb xmm5, byte [r14 + rax + 2], 12 - LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] - QUAD $0x021e6c203a0f4166; BYTE $0x0d // pinsrb xmm5, byte [r14 + rbx + 2], 13 - LONG $0x244c8b4c; BYTE $0x28 // mov r9, qword [rsp + 40] - QUAD $0x020e6c203a0f4366; BYTE $0x0e // pinsrb xmm5, byte [r14 + r9 + 2], 14 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] - QUAD $0x02166c203a0f4166; BYTE $0x0f // pinsrb xmm5, byte [r14 + rdx + 2], 15 - QUAD $0x03267c203a0f4366; BYTE $0x01 // pinsrb xmm7, byte [r14 + r12 + 3], 1 - QUAD $0x030e7c203a0f4166; BYTE $0x02 // pinsrb xmm7, byte [r14 + rcx + 3], 2 + WORD $0x8948; BYTE $0xd3 // mov rbx, rdx + QUAD $0x02166c203a0f4166; BYTE $0x04 // pinsrb xmm5, byte [r14 + rdx + 2], 4 + LONG $0x244c8b4c; BYTE $0x60 // mov r9, qword [rsp + 96] + QUAD $0x020e6c203a0f4366; BYTE $0x05 // pinsrb xmm5, byte [r14 + r9 + 2], 5 + QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] + QUAD $0x021e6c203a0f4366; BYTE $0x06 // pinsrb xmm5, byte [r14 + r11 + 2], 6 + WORD $0x894c; BYTE $0xc2 // mov rdx, r8 + LONG $0x2444894c; BYTE $0x30 // mov qword [rsp + 48], r8 + QUAD $0x02066c203a0f4366; BYTE $0x07 // pinsrb xmm5, byte [r14 + r8 + 2], 7 + QUAD $0x02166c203a0f4366; BYTE $0x08 // pinsrb xmm5, byte [r14 + r10 + 2], 8 + QUAD $0x02266c203a0f4366; BYTE $0x09 // pinsrb xmm5, byte [r14 + r12 + 2], 9 + LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] + QUAD $0x02066c203a0f4366; BYTE $0x0a // pinsrb xmm5, byte [r14 + r8 + 2], 10 + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + QUAD $0x020e6c203a0f4166; BYTE $0x0b // pinsrb xmm5, byte [r14 
+ rcx + 2], 11 + QUAD $0x023e6c203a0f4166; BYTE $0x0c // pinsrb xmm5, byte [r14 + rdi + 2], 12 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x02366c203a0f4166; BYTE $0x0d // pinsrb xmm5, byte [r14 + rsi + 2], 13 + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + QUAD $0x02366c203a0f4166; BYTE $0x0e // pinsrb xmm5, byte [r14 + rsi + 2], 14 + QUAD $0x022e6c203a0f4366; BYTE $0x0f // pinsrb xmm5, byte [r14 + r13 + 2], 15 + QUAD $0x03067c203a0f4166; BYTE $0x01 // pinsrb xmm7, byte [r14 + rax + 3], 1 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x03067c203a0f4166; BYTE $0x02 // pinsrb xmm7, byte [r14 + rax + 3], 2 QUAD $0x033e7c203a0f4366; BYTE $0x03 // pinsrb xmm7, byte [r14 + r15 + 3], 3 - QUAD $0x031e7c203a0f4366; BYTE $0x04 // pinsrb xmm7, byte [r14 + r11 + 3], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x03067c203a0f4166; BYTE $0x05 // pinsrb xmm7, byte [r14 + rax + 3], 5 - QUAD $0x03167c203a0f4366; BYTE $0x06 // pinsrb xmm7, byte [r14 + r10 + 3], 6 - QUAD $0x03067c203a0f4366; BYTE $0x07 // pinsrb xmm7, byte [r14 + r8 + 3], 7 - QUAD $0x032e7c203a0f4366; BYTE $0x08 // pinsrb xmm7, byte [r14 + r13 + 3], 8 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] - QUAD $0x03067c203a0f4166; BYTE $0x09 // pinsrb xmm7, byte [r14 + rax + 3], 9 - QUAD $0x033e7c203a0f4166; BYTE $0x0a // pinsrb xmm7, byte [r14 + rdi + 3], 10 - QUAD $0x03367c203a0f4166; BYTE $0x0b // pinsrb xmm7, byte [r14 + rsi + 3], 11 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] - QUAD $0x03067c203a0f4166; BYTE $0x0c // pinsrb xmm7, byte [r14 + rax + 3], 12 - QUAD $0x031e7c203a0f4166; BYTE $0x0d // pinsrb xmm7, byte [r14 + rbx + 3], 13 - QUAD $0x030e7c203a0f4366; BYTE $0x0e // pinsrb xmm7, byte [r14 + r9 + 3], 14 - QUAD $0x03167c203a0f4166; BYTE $0x0f // pinsrb xmm7, byte [r14 + rdx + 3], 15 - QUAD $0x04264c203a0f4766; BYTE $0x01 // pinsrb xmm9, byte [r14 + r12 + 4], 1 - QUAD $0x040e4c203a0f4566; BYTE $0x02 // pinsrb xmm9, byte [r14 + rcx + 4], 2 + QUAD $0x031e7c203a0f4166; BYTE $0x04 // pinsrb xmm7, byte [r14 + rbx + 3], 4 + QUAD $0x030e7c203a0f4366; BYTE $0x05 // pinsrb xmm7, byte [r14 + r9 + 3], 5 + QUAD $0x031e7c203a0f4366; BYTE $0x06 // pinsrb xmm7, byte [r14 + r11 + 3], 6 + QUAD $0x03167c203a0f4166; BYTE $0x07 // pinsrb xmm7, byte [r14 + rdx + 3], 7 + QUAD $0x03167c203a0f4366; BYTE $0x08 // pinsrb xmm7, byte [r14 + r10 + 3], 8 + QUAD $0x03267c203a0f4366; BYTE $0x09 // pinsrb xmm7, byte [r14 + r12 + 3], 9 + QUAD $0x03067c203a0f4366; BYTE $0x0a // pinsrb xmm7, byte [r14 + r8 + 3], 10 + QUAD $0x030e7c203a0f4166; BYTE $0x0b // pinsrb xmm7, byte [r14 + rcx + 3], 11 + QUAD $0x033e7c203a0f4166; BYTE $0x0c // pinsrb xmm7, byte [r14 + rdi + 3], 12 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x03067c203a0f4166; BYTE $0x0d // pinsrb xmm7, byte [r14 + rax + 3], 13 + QUAD $0x03367c203a0f4166; BYTE $0x0e // pinsrb xmm7, byte [r14 + rsi + 3], 14 + QUAD $0x032e7c203a0f4366; BYTE $0x0f // pinsrb xmm7, byte [r14 + r13 + 3], 15 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x04064c203a0f4566; BYTE $0x01 // pinsrb xmm9, byte [r14 + rax + 4], 1 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x04064c203a0f4566; BYTE $0x02 // pinsrb xmm9, byte [r14 + rax + 4], 2 QUAD $0x043e4c203a0f4766; BYTE $0x03 // pinsrb xmm9, byte [r14 + r15 + 4], 3 - QUAD $0x041e4c203a0f4766; BYTE $0x04 // pinsrb xmm9, byte [r14 + r11 + 4], 4 - LONG $0x24648b4c; BYTE $0x60 // mov r12, qword [rsp + 96] - QUAD 
$0x04264c203a0f4766; BYTE $0x05 // pinsrb xmm9, byte [r14 + r12 + 4], 5 - QUAD $0x04164c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r10 + 4], 6 - QUAD $0x04064c203a0f4766; BYTE $0x07 // pinsrb xmm9, byte [r14 + r8 + 4], 7 - QUAD $0x042e4c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r13 + 4], 8 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] - QUAD $0x040e4c203a0f4566; BYTE $0x09 // pinsrb xmm9, byte [r14 + rcx + 4], 9 - QUAD $0x043e4c203a0f4566; BYTE $0x0a // pinsrb xmm9, byte [r14 + rdi + 4], 10 - QUAD $0x04364c203a0f4566; BYTE $0x0b // pinsrb xmm9, byte [r14 + rsi + 4], 11 - QUAD $0x04064c203a0f4566; BYTE $0x0c // pinsrb xmm9, byte [r14 + rax + 4], 12 - QUAD $0x041e4c203a0f4566; BYTE $0x0d // pinsrb xmm9, byte [r14 + rbx + 4], 13 - QUAD $0x040e4c203a0f4766; BYTE $0x0e // pinsrb xmm9, byte [r14 + r9 + 4], 14 - QUAD $0x04164c203a0f4566; BYTE $0x0f // pinsrb xmm9, byte [r14 + rdx + 4], 15 + QUAD $0x041e4c203a0f4566; BYTE $0x04 // pinsrb xmm9, byte [r14 + rbx + 4], 4 + QUAD $0x040e4c203a0f4766; BYTE $0x05 // pinsrb xmm9, byte [r14 + r9 + 4], 5 + QUAD $0x041e4c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r11 + 4], 6 + QUAD $0x04164c203a0f4566; BYTE $0x07 // pinsrb xmm9, byte [r14 + rdx + 4], 7 + QUAD $0x04164c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r10 + 4], 8 + QUAD $0x000000902494894c // mov qword [rsp + 144], r10 + QUAD $0x04264c203a0f4766; BYTE $0x09 // pinsrb xmm9, byte [r14 + r12 + 4], 9 + QUAD $0x04064c203a0f4766; BYTE $0x0a // pinsrb xmm9, byte [r14 + r8 + 4], 10 + QUAD $0x040e4c203a0f4566; BYTE $0x0b // pinsrb xmm9, byte [r14 + rcx + 4], 11 + QUAD $0x043e4c203a0f4566; BYTE $0x0c // pinsrb xmm9, byte [r14 + rdi + 4], 12 + LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] + QUAD $0x04164c203a0f4566; BYTE $0x0d // pinsrb xmm9, byte [r14 + rdx + 4], 13 + QUAD $0x04364c203a0f4566; BYTE $0x0e // pinsrb xmm9, byte [r14 + rsi + 4], 14 + QUAD $0x042e4c203a0f4766; BYTE $0x0f // pinsrb xmm9, byte [r14 + r13 + 4], 15 LONG $0xe9740f66 // pcmpeqb xmm5, xmm1 QUAD $0x00000110856f0f66 // movdqa xmm0, oword 272[rbp] /* [rip + .LCPI5_17] */ LONG $0xe8df0f66 // pandn xmm5, xmm0 @@ -27005,87 +28269,82 @@ LBB5_86: QUAD $0x00000120856f0f66 // movdqa xmm0, oword 288[rbp] /* [rip + .LCPI5_18] */ LONG $0xf8df0f66 // pandn xmm7, xmm0 LONG $0xfdeb0f66 // por xmm7, xmm5 - LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] - LONG $0x74b60f41; WORD $0x1116 // movzx esi, byte [r14 + rdx + 17] + QUAD $0x00000080248c8b4c // mov r9, qword [rsp + 128] + LONG $0x74b60f43; WORD $0x110e // movzx esi, byte [r14 + r9 + 17] LONG $0xe66e0f66 // movd xmm4, esi LONG $0x740f4466; BYTE $0xc9 // pcmpeqb xmm9, xmm1 QUAD $0x00000130856f0f66 // movdqa xmm0, oword 304[rbp] /* [rip + .LCPI5_19] */ LONG $0xdf0f4466; BYTE $0xc8 // pandn xmm9, xmm0 LONG $0xeb0f4466; BYTE $0xcf // por xmm9, xmm7 - LONG $0x74b60f41; WORD $0x1216 // movzx esi, byte [r14 + rdx + 18] + LONG $0x74b60f43; WORD $0x120e // movzx esi, byte [r14 + r9 + 18] LONG $0xfe6e0f66 // movd xmm7, esi LONG $0xc0760f66 // pcmpeqd xmm0, xmm0 LONG $0xd8f80f66 // psubb xmm3, xmm0 LONG $0xeb0f4466; BYTE $0xcb // por xmm9, xmm3 - LONG $0x74b60f41; WORD $0x1316 // movzx esi, byte [r14 + rdx + 19] + LONG $0x74b60f43; WORD $0x130e // movzx esi, byte [r14 + r9 + 19] LONG $0xee6e0f66 // movd xmm5, esi - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] - QUAD $0x051654203a0f4166; BYTE $0x01 // pinsrb xmm2, byte [r14 + rdx + 5], 1 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] - QUAD $0x053e54203a0f4166; BYTE $0x02 
// pinsrb xmm2, byte [r14 + rdi + 5], 2 + LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] + QUAD $0x050e54203a0f4366; BYTE $0x01 // pinsrb xmm2, byte [r14 + r9 + 5], 1 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x050654203a0f4166; BYTE $0x02 // pinsrb xmm2, byte [r14 + rax + 5], 2 QUAD $0x053e54203a0f4366; BYTE $0x03 // pinsrb xmm2, byte [r14 + r15 + 5], 3 - QUAD $0x051e54203a0f4366; BYTE $0x04 // pinsrb xmm2, byte [r14 + r11 + 5], 4 - WORD $0x894d; BYTE $0xe1 // mov r9, r12 - QUAD $0x052654203a0f4366; BYTE $0x05 // pinsrb xmm2, byte [r14 + r12 + 5], 5 - QUAD $0x051654203a0f4366; BYTE $0x06 // pinsrb xmm2, byte [r14 + r10 + 5], 6 - WORD $0x894d; BYTE $0xc5 // mov r13, r8 - QUAD $0x050654203a0f4366; BYTE $0x07 // pinsrb xmm2, byte [r14 + r8 + 5], 7 - LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] - QUAD $0x051e54203a0f4366; BYTE $0x08 // pinsrb xmm2, byte [r14 + r11 + 5], 8 + QUAD $0x051e54203a0f4166; BYTE $0x04 // pinsrb xmm2, byte [r14 + rbx + 5], 4 + WORD $0x8949; BYTE $0xdc // mov r12, rbx + QUAD $0x00000098249c8948 // mov qword [rsp + 152], rbx + LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] + QUAD $0x052e54203a0f4366; BYTE $0x05 // pinsrb xmm2, byte [r14 + r13 + 5], 5 + QUAD $0x051e54203a0f4366; BYTE $0x06 // pinsrb xmm2, byte [r14 + r11 + 5], 6 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x053654203a0f4166; BYTE $0x07 // pinsrb xmm2, byte [r14 + rsi + 5], 7 + QUAD $0x051654203a0f4366; BYTE $0x08 // pinsrb xmm2, byte [r14 + r10 + 5], 8 + LONG $0x247c8b4c; BYTE $0x58 // mov r15, qword [rsp + 88] + QUAD $0x053e54203a0f4366; BYTE $0x09 // pinsrb xmm2, byte [r14 + r15 + 5], 9 + QUAD $0x050654203a0f4366; BYTE $0x0a // pinsrb xmm2, byte [r14 + r8 + 5], 10 + QUAD $0x050e54203a0f4166; BYTE $0x0b // pinsrb xmm2, byte [r14 + rcx + 5], 11 + QUAD $0x053e54203a0f4166; BYTE $0x0c // pinsrb xmm2, byte [r14 + rdi + 5], 12 + QUAD $0x051654203a0f4166; BYTE $0x0d // pinsrb xmm2, byte [r14 + rdx + 5], 13 + LONG $0x245c8b4c; BYTE $0x38 // mov r11, qword [rsp + 56] + QUAD $0x051e54203a0f4366; BYTE $0x0e // pinsrb xmm2, byte [r14 + r11 + 5], 14 + LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + QUAD $0x051654203a0f4366; BYTE $0x0f // pinsrb xmm2, byte [r14 + r10 + 5], 15 + QUAD $0x060e44203a0f4766; BYTE $0x01 // pinsrb xmm8, byte [r14 + r9 + 6], 1 + QUAD $0x060644203a0f4566; BYTE $0x02 // pinsrb xmm8, byte [r14 + rax + 6], 2 + LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] + QUAD $0x061e44203a0f4566; BYTE $0x03 // pinsrb xmm8, byte [r14 + rbx + 6], 3 + QUAD $0x062644203a0f4766; BYTE $0x04 // pinsrb xmm8, byte [r14 + r12 + 6], 4 + QUAD $0x062e44203a0f4766; BYTE $0x05 // pinsrb xmm8, byte [r14 + r13 + 6], 5 + QUAD $0x0000008824a48b4c // mov r12, qword [rsp + 136] + QUAD $0x062644203a0f4766; BYTE $0x06 // pinsrb xmm8, byte [r14 + r12 + 6], 6 + QUAD $0x063644203a0f4566; BYTE $0x07 // pinsrb xmm8, byte [r14 + rsi + 6], 7 QUAD $0x0000009024a48b4c // mov r12, qword [rsp + 144] - QUAD $0x052654203a0f4366; BYTE $0x09 // pinsrb xmm2, byte [r14 + r12 + 5], 9 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x053654203a0f4166; BYTE $0x0a // pinsrb xmm2, byte [r14 + rsi + 5], 10 - LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] - QUAD $0x050654203a0f4366; BYTE $0x0b // pinsrb xmm2, byte [r14 + r8 + 5], 11 - WORD $0x8948; BYTE $0xc1 // mov rcx, rax - QUAD $0x050654203a0f4166; BYTE $0x0c // pinsrb xmm2, byte [r14 + rax + 5], 12 - QUAD $0x051e54203a0f4166; BYTE $0x0d // pinsrb xmm2, byte 
[r14 + rbx + 5], 13 - LONG $0x247c8b4c; BYTE $0x28 // mov r15, qword [rsp + 40] - QUAD $0x053e54203a0f4366; BYTE $0x0e // pinsrb xmm2, byte [r14 + r15 + 5], 14 - LONG $0x247c8b4c; BYTE $0x10 // mov r15, qword [rsp + 16] - QUAD $0x053e54203a0f4366; BYTE $0x0f // pinsrb xmm2, byte [r14 + r15 + 5], 15 - QUAD $0x061644203a0f4566; BYTE $0x01 // pinsrb xmm8, byte [r14 + rdx + 6], 1 - QUAD $0x063e44203a0f4566; BYTE $0x02 // pinsrb xmm8, byte [r14 + rdi + 6], 2 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x060644203a0f4566; BYTE $0x03 // pinsrb xmm8, byte [r14 + rax + 6], 3 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x060644203a0f4566; BYTE $0x04 // pinsrb xmm8, byte [r14 + rax + 6], 4 - QUAD $0x060e44203a0f4766; BYTE $0x05 // pinsrb xmm8, byte [r14 + r9 + 6], 5 - QUAD $0x061644203a0f4766; BYTE $0x06 // pinsrb xmm8, byte [r14 + r10 + 6], 6 - QUAD $0x062e44203a0f4766; BYTE $0x07 // pinsrb xmm8, byte [r14 + r13 + 6], 7 - WORD $0x894d; BYTE $0xea // mov r10, r13 - QUAD $0x0000008824ac894c // mov qword [rsp + 136], r13 - QUAD $0x061e44203a0f4766; BYTE $0x08 // pinsrb xmm8, byte [r14 + r11 + 6], 8 - QUAD $0x062644203a0f4766; BYTE $0x09 // pinsrb xmm8, byte [r14 + r12 + 6], 9 - QUAD $0x063644203a0f4566; BYTE $0x0a // pinsrb xmm8, byte [r14 + rsi + 6], 10 - QUAD $0x060644203a0f4766; BYTE $0x0b // pinsrb xmm8, byte [r14 + r8 + 6], 11 - QUAD $0x060e44203a0f4566; BYTE $0x0c // pinsrb xmm8, byte [r14 + rcx + 6], 12 - QUAD $0x061e44203a0f4566; BYTE $0x0d // pinsrb xmm8, byte [r14 + rbx + 6], 13 - LONG $0x246c8b4c; BYTE $0x28 // mov r13, qword [rsp + 40] - QUAD $0x062e44203a0f4766; BYTE $0x0e // pinsrb xmm8, byte [r14 + r13 + 6], 14 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 - QUAD $0x063e44203a0f4766; BYTE $0x0f // pinsrb xmm8, byte [r14 + r15 + 6], 15 - QUAD $0x071674203a0f4566; BYTE $0x01 // pinsrb xmm14, byte [r14 + rdx + 7], 1 - QUAD $0x073e74203a0f4566; BYTE $0x02 // pinsrb xmm14, byte [r14 + rdi + 7], 2 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x070674203a0f4566; BYTE $0x03 // pinsrb xmm14, byte [r14 + rax + 7], 3 - WORD $0x8948; BYTE $0xc2 // mov rdx, rax - LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] + QUAD $0x062644203a0f4766; BYTE $0x08 // pinsrb xmm8, byte [r14 + r12 + 6], 8 + QUAD $0x063e44203a0f4766; BYTE $0x09 // pinsrb xmm8, byte [r14 + r15 + 6], 9 + QUAD $0x060644203a0f4766; BYTE $0x0a // pinsrb xmm8, byte [r14 + r8 + 6], 10 + QUAD $0x060e44203a0f4566; BYTE $0x0b // pinsrb xmm8, byte [r14 + rcx + 6], 11 + QUAD $0x063e44203a0f4566; BYTE $0x0c // pinsrb xmm8, byte [r14 + rdi + 6], 12 + QUAD $0x061644203a0f4566; BYTE $0x0d // pinsrb xmm8, byte [r14 + rdx + 6], 13 + QUAD $0x061e44203a0f4766; BYTE $0x0e // pinsrb xmm8, byte [r14 + r11 + 6], 14 + QUAD $0x061644203a0f4766; BYTE $0x0f // pinsrb xmm8, byte [r14 + r10 + 6], 15 + QUAD $0x070e74203a0f4766; BYTE $0x01 // pinsrb xmm14, byte [r14 + r9 + 7], 1 + QUAD $0x070674203a0f4566; BYTE $0x02 // pinsrb xmm14, byte [r14 + rax + 7], 2 + WORD $0x8949; BYTE $0xc7 // mov r15, rax + QUAD $0x071e74203a0f4566; BYTE $0x03 // pinsrb xmm14, byte [r14 + rbx + 7], 3 + QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] QUAD $0x071e74203a0f4766; BYTE $0x04 // pinsrb xmm14, byte [r14 + r11 + 7], 4 - QUAD $0x070e74203a0f4766; BYTE $0x05 // pinsrb xmm14, byte [r14 + r9 + 7], 5 - QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] - QUAD $0x070e74203a0f4766; BYTE $0x06 // pinsrb xmm14, byte [r14 + r9 + 7], 6 - QUAD $0x071674203a0f4766; BYTE $0x07 // pinsrb xmm14, byte [r14 + r10 
+ 7], 7 - LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] - QUAD $0x070674203a0f4566; BYTE $0x08 // pinsrb xmm14, byte [r14 + rax + 7], 8 + QUAD $0x072e74203a0f4766; BYTE $0x05 // pinsrb xmm14, byte [r14 + r13 + 7], 5 + QUAD $0x0000008824848b48 // mov rax, qword [rsp + 136] + QUAD $0x070674203a0f4566; BYTE $0x06 // pinsrb xmm14, byte [r14 + rax + 7], 6 + QUAD $0x073674203a0f4566; BYTE $0x07 // pinsrb xmm14, byte [r14 + rsi + 7], 7 + WORD $0x894d; BYTE $0xe1 // mov r9, r12 + QUAD $0x072674203a0f4766; BYTE $0x08 // pinsrb xmm14, byte [r14 + r12 + 7], 8 + LONG $0x24648b4c; BYTE $0x58 // mov r12, qword [rsp + 88] QUAD $0x072674203a0f4766; BYTE $0x09 // pinsrb xmm14, byte [r14 + r12 + 7], 9 - QUAD $0x073674203a0f4566; BYTE $0x0a // pinsrb xmm14, byte [r14 + rsi + 7], 10 - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] - QUAD $0x070674203a0f4566; BYTE $0x0b // pinsrb xmm14, byte [r14 + rax + 7], 11 - QUAD $0x070e74203a0f4566; BYTE $0x0c // pinsrb xmm14, byte [r14 + rcx + 7], 12 - QUAD $0x071e74203a0f4566; BYTE $0x0d // pinsrb xmm14, byte [r14 + rbx + 7], 13 - QUAD $0x072e74203a0f4766; BYTE $0x0e // pinsrb xmm14, byte [r14 + r13 + 7], 14 + QUAD $0x070674203a0f4766; BYTE $0x0a // pinsrb xmm14, byte [r14 + r8 + 7], 10 + QUAD $0x070e74203a0f4566; BYTE $0x0b // pinsrb xmm14, byte [r14 + rcx + 7], 11 + QUAD $0x073e74203a0f4566; BYTE $0x0c // pinsrb xmm14, byte [r14 + rdi + 7], 12 + QUAD $0x071674203a0f4566; BYTE $0x0d // pinsrb xmm14, byte [r14 + rdx + 7], 13 + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + QUAD $0x073e74203a0f4566; BYTE $0x0e // pinsrb xmm14, byte [r14 + rdi + 7], 14 LONG $0x6f0f4166; BYTE $0xce // movdqa xmm1, xmm14 QUAD $0x00b024b46f0f4466; WORD $0x0000 // movdqa xmm14, oword [rsp + 176] LONG $0x740f4166; BYTE $0xd6 // pcmpeqb xmm2, xmm14 @@ -27095,147 +28354,152 @@ LBB5_86: QUAD $0x00000150856f0f66 // movdqa xmm0, oword 336[rbp] /* [rip + .LCPI5_21] */ LONG $0xdf0f4466; BYTE $0xc0 // pandn xmm8, xmm0 LONG $0xeb0f4466; BYTE $0xc2 // por xmm8, xmm2 - LONG $0x24548b4c; BYTE $0x50 // mov r10, qword [rsp + 80] + QUAD $0x0000008024948b4c // mov r10, qword [rsp + 128] LONG $0x74b60f43; WORD $0x1416 // movzx esi, byte [r14 + r10 + 20] LONG $0xde6e0f66 // movd xmm3, esi - QUAD $0x073e4c203a0f4366; BYTE $0x0f // pinsrb xmm1, byte [r14 + r15 + 7], 15 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x07064c203a0f4166; BYTE $0x0f // pinsrb xmm1, byte [r14 + rax + 7], 15 LONG $0x740f4166; BYTE $0xce // pcmpeqb xmm1, xmm14 LONG $0x456f0f66; BYTE $0x60 // movdqa xmm0, oword 96[rbp] /* [rip + .LCPI5_6] */ LONG $0xc8df0f66 // pandn xmm1, xmm0 LONG $0xeb0f4166; BYTE $0xc8 // por xmm1, xmm8 LONG $0x74b60f43; WORD $0x1516 // movzx esi, byte [r14 + r10 + 21] LONG $0xd66e0f66 // movd xmm2, esi - QUAD $0x00010024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 256] - LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] - QUAD $0x080e44203a0f4166; BYTE $0x01 // pinsrb xmm0, byte [r14 + rcx + 8], 1 - QUAD $0x083e44203a0f4166; BYTE $0x02 // pinsrb xmm0, byte [r14 + rdi + 8], 2 - WORD $0x8949; BYTE $0xd5 // mov r13, rdx - QUAD $0x081644203a0f4166; BYTE $0x03 // pinsrb xmm0, byte [r14 + rdx + 8], 3 + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + QUAD $0x0000f024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 240] + QUAD $0x081644203a0f4166; BYTE $0x01 // pinsrb xmm0, byte [r14 + rdx + 8], 1 + QUAD $0x083e44203a0f4366; BYTE $0x02 // pinsrb xmm0, byte [r14 + r15 + 8], 2 + QUAD $0x081e44203a0f4166; BYTE $0x03 // pinsrb xmm0, byte [r14 + 
rbx + 8], 3 + WORD $0x8949; BYTE $0xdf // mov r15, rbx + WORD $0x894c; BYTE $0xdb // mov rbx, r11 QUAD $0x081e44203a0f4366; BYTE $0x04 // pinsrb xmm0, byte [r14 + r11 + 8], 4 - WORD $0x894c; BYTE $0xda // mov rdx, r11 - LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] - QUAD $0x080644203a0f4366; BYTE $0x05 // pinsrb xmm0, byte [r14 + r8 + 8], 5 - QUAD $0x080e44203a0f4366; BYTE $0x06 // pinsrb xmm0, byte [r14 + r9 + 8], 6 - WORD $0x894d; BYTE $0xcf // mov r15, r9 - QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] - QUAD $0x083e44203a0f4166; BYTE $0x07 // pinsrb xmm0, byte [r14 + rdi + 8], 7 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] - QUAD $0x081e44203a0f4166; BYTE $0x08 // pinsrb xmm0, byte [r14 + rbx + 8], 8 + WORD $0x894d; BYTE $0xeb // mov r11, r13 + QUAD $0x082e44203a0f4366; BYTE $0x05 // pinsrb xmm0, byte [r14 + r13 + 8], 5 + QUAD $0x0000008824ac8b4c // mov r13, qword [rsp + 136] + QUAD $0x082e44203a0f4366; BYTE $0x06 // pinsrb xmm0, byte [r14 + r13 + 8], 6 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x080e44203a0f4166; BYTE $0x07 // pinsrb xmm0, byte [r14 + rcx + 8], 7 + QUAD $0x080e44203a0f4366; BYTE $0x08 // pinsrb xmm0, byte [r14 + r9 + 8], 8 QUAD $0x082644203a0f4366; BYTE $0x09 // pinsrb xmm0, byte [r14 + r12 + 8], 9 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x083644203a0f4166; BYTE $0x0a // pinsrb xmm0, byte [r14 + rsi + 8], 10 - QUAD $0x080644203a0f4166; BYTE $0x0b // pinsrb xmm0, byte [r14 + rax + 8], 11 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] - QUAD $0x080644203a0f4166; BYTE $0x0c // pinsrb xmm0, byte [r14 + rax + 8], 12 - LONG $0x244c8b4c; BYTE $0x78 // mov r9, qword [rsp + 120] - QUAD $0x080e44203a0f4366; BYTE $0x0d // pinsrb xmm0, byte [r14 + r9 + 8], 13 - LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] - QUAD $0x081e44203a0f4366; BYTE $0x0e // pinsrb xmm0, byte [r14 + r11 + 8], 14 - LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] - QUAD $0x083644203a0f4166; BYTE $0x0f // pinsrb xmm0, byte [r14 + rsi + 8], 15 + QUAD $0x080644203a0f4366; BYTE $0x0a // pinsrb xmm0, byte [r14 + r8 + 8], 10 + LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] + QUAD $0x080644203a0f4366; BYTE $0x0b // pinsrb xmm0, byte [r14 + r8 + 8], 11 + LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] + QUAD $0x080e44203a0f4366; BYTE $0x0c // pinsrb xmm0, byte [r14 + r9 + 8], 12 + LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] + QUAD $0x083644203a0f4166; BYTE $0x0d // pinsrb xmm0, byte [r14 + rsi + 8], 13 + QUAD $0x083e44203a0f4166; BYTE $0x0e // pinsrb xmm0, byte [r14 + rdi + 8], 14 + QUAD $0x080644203a0f4166; BYTE $0x0f // pinsrb xmm0, byte [r14 + rax + 8], 15 LONG $0xeb0f4166; BYTE $0xc9 // por xmm1, xmm9 - QUAD $0x000100248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm1 + QUAD $0x0000f0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm1 LONG $0x74b60f43; WORD $0x1616 // movzx esi, byte [r14 + r10 + 22] LONG $0xce6e0f66 // movd xmm1, esi LONG $0x740f4166; BYTE $0xc6 // pcmpeqb xmm0, xmm14 - QUAD $0x090e5c203a0f4566; BYTE $0x01 // pinsrb xmm11, byte [r14 + rcx + 9], 1 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x09165c203a0f4566; BYTE $0x01 // pinsrb xmm11, byte [r14 + rdx + 9], 1 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x09065c203a0f4566; BYTE $0x02 // pinsrb xmm11, byte [r14 + rax + 9], 2 - QUAD $0x092e5c203a0f4766; BYTE $0x03 // pinsrb xmm11, byte [r14 + r13 + 9], 3 - QUAD $0x09165c203a0f4566; 
BYTE $0x04 // pinsrb xmm11, byte [r14 + rdx + 9], 4 - QUAD $0x09065c203a0f4766; BYTE $0x05 // pinsrb xmm11, byte [r14 + r8 + 9], 5 - WORD $0x894d; BYTE $0xfa // mov r10, r15 - QUAD $0x093e5c203a0f4766; BYTE $0x06 // pinsrb xmm11, byte [r14 + r15 + 9], 6 - QUAD $0x093e5c203a0f4566; BYTE $0x07 // pinsrb xmm11, byte [r14 + rdi + 9], 7 - WORD $0x8949; BYTE $0xff // mov r15, rdi - QUAD $0x091e5c203a0f4566; BYTE $0x08 // pinsrb xmm11, byte [r14 + rbx + 9], 8 + QUAD $0x093e5c203a0f4766; BYTE $0x03 // pinsrb xmm11, byte [r14 + r15 + 9], 3 + QUAD $0x091e5c203a0f4566; BYTE $0x04 // pinsrb xmm11, byte [r14 + rbx + 9], 4 + WORD $0x894c; BYTE $0xda // mov rdx, r11 + QUAD $0x091e5c203a0f4766; BYTE $0x05 // pinsrb xmm11, byte [r14 + r11 + 9], 5 + QUAD $0x092e5c203a0f4766; BYTE $0x06 // pinsrb xmm11, byte [r14 + r13 + 9], 6 + QUAD $0x090e5c203a0f4566; BYTE $0x07 // pinsrb xmm11, byte [r14 + rcx + 9], 7 + QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] + QUAD $0x09165c203a0f4766; BYTE $0x08 // pinsrb xmm11, byte [r14 + r10 + 9], 8 + WORD $0x894c; BYTE $0xe1 // mov rcx, r12 QUAD $0x09265c203a0f4766; BYTE $0x09 // pinsrb xmm11, byte [r14 + r12 + 9], 9 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] QUAD $0x09365c203a0f4566; BYTE $0x0a // pinsrb xmm11, byte [r14 + rsi + 9], 10 - LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] - QUAD $0x09165c203a0f4566; BYTE $0x0b // pinsrb xmm11, byte [r14 + rdx + 9], 11 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] - QUAD $0x093e5c203a0f4566; BYTE $0x0c // pinsrb xmm11, byte [r14 + rdi + 9], 12 - QUAD $0x090e5c203a0f4766; BYTE $0x0d // pinsrb xmm11, byte [r14 + r9 + 9], 13 - QUAD $0x091e5c203a0f4766; BYTE $0x0e // pinsrb xmm11, byte [r14 + r11 + 9], 14 - LONG $0x24448b4c; BYTE $0x10 // mov r8, qword [rsp + 16] + WORD $0x894c; BYTE $0xc2 // mov rdx, r8 + QUAD $0x09065c203a0f4766; BYTE $0x0b // pinsrb xmm11, byte [r14 + r8 + 9], 11 + WORD $0x894c; BYTE $0xcf // mov rdi, r9 + QUAD $0x090e5c203a0f4766; BYTE $0x0c // pinsrb xmm11, byte [r14 + r9 + 9], 12 + LONG $0x245c8b4c; BYTE $0x40 // mov r11, qword [rsp + 64] + QUAD $0x091e5c203a0f4766; BYTE $0x0d // pinsrb xmm11, byte [r14 + r11 + 9], 13 + LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] + QUAD $0x09265c203a0f4766; BYTE $0x0e // pinsrb xmm11, byte [r14 + r12 + 9], 14 + LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] QUAD $0x09065c203a0f4766; BYTE $0x0f // pinsrb xmm11, byte [r14 + r8 + 9], 15 - QUAD $0x0a0e64203a0f4566; BYTE $0x01 // pinsrb xmm12, byte [r14 + rcx + 10], 1 + LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] + QUAD $0x0a0e64203a0f4766; BYTE $0x01 // pinsrb xmm12, byte [r14 + r9 + 10], 1 QUAD $0x0a0664203a0f4566; BYTE $0x02 // pinsrb xmm12, byte [r14 + rax + 10], 2 - QUAD $0x0a2e64203a0f4766; BYTE $0x03 // pinsrb xmm12, byte [r14 + r13 + 10], 3 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + QUAD $0x0a3e64203a0f4766; BYTE $0x03 // pinsrb xmm12, byte [r14 + r15 + 10], 3 QUAD $0x0a1e64203a0f4566; BYTE $0x04 // pinsrb xmm12, byte [r14 + rbx + 10], 4 - LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] - QUAD $0x0a2e64203a0f4766; BYTE $0x05 // pinsrb xmm12, byte [r14 + r13 + 10], 5 - QUAD $0x0a1664203a0f4766; BYTE $0x06 // pinsrb xmm12, byte [r14 + r10 + 10], 6 + LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] + QUAD $0x0a1e64203a0f4566; BYTE $0x05 // pinsrb xmm12, byte [r14 + rbx + 10], 5 + QUAD $0x0a2e64203a0f4766; BYTE $0x06 // pinsrb xmm12, byte [r14 + 
r13 + 10], 6 + LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] QUAD $0x0a3e64203a0f4766; BYTE $0x07 // pinsrb xmm12, byte [r14 + r15 + 10], 7 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] - QUAD $0x0a1e64203a0f4566; BYTE $0x08 // pinsrb xmm12, byte [r14 + rbx + 10], 8 - QUAD $0x0a2664203a0f4766; BYTE $0x09 // pinsrb xmm12, byte [r14 + r12 + 10], 9 + QUAD $0x0a1664203a0f4766; BYTE $0x08 // pinsrb xmm12, byte [r14 + r10 + 10], 8 + QUAD $0x0a0e64203a0f4566; BYTE $0x09 // pinsrb xmm12, byte [r14 + rcx + 10], 9 QUAD $0x0a3664203a0f4566; BYTE $0x0a // pinsrb xmm12, byte [r14 + rsi + 10], 10 QUAD $0x0a1664203a0f4566; BYTE $0x0b // pinsrb xmm12, byte [r14 + rdx + 10], 11 QUAD $0x0a3e64203a0f4566; BYTE $0x0c // pinsrb xmm12, byte [r14 + rdi + 10], 12 - QUAD $0x0a0e64203a0f4766; BYTE $0x0d // pinsrb xmm12, byte [r14 + r9 + 10], 13 - QUAD $0x0a1e64203a0f4766; BYTE $0x0e // pinsrb xmm12, byte [r14 + r11 + 10], 14 + QUAD $0x0a1e64203a0f4766; BYTE $0x0d // pinsrb xmm12, byte [r14 + r11 + 10], 13 + QUAD $0x0a2664203a0f4766; BYTE $0x0e // pinsrb xmm12, byte [r14 + r12 + 10], 14 + WORD $0x894d; BYTE $0xe3 // mov r11, r12 QUAD $0x0a0664203a0f4766; BYTE $0x0f // pinsrb xmm12, byte [r14 + r8 + 10], 15 - QUAD $0x0b0e6c203a0f4566; BYTE $0x01 // pinsrb xmm13, byte [r14 + rcx + 11], 1 + WORD $0x894d; BYTE $0xc4 // mov r12, r8 + QUAD $0x0b0e6c203a0f4766; BYTE $0x01 // pinsrb xmm13, byte [r14 + r9 + 11], 1 QUAD $0x0b066c203a0f4566; BYTE $0x02 // pinsrb xmm13, byte [r14 + rax + 11], 2 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0b066c203a0f4566; BYTE $0x03 // pinsrb xmm13, byte [r14 + rax + 11], 3 + WORD $0x8949; BYTE $0xc0 // mov r8, rax + LONG $0x247c8b4c; BYTE $0x78 // mov r15, qword [rsp + 120] + QUAD $0x0b3e6c203a0f4766; BYTE $0x03 // pinsrb xmm13, byte [r14 + r15 + 11], 3 + QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] + QUAD $0x0b0e6c203a0f4766; BYTE $0x04 // pinsrb xmm13, byte [r14 + r9 + 11], 4 + QUAD $0x0b1e6c203a0f4566; BYTE $0x05 // pinsrb xmm13, byte [r14 + rbx + 11], 5 + QUAD $0x0b2e6c203a0f4766; BYTE $0x06 // pinsrb xmm13, byte [r14 + r13 + 11], 6 LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0b066c203a0f4566; BYTE $0x04 // pinsrb xmm13, byte [r14 + rax + 11], 4 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x0b066c203a0f4566; BYTE $0x05 // pinsrb xmm13, byte [r14 + rax + 11], 5 - QUAD $0x0b166c203a0f4766; BYTE $0x06 // pinsrb xmm13, byte [r14 + r10 + 11], 6 - QUAD $0x0b3e6c203a0f4766; BYTE $0x07 // pinsrb xmm13, byte [r14 + r15 + 11], 7 - LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] - QUAD $0x0b2e6c203a0f4766; BYTE $0x08 // pinsrb xmm13, byte [r14 + r13 + 11], 8 - QUAD $0x0b266c203a0f4766; BYTE $0x09 // pinsrb xmm13, byte [r14 + r12 + 11], 9 + QUAD $0x0b066c203a0f4566; BYTE $0x07 // pinsrb xmm13, byte [r14 + rax + 11], 7 + QUAD $0x0b166c203a0f4766; BYTE $0x08 // pinsrb xmm13, byte [r14 + r10 + 11], 8 + QUAD $0x0b0e6c203a0f4566; BYTE $0x09 // pinsrb xmm13, byte [r14 + rcx + 11], 9 QUAD $0x0b366c203a0f4566; BYTE $0x0a // pinsrb xmm13, byte [r14 + rsi + 11], 10 QUAD $0x0b166c203a0f4566; BYTE $0x0b // pinsrb xmm13, byte [r14 + rdx + 11], 11 QUAD $0x0b3e6c203a0f4566; BYTE $0x0c // pinsrb xmm13, byte [r14 + rdi + 11], 12 - QUAD $0x0b0e6c203a0f4766; BYTE $0x0d // pinsrb xmm13, byte [r14 + r9 + 11], 13 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0b066c203a0f4566; BYTE $0x0d // pinsrb xmm13, byte [r14 + rax + 11], 13 QUAD $0x0b1e6c203a0f4766; BYTE $0x0e // pinsrb 
xmm13, byte [r14 + r11 + 11], 14 - WORD $0x894c; BYTE $0xd8 // mov rax, r11 - QUAD $0x0b066c203a0f4766; BYTE $0x0f // pinsrb xmm13, byte [r14 + r8 + 11], 15 + QUAD $0x0b266c203a0f4766; BYTE $0x0f // pinsrb xmm13, byte [r14 + r12 + 11], 15 LONG $0x740f4566; BYTE $0xde // pcmpeqb xmm11, xmm14 QUAD $0x0001009ddf0f4466; BYTE $0x00 // pandn xmm11, oword 256[rbp] /* [rip + .LCPI5_16] */ LONG $0xfc0f4466; BYTE $0xd8 // paddb xmm11, xmm0 - LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] - LONG $0x74b60f41; WORD $0x171e // movzx esi, byte [r14 + rbx + 23] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + LONG $0x74b60f41; WORD $0x1706 // movzx esi, byte [r14 + rax + 23] LONG $0x6e0f4466; BYTE $0xc6 // movd xmm8, esi LONG $0x740f4566; BYTE $0xe6 // pcmpeqb xmm12, xmm14 QUAD $0x000110a5df0f4466; BYTE $0x00 // pandn xmm12, oword 272[rbp] /* [rip + .LCPI5_17] */ LONG $0x740f4566; BYTE $0xee // pcmpeqb xmm13, xmm14 QUAD $0x000120addf0f4466; BYTE $0x00 // pandn xmm13, oword 288[rbp] /* [rip + .LCPI5_18] */ LONG $0xeb0f4566; BYTE $0xec // por xmm13, xmm12 - LONG $0x74b60f41; WORD $0x181e // movzx esi, byte [r14 + rbx + 24] + LONG $0x74b60f41; WORD $0x1806 // movzx esi, byte [r14 + rax + 24] LONG $0x6e0f4466; BYTE $0xe6 // movd xmm12, esi - QUAD $0x00e0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 224] - QUAD $0x0c0e4c203a0f4566; BYTE $0x01 // pinsrb xmm9, byte [r14 + rcx + 12], 1 - LONG $0x24648b4c; BYTE $0x48 // mov r12, qword [rsp + 72] - QUAD $0x0c264c203a0f4766; BYTE $0x02 // pinsrb xmm9, byte [r14 + r12 + 12], 2 - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + QUAD $0x00d0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 208] + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0c064c203a0f4566; BYTE $0x01 // pinsrb xmm9, byte [r14 + rax + 12], 1 + WORD $0x894d; BYTE $0xc4 // mov r12, r8 + QUAD $0x0c064c203a0f4766; BYTE $0x02 // pinsrb xmm9, byte [r14 + r8 + 12], 2 QUAD $0x0c3e4c203a0f4766; BYTE $0x03 // pinsrb xmm9, byte [r14 + r15 + 12], 3 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] - QUAD $0x0c1e4c203a0f4566; BYTE $0x04 // pinsrb xmm9, byte [r14 + rbx + 12], 4 + WORD $0x894c; BYTE $0xcb // mov rbx, r9 + QUAD $0x0c0e4c203a0f4766; BYTE $0x04 // pinsrb xmm9, byte [r14 + r9 + 12], 4 LONG $0x244c8b4c; BYTE $0x60 // mov r9, qword [rsp + 96] QUAD $0x0c0e4c203a0f4766; BYTE $0x05 // pinsrb xmm9, byte [r14 + r9 + 12], 5 - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - QUAD $0x0c164c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r10 + 12], 6 - QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] - QUAD $0x0c1e4c203a0f4766; BYTE $0x07 // pinsrb xmm9, byte [r14 + r11 + 12], 7 - QUAD $0x0c2e4c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r13 + 12], 8 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] + WORD $0x894d; BYTE $0xeb // mov r11, r13 + QUAD $0x0c2e4c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r13 + 12], 6 + LONG $0x24448b4c; BYTE $0x30 // mov r8, qword [rsp + 48] + QUAD $0x0c064c203a0f4766; BYTE $0x07 // pinsrb xmm9, byte [r14 + r8 + 12], 7 + WORD $0x894c; BYTE $0xd6 // mov rsi, r10 + QUAD $0x0c164c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r10 + 12], 8 + LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] QUAD $0x0c0e4c203a0f4566; BYTE $0x09 // pinsrb xmm9, byte [r14 + rcx + 12], 9 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] + LONG $0x24548b4c; BYTE $0x68 // mov r10, qword [rsp + 104] QUAD $0x0c164c203a0f4766; BYTE $0x0a // pinsrb xmm9, byte [r14 + r10 + 12], 10 QUAD 
$0x0c164c203a0f4566; BYTE $0x0b // pinsrb xmm9, byte [r14 + rdx + 12], 11 QUAD $0x0c3e4c203a0f4566; BYTE $0x0c // pinsrb xmm9, byte [r14 + rdi + 12], 12 - LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] - QUAD $0x0c364c203a0f4566; BYTE $0x0d // pinsrb xmm9, byte [r14 + rsi + 12], 13 + LONG $0x246c8b4c; BYTE $0x40 // mov r13, qword [rsp + 64] + QUAD $0x0c2e4c203a0f4766; BYTE $0x0d // pinsrb xmm9, byte [r14 + r13 + 12], 13 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0c064c203a0f4566; BYTE $0x0e // pinsrb xmm9, byte [r14 + rax + 12], 14 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c064c203a0f4566; BYTE $0x0f // pinsrb xmm9, byte [r14 + rax + 12], 15 LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0d0674203a0f4166; BYTE $0x01 // pinsrb xmm6, byte [r14 + rax + 13], 1 @@ -27243,17 +28507,17 @@ LBB5_86: QUAD $0x0d3e74203a0f4366; BYTE $0x03 // pinsrb xmm6, byte [r14 + r15 + 13], 3 QUAD $0x0d1e74203a0f4166; BYTE $0x04 // pinsrb xmm6, byte [r14 + rbx + 13], 4 QUAD $0x0d0e74203a0f4366; BYTE $0x05 // pinsrb xmm6, byte [r14 + r9 + 13], 5 - QUAD $0x0d0674203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r8 + 13], 6 - QUAD $0x0d1e74203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r11 + 13], 7 - QUAD $0x0d2e74203a0f4366; BYTE $0x08 // pinsrb xmm6, byte [r14 + r13 + 13], 8 + QUAD $0x0d1e74203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r11 + 13], 6 + QUAD $0x0d0674203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r8 + 13], 7 + QUAD $0x0d3674203a0f4166; BYTE $0x08 // pinsrb xmm6, byte [r14 + rsi + 13], 8 QUAD $0x0d0e74203a0f4166; BYTE $0x09 // pinsrb xmm6, byte [r14 + rcx + 13], 9 QUAD $0x0d1674203a0f4366; BYTE $0x0a // pinsrb xmm6, byte [r14 + r10 + 13], 10 QUAD $0x0d1674203a0f4166; BYTE $0x0b // pinsrb xmm6, byte [r14 + rdx + 13], 11 QUAD $0x0d3e74203a0f4166; BYTE $0x0c // pinsrb xmm6, byte [r14 + rdi + 13], 12 - QUAD $0x0d3674203a0f4166; BYTE $0x0d // pinsrb xmm6, byte [r14 + rsi + 13], 13 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0d2e74203a0f4366; BYTE $0x0d // pinsrb xmm6, byte [r14 + r13 + 13], 13 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] QUAD $0x0d0674203a0f4166; BYTE $0x0e // pinsrb xmm6, byte [r14 + rax + 13], 14 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0d0674203a0f4166; BYTE $0x0f // pinsrb xmm6, byte [r14 + rax + 13], 15 LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0e067c203a0f4566; BYTE $0x01 // pinsrb xmm15, byte [r14 + rax + 14], 1 @@ -27261,141 +28525,133 @@ LBB5_86: QUAD $0x0e3e7c203a0f4766; BYTE $0x03 // pinsrb xmm15, byte [r14 + r15 + 14], 3 QUAD $0x0e1e7c203a0f4566; BYTE $0x04 // pinsrb xmm15, byte [r14 + rbx + 14], 4 QUAD $0x0e0e7c203a0f4766; BYTE $0x05 // pinsrb xmm15, byte [r14 + r9 + 14], 5 - WORD $0x894c; BYTE $0xcb // mov rbx, r9 - QUAD $0x0e067c203a0f4766; BYTE $0x06 // pinsrb xmm15, byte [r14 + r8 + 14], 6 - WORD $0x894d; BYTE $0xc4 // mov r12, r8 - QUAD $0x0e1e7c203a0f4766; BYTE $0x07 // pinsrb xmm15, byte [r14 + r11 + 14], 7 - QUAD $0x0e2e7c203a0f4766; BYTE $0x08 // pinsrb xmm15, byte [r14 + r13 + 14], 8 + QUAD $0x0e1e7c203a0f4766; BYTE $0x06 // pinsrb xmm15, byte [r14 + r11 + 14], 6 + QUAD $0x0e067c203a0f4766; BYTE $0x07 // pinsrb xmm15, byte [r14 + r8 + 14], 7 + QUAD $0x0e367c203a0f4566; BYTE $0x08 // pinsrb xmm15, byte [r14 + rsi + 14], 8 QUAD $0x0e0e7c203a0f4566; BYTE $0x09 // 
pinsrb xmm15, byte [r14 + rcx + 14], 9 - WORD $0x8949; BYTE $0xcb // mov r11, rcx QUAD $0x0e167c203a0f4766; BYTE $0x0a // pinsrb xmm15, byte [r14 + r10 + 14], 10 QUAD $0x0e167c203a0f4566; BYTE $0x0b // pinsrb xmm15, byte [r14 + rdx + 14], 11 QUAD $0x0e3e7c203a0f4566; BYTE $0x0c // pinsrb xmm15, byte [r14 + rdi + 14], 12 - QUAD $0x0e367c203a0f4566; BYTE $0x0d // pinsrb xmm15, byte [r14 + rsi + 14], 13 - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] - QUAD $0x0e067c203a0f4566; BYTE $0x0e // pinsrb xmm15, byte [r14 + rax + 14], 14 + QUAD $0x0e2e7c203a0f4766; BYTE $0x0d // pinsrb xmm15, byte [r14 + r13 + 14], 13 + LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] + QUAD $0x0e267c203a0f4766; BYTE $0x0e // pinsrb xmm15, byte [r14 + r12 + 14], 14 LONG $0x740f4566; BYTE $0xce // pcmpeqb xmm9, xmm14 QUAD $0x0001308ddf0f4466; BYTE $0x00 // pandn xmm9, oword 304[rbp] /* [rip + .LCPI5_19] */ LONG $0xeb0f4566; BYTE $0xcd // por xmm9, xmm13 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - LONG $0x74b60f41; WORD $0x1906 // movzx esi, byte [r14 + rax + 25] + QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] + LONG $0x74b60f43; WORD $0x193e // movzx esi, byte [r14 + r15 + 25] LONG $0x6e0f4466; BYTE $0xee // movd xmm13, esi QUAD $0x0001609df80f4466; BYTE $0x00 // psubb xmm11, oword 352[rbp] /* [rip + .LCPI5_22] */ LONG $0xeb0f4566; BYTE $0xcb // por xmm9, xmm11 - LONG $0x74b60f41; WORD $0x1a06 // movzx esi, byte [r14 + rax + 26] + LONG $0x74b60f43; WORD $0x1a3e // movzx esi, byte [r14 + r15 + 26] LONG $0xc66e0f66 // movd xmm0, esi - LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] - QUAD $0x0e0e7c203a0f4566; BYTE $0x0f // pinsrb xmm15, byte [r14 + rcx + 14], 15 + LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] + QUAD $0x0e1e7c203a0f4566; BYTE $0x0f // pinsrb xmm15, byte [r14 + rbx + 14], 15 LONG $0x740f4166; BYTE $0xf6 // pcmpeqb xmm6, xmm14 QUAD $0x00000140b5df0f66 // pandn xmm6, oword 320[rbp] /* [rip + .LCPI5_20] */ LONG $0x740f4566; BYTE $0xfe // pcmpeqb xmm15, xmm14 QUAD $0x000150bddf0f4466; BYTE $0x00 // pandn xmm15, oword 336[rbp] /* [rip + .LCPI5_21] */ LONG $0xeb0f4466; BYTE $0xfe // por xmm15, xmm6 - LONG $0x74b60f41; WORD $0x1b06 // movzx esi, byte [r14 + rax + 27] + LONG $0x74b60f43; WORD $0x1b3e // movzx esi, byte [r14 + r15 + 27] LONG $0x6e0f4466; BYTE $0xde // movd xmm11, esi QUAD $0x0000c024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 192] - LONG $0x247c8b48; BYTE $0x18 // mov rdi, qword [rsp + 24] - QUAD $0x0f3e74203a0f4166; BYTE $0x01 // pinsrb xmm6, byte [r14 + rdi + 15], 1 - LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] - QUAD $0x0f0e74203a0f4366; BYTE $0x02 // pinsrb xmm6, byte [r14 + r9 + 15], 2 - QUAD $0x0f3e74203a0f4366; BYTE $0x03 // pinsrb xmm6, byte [r14 + r15 + 15], 3 - LONG $0x24448b4c; BYTE $0x30 // mov r8, qword [rsp + 48] - QUAD $0x0f0674203a0f4366; BYTE $0x04 // pinsrb xmm6, byte [r14 + r8 + 15], 4 - QUAD $0x0f1e74203a0f4166; BYTE $0x05 // pinsrb xmm6, byte [r14 + rbx + 15], 5 - QUAD $0x0f2674203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r12 + 15], 6 - QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] - QUAD $0x0f3674203a0f4166; BYTE $0x07 // pinsrb xmm6, byte [r14 + rsi + 15], 7 - WORD $0x894d; BYTE $0xef // mov r15, r13 - QUAD $0x0f2e74203a0f4366; BYTE $0x08 // pinsrb xmm6, byte [r14 + r13 + 15], 8 - QUAD $0x0f1e74203a0f4366; BYTE $0x09 // pinsrb xmm6, byte [r14 + r11 + 15], 9 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0f0674203a0f4166; BYTE $0x01 // pinsrb xmm6, 
byte [r14 + rax + 15], 1 + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + QUAD $0x0f3e74203a0f4166; BYTE $0x02 // pinsrb xmm6, byte [r14 + rdi + 15], 2 + LONG $0x24548b48; BYTE $0x78 // mov rdx, qword [rsp + 120] + QUAD $0x0f1674203a0f4166; BYTE $0x03 // pinsrb xmm6, byte [r14 + rdx + 15], 3 + QUAD $0x00000098248c8b48 // mov rcx, qword [rsp + 152] + QUAD $0x0f0e74203a0f4166; BYTE $0x04 // pinsrb xmm6, byte [r14 + rcx + 15], 4 + QUAD $0x0f0e74203a0f4366; BYTE $0x05 // pinsrb xmm6, byte [r14 + r9 + 15], 5 + QUAD $0x0f1e74203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r11 + 15], 6 + QUAD $0x0f0674203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r8 + 15], 7 + QUAD $0x0000009024848b4c // mov r8, qword [rsp + 144] + QUAD $0x0f0674203a0f4366; BYTE $0x08 // pinsrb xmm6, byte [r14 + r8 + 15], 8 + LONG $0x244c8b4c; BYTE $0x58 // mov r9, qword [rsp + 88] + QUAD $0x0f0e74203a0f4366; BYTE $0x09 // pinsrb xmm6, byte [r14 + r9 + 15], 9 QUAD $0x0f1674203a0f4366; BYTE $0x0a // pinsrb xmm6, byte [r14 + r10 + 15], 10 - QUAD $0x0f1674203a0f4166; BYTE $0x0b // pinsrb xmm6, byte [r14 + rdx + 15], 11 - LONG $0x24548b4c; BYTE $0x38 // mov r10, qword [rsp + 56] - QUAD $0x0f1674203a0f4366; BYTE $0x0c // pinsrb xmm6, byte [r14 + r10 + 15], 12 - LONG $0x246c8b4c; BYTE $0x78 // mov r13, qword [rsp + 120] + LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] + QUAD $0x0f1674203a0f4366; BYTE $0x0b // pinsrb xmm6, byte [r14 + r10 + 15], 11 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x0f3674203a0f4166; BYTE $0x0c // pinsrb xmm6, byte [r14 + rsi + 15], 12 QUAD $0x0f2e74203a0f4366; BYTE $0x0d // pinsrb xmm6, byte [r14 + r13 + 15], 13 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] - QUAD $0x0f1674203a0f4166; BYTE $0x0e // pinsrb xmm6, byte [r14 + rdx + 15], 14 - QUAD $0x0f0e74203a0f4166; BYTE $0x0f // pinsrb xmm6, byte [r14 + rcx + 15], 15 + QUAD $0x0f2674203a0f4366; BYTE $0x0e // pinsrb xmm6, byte [r14 + r12 + 15], 14 + QUAD $0x0f1e74203a0f4166; BYTE $0x0f // pinsrb xmm6, byte [r14 + rbx + 15], 15 LONG $0x740f4166; BYTE $0xf6 // pcmpeqb xmm6, xmm14 LONG $0x75df0f66; BYTE $0x60 // pandn xmm6, oword 96[rbp] /* [rip + .LCPI5_6] */ LONG $0xeb0f4166; BYTE $0xf7 // por xmm6, xmm15 - LONG $0x74b60f41; WORD $0x1c06 // movzx esi, byte [r14 + rax + 28] + LONG $0x74b60f43; WORD $0x1c3e // movzx esi, byte [r14 + r15 + 28] LONG $0x6e0f4466; BYTE $0xfe // movd xmm15, esi LONG $0xeb0f4166; BYTE $0xf1 // por xmm6, xmm9 QUAD $0x0000c024b47f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm6 - LONG $0x74b60f41; WORD $0x1d06 // movzx esi, byte [r14 + rax + 29] + LONG $0x74b60f43; WORD $0x1d3e // movzx esi, byte [r14 + r15 + 29] LONG $0x6e0f4466; BYTE $0xce // movd xmm9, esi - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - QUAD $0x103e54203a0f4566; BYTE $0x01 // pinsrb xmm10, byte [r14 + rdi + 16], 1 - QUAD $0x100e54203a0f4766; BYTE $0x02 // pinsrb xmm10, byte [r14 + r9 + 16], 2 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + QUAD $0x100654203a0f4566; BYTE $0x01 // pinsrb xmm10, byte [r14 + rax + 16], 1 + QUAD $0x103e54203a0f4566; BYTE $0x02 // pinsrb xmm10, byte [r14 + rdi + 16], 2 QUAD $0x101654203a0f4566; BYTE $0x03 // pinsrb xmm10, byte [r14 + rdx + 16], 3 - QUAD $0x100654203a0f4766; BYTE $0x04 // pinsrb xmm10, byte [r14 + r8 + 16], 4 - QUAD $0x101e54203a0f4566; BYTE $0x05 // pinsrb xmm10, byte [r14 + rbx + 16], 5 - QUAD $0x102654203a0f4766; BYTE $0x06 // pinsrb xmm10, byte [r14 + r12 + 16], 6 - QUAD $0x0000008824bc8b48 // mov rdi, qword [rsp + 136] - QUAD 
$0x103e54203a0f4566; BYTE $0x07 // pinsrb xmm10, byte [r14 + rdi + 16], 7 - WORD $0x894c; BYTE $0xf8 // mov rax, r15 - QUAD $0x103e54203a0f4766; BYTE $0x08 // pinsrb xmm10, byte [r14 + r15 + 16], 8 - QUAD $0x101e54203a0f4766; BYTE $0x09 // pinsrb xmm10, byte [r14 + r11 + 16], 9 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] - QUAD $0x103e54203a0f4766; BYTE $0x0a // pinsrb xmm10, byte [r14 + r15 + 16], 10 - LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] - QUAD $0x100e54203a0f4566; BYTE $0x0b // pinsrb xmm10, byte [r14 + rcx + 16], 11 - QUAD $0x101654203a0f4766; BYTE $0x0c // pinsrb xmm10, byte [r14 + r10 + 16], 12 + QUAD $0x100e54203a0f4566; BYTE $0x04 // pinsrb xmm10, byte [r14 + rcx + 16], 4 + LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x103654203a0f4566; BYTE $0x05 // pinsrb xmm10, byte [r14 + rsi + 16], 5 + QUAD $0x101e54203a0f4766; BYTE $0x06 // pinsrb xmm10, byte [r14 + r11 + 16], 6 + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x103654203a0f4566; BYTE $0x07 // pinsrb xmm10, byte [r14 + rsi + 16], 7 + QUAD $0x100654203a0f4766; BYTE $0x08 // pinsrb xmm10, byte [r14 + r8 + 16], 8 + QUAD $0x100e54203a0f4766; BYTE $0x09 // pinsrb xmm10, byte [r14 + r9 + 16], 9 + LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] + QUAD $0x103654203a0f4566; BYTE $0x0a // pinsrb xmm10, byte [r14 + rsi + 16], 10 + QUAD $0x101654203a0f4766; BYTE $0x0b // pinsrb xmm10, byte [r14 + r10 + 16], 11 + LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] + QUAD $0x103654203a0f4566; BYTE $0x0c // pinsrb xmm10, byte [r14 + rsi + 16], 12 QUAD $0x102e54203a0f4766; BYTE $0x0d // pinsrb xmm10, byte [r14 + r13 + 16], 13 - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x100e54203a0f4566; BYTE $0x0e // pinsrb xmm10, byte [r14 + rcx + 16], 14 - LONG $0x246c8b4c; BYTE $0x10 // mov r13, qword [rsp + 16] - QUAD $0x102e54203a0f4766; BYTE $0x0f // pinsrb xmm10, byte [r14 + r13 + 16], 15 - QUAD $0x113664203a0f4166; BYTE $0x01 // pinsrb xmm4, byte [r14 + rsi + 17], 1 - QUAD $0x110e64203a0f4366; BYTE $0x02 // pinsrb xmm4, byte [r14 + r9 + 17], 2 + QUAD $0x102654203a0f4766; BYTE $0x0e // pinsrb xmm10, byte [r14 + r12 + 16], 14 + QUAD $0x101e54203a0f4566; BYTE $0x0f // pinsrb xmm10, byte [r14 + rbx + 16], 15 + QUAD $0x110664203a0f4166; BYTE $0x01 // pinsrb xmm4, byte [r14 + rax + 17], 1 + QUAD $0x113e64203a0f4166; BYTE $0x02 // pinsrb xmm4, byte [r14 + rdi + 17], 2 QUAD $0x111664203a0f4166; BYTE $0x03 // pinsrb xmm4, byte [r14 + rdx + 17], 3 - QUAD $0x110664203a0f4366; BYTE $0x04 // pinsrb xmm4, byte [r14 + r8 + 17], 4 - QUAD $0x111e64203a0f4166; BYTE $0x05 // pinsrb xmm4, byte [r14 + rbx + 17], 5 - QUAD $0x112664203a0f4366; BYTE $0x06 // pinsrb xmm4, byte [r14 + r12 + 17], 6 - QUAD $0x113e64203a0f4166; BYTE $0x07 // pinsrb xmm4, byte [r14 + rdi + 17], 7 - QUAD $0x110664203a0f4166; BYTE $0x08 // pinsrb xmm4, byte [r14 + rax + 17], 8 - WORD $0x894d; BYTE $0xd9 // mov r9, r11 - QUAD $0x111e64203a0f4366; BYTE $0x09 // pinsrb xmm4, byte [r14 + r11 + 17], 9 - WORD $0x894d; BYTE $0xfa // mov r10, r15 - QUAD $0x113e64203a0f4366; BYTE $0x0a // pinsrb xmm4, byte [r14 + r15 + 17], 10 - LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] - QUAD $0x110664203a0f4366; BYTE $0x0b // pinsrb xmm4, byte [r14 + r8 + 17], 11 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] - QUAD $0x113e64203a0f4166; BYTE $0x0c // pinsrb xmm4, byte [r14 + rdi + 17], 12 - LONG $0x24548b48; BYTE $0x78 // mov rdx, qword [rsp + 120] - QUAD 
$0x111664203a0f4166; BYTE $0x0d // pinsrb xmm4, byte [r14 + rdx + 17], 13 - QUAD $0x110e64203a0f4166; BYTE $0x0e // pinsrb xmm4, byte [r14 + rcx + 17], 14 - WORD $0x8949; BYTE $0xcb // mov r11, rcx - QUAD $0x112e64203a0f4366; BYTE $0x0f // pinsrb xmm4, byte [r14 + r13 + 17], 15 + QUAD $0x110e64203a0f4166; BYTE $0x04 // pinsrb xmm4, byte [r14 + rcx + 17], 4 + LONG $0x244c8b4c; BYTE $0x60 // mov r9, qword [rsp + 96] + QUAD $0x110e64203a0f4366; BYTE $0x05 // pinsrb xmm4, byte [r14 + r9 + 17], 5 + QUAD $0x111e64203a0f4366; BYTE $0x06 // pinsrb xmm4, byte [r14 + r11 + 17], 6 + LONG $0x24448b4c; BYTE $0x30 // mov r8, qword [rsp + 48] + QUAD $0x110664203a0f4366; BYTE $0x07 // pinsrb xmm4, byte [r14 + r8 + 17], 7 + QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] + QUAD $0x113e64203a0f4166; BYTE $0x08 // pinsrb xmm4, byte [r14 + rdi + 17], 8 + LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] + QUAD $0x110e64203a0f4166; BYTE $0x09 // pinsrb xmm4, byte [r14 + rcx + 17], 9 + LONG $0x24548b4c; BYTE $0x68 // mov r10, qword [rsp + 104] + QUAD $0x111664203a0f4366; BYTE $0x0a // pinsrb xmm4, byte [r14 + r10 + 17], 10 + LONG $0x24548b48; BYTE $0x48 // mov rdx, qword [rsp + 72] + QUAD $0x111664203a0f4166; BYTE $0x0b // pinsrb xmm4, byte [r14 + rdx + 17], 11 + QUAD $0x113664203a0f4166; BYTE $0x0c // pinsrb xmm4, byte [r14 + rsi + 17], 12 + QUAD $0x112e64203a0f4366; BYTE $0x0d // pinsrb xmm4, byte [r14 + r13 + 17], 13 + QUAD $0x112664203a0f4366; BYTE $0x0e // pinsrb xmm4, byte [r14 + r12 + 17], 14 + QUAD $0x111e64203a0f4166; BYTE $0x0f // pinsrb xmm4, byte [r14 + rbx + 17], 15 LONG $0x740f4566; BYTE $0xd6 // pcmpeqb xmm10, xmm14 LONG $0x740f4166; BYTE $0xe6 // pcmpeqb xmm4, xmm14 QUAD $0x00000100b56f0f66 // movdqa xmm6, oword 256[rbp] /* [rip + .LCPI5_16] */ LONG $0xe6df0f66 // pandn xmm4, xmm6 LONG $0xfc0f4166; BYTE $0xe2 // paddb xmm4, xmm10 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - LONG $0x74b60f41; WORD $0x1e06 // movzx esi, byte [r14 + rax + 30] + LONG $0x74b60f43; WORD $0x1e3e // movzx esi, byte [r14 + r15 + 30] LONG $0x6e0f4466; BYTE $0xd6 // movd xmm10, esi - LONG $0x24748b48; BYTE $0x18 // mov rsi, qword [rsp + 24] - QUAD $0x12367c203a0f4166; BYTE $0x01 // pinsrb xmm7, byte [r14 + rsi + 18], 1 - QUAD $0x13366c203a0f4166; BYTE $0x01 // pinsrb xmm5, byte [r14 + rsi + 19], 1 - QUAD $0x14365c203a0f4166; BYTE $0x01 // pinsrb xmm3, byte [r14 + rsi + 20], 1 - QUAD $0x153654203a0f4166; BYTE $0x01 // pinsrb xmm2, byte [r14 + rsi + 21], 1 - QUAD $0x16364c203a0f4166; BYTE $0x01 // pinsrb xmm1, byte [r14 + rsi + 22], 1 - QUAD $0x173644203a0f4566; BYTE $0x01 // pinsrb xmm8, byte [r14 + rsi + 23], 1 - QUAD $0x183664203a0f4566; BYTE $0x01 // pinsrb xmm12, byte [r14 + rsi + 24], 1 - QUAD $0x19366c203a0f4566; BYTE $0x01 // pinsrb xmm13, byte [r14 + rsi + 25], 1 - QUAD $0x1a3644203a0f4166; BYTE $0x01 // pinsrb xmm0, byte [r14 + rsi + 26], 1 - QUAD $0x1b365c203a0f4566; BYTE $0x01 // pinsrb xmm11, byte [r14 + rsi + 27], 1 - QUAD $0x1c367c203a0f4566; BYTE $0x01 // pinsrb xmm15, byte [r14 + rsi + 28], 1 - QUAD $0x1d364c203a0f4566; BYTE $0x01 // pinsrb xmm9, byte [r14 + rsi + 29], 1 - QUAD $0x1e3654203a0f4566; BYTE $0x01 // pinsrb xmm10, byte [r14 + rsi + 30], 1 - LONG $0x44b60f41; WORD $0x1f06 // movzx eax, byte [r14 + rax + 31] + WORD $0x8948; BYTE $0xc6 // mov rsi, rax + QUAD $0x12067c203a0f4166; BYTE $0x01 // pinsrb xmm7, byte [r14 + rax + 18], 1 + QUAD $0x13066c203a0f4166; BYTE $0x01 // pinsrb xmm5, byte [r14 + rax + 19], 1 + QUAD $0x14065c203a0f4166; BYTE $0x01 // pinsrb xmm3, byte 
[r14 + rax + 20], 1 + QUAD $0x150654203a0f4166; BYTE $0x01 // pinsrb xmm2, byte [r14 + rax + 21], 1 + QUAD $0x16064c203a0f4166; BYTE $0x01 // pinsrb xmm1, byte [r14 + rax + 22], 1 + QUAD $0x170644203a0f4566; BYTE $0x01 // pinsrb xmm8, byte [r14 + rax + 23], 1 + QUAD $0x180664203a0f4566; BYTE $0x01 // pinsrb xmm12, byte [r14 + rax + 24], 1 + QUAD $0x19066c203a0f4566; BYTE $0x01 // pinsrb xmm13, byte [r14 + rax + 25], 1 + QUAD $0x1a0644203a0f4166; BYTE $0x01 // pinsrb xmm0, byte [r14 + rax + 26], 1 + QUAD $0x1b065c203a0f4566; BYTE $0x01 // pinsrb xmm11, byte [r14 + rax + 27], 1 + QUAD $0x1c067c203a0f4566; BYTE $0x01 // pinsrb xmm15, byte [r14 + rax + 28], 1 + QUAD $0x1d064c203a0f4566; BYTE $0x01 // pinsrb xmm9, byte [r14 + rax + 29], 1 + QUAD $0x1e0654203a0f4566; BYTE $0x01 // pinsrb xmm10, byte [r14 + rax + 30], 1 + LONG $0x44b60f43; WORD $0x1f3e // movzx eax, byte [r14 + r15 + 31] LONG $0xf06e0f66 // movd xmm6, eax QUAD $0x1f3674203a0f4166; BYTE $0x01 // pinsrb xmm6, byte [r14 + rsi + 31], 1 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] QUAD $0x12067c203a0f4166; BYTE $0x02 // pinsrb xmm7, byte [r14 + rax + 18], 2 QUAD $0x13066c203a0f4166; BYTE $0x02 // pinsrb xmm5, byte [r14 + rax + 19], 2 QUAD $0x14065c203a0f4166; BYTE $0x02 // pinsrb xmm3, byte [r14 + rax + 20], 2 @@ -27410,48 +28666,48 @@ LBB5_86: QUAD $0x1d064c203a0f4566; BYTE $0x02 // pinsrb xmm9, byte [r14 + rax + 29], 2 QUAD $0x1e0654203a0f4566; BYTE $0x02 // pinsrb xmm10, byte [r14 + rax + 30], 2 QUAD $0x1f0674203a0f4166; BYTE $0x02 // pinsrb xmm6, byte [r14 + rax + 31], 2 - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] + LONG $0x247c8b4c; BYTE $0x78 // mov r15, qword [rsp + 120] QUAD $0x123e7c203a0f4366; BYTE $0x03 // pinsrb xmm7, byte [r14 + r15 + 18], 3 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] QUAD $0x12067c203a0f4166; BYTE $0x04 // pinsrb xmm7, byte [r14 + rax + 18], 4 - QUAD $0x121e7c203a0f4166; BYTE $0x05 // pinsrb xmm7, byte [r14 + rbx + 18], 5 - QUAD $0x12267c203a0f4366; BYTE $0x06 // pinsrb xmm7, byte [r14 + r12 + 18], 6 - QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] - QUAD $0x120e7c203a0f4166; BYTE $0x07 // pinsrb xmm7, byte [r14 + rcx + 18], 7 - LONG $0x24748b48; BYTE $0x68 // mov rsi, qword [rsp + 104] - QUAD $0x12367c203a0f4166; BYTE $0x08 // pinsrb xmm7, byte [r14 + rsi + 18], 8 - QUAD $0x120e7c203a0f4366; BYTE $0x09 // pinsrb xmm7, byte [r14 + r9 + 18], 9 + QUAD $0x120e7c203a0f4366; BYTE $0x05 // pinsrb xmm7, byte [r14 + r9 + 18], 5 + QUAD $0x121e7c203a0f4366; BYTE $0x06 // pinsrb xmm7, byte [r14 + r11 + 18], 6 + QUAD $0x12067c203a0f4366; BYTE $0x07 // pinsrb xmm7, byte [r14 + r8 + 18], 7 + WORD $0x8948; BYTE $0xfe // mov rsi, rdi + QUAD $0x123e7c203a0f4166; BYTE $0x08 // pinsrb xmm7, byte [r14 + rdi + 18], 8 + QUAD $0x120e7c203a0f4166; BYTE $0x09 // pinsrb xmm7, byte [r14 + rcx + 18], 9 QUAD $0x12167c203a0f4366; BYTE $0x0a // pinsrb xmm7, byte [r14 + r10 + 18], 10 - QUAD $0x12067c203a0f4366; BYTE $0x0b // pinsrb xmm7, byte [r14 + r8 + 18], 11 + QUAD $0x12167c203a0f4166; BYTE $0x0b // pinsrb xmm7, byte [r14 + rdx + 18], 11 + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] QUAD $0x123e7c203a0f4166; BYTE $0x0c // pinsrb xmm7, byte [r14 + rdi + 18], 12 - QUAD $0x12167c203a0f4166; BYTE $0x0d // pinsrb xmm7, byte [r14 + rdx + 18], 13 - QUAD $0x121e7c203a0f4366; BYTE $0x0e // pinsrb xmm7, byte [r14 + r11 + 18], 14 - QUAD $0x122e7c203a0f4366; 
BYTE $0x0f // pinsrb xmm7, byte [r14 + r13 + 18], 15 + QUAD $0x122e7c203a0f4366; BYTE $0x0d // pinsrb xmm7, byte [r14 + r13 + 18], 13 + QUAD $0x12267c203a0f4366; BYTE $0x0e // pinsrb xmm7, byte [r14 + r12 + 18], 14 + QUAD $0x121e7c203a0f4166; BYTE $0x0f // pinsrb xmm7, byte [r14 + rbx + 18], 15 QUAD $0x133e6c203a0f4366; BYTE $0x03 // pinsrb xmm5, byte [r14 + r15 + 19], 3 QUAD $0x13066c203a0f4166; BYTE $0x04 // pinsrb xmm5, byte [r14 + rax + 19], 4 - QUAD $0x131e6c203a0f4166; BYTE $0x05 // pinsrb xmm5, byte [r14 + rbx + 19], 5 - QUAD $0x13266c203a0f4366; BYTE $0x06 // pinsrb xmm5, byte [r14 + r12 + 19], 6 - QUAD $0x130e6c203a0f4166; BYTE $0x07 // pinsrb xmm5, byte [r14 + rcx + 19], 7 + QUAD $0x130e6c203a0f4366; BYTE $0x05 // pinsrb xmm5, byte [r14 + r9 + 19], 5 + QUAD $0x131e6c203a0f4366; BYTE $0x06 // pinsrb xmm5, byte [r14 + r11 + 19], 6 + QUAD $0x13066c203a0f4366; BYTE $0x07 // pinsrb xmm5, byte [r14 + r8 + 19], 7 QUAD $0x13366c203a0f4166; BYTE $0x08 // pinsrb xmm5, byte [r14 + rsi + 19], 8 - QUAD $0x130e6c203a0f4366; BYTE $0x09 // pinsrb xmm5, byte [r14 + r9 + 19], 9 + QUAD $0x130e6c203a0f4166; BYTE $0x09 // pinsrb xmm5, byte [r14 + rcx + 19], 9 QUAD $0x13166c203a0f4366; BYTE $0x0a // pinsrb xmm5, byte [r14 + r10 + 19], 10 - QUAD $0x13066c203a0f4366; BYTE $0x0b // pinsrb xmm5, byte [r14 + r8 + 19], 11 + QUAD $0x13166c203a0f4166; BYTE $0x0b // pinsrb xmm5, byte [r14 + rdx + 19], 11 QUAD $0x133e6c203a0f4166; BYTE $0x0c // pinsrb xmm5, byte [r14 + rdi + 19], 12 - QUAD $0x13166c203a0f4166; BYTE $0x0d // pinsrb xmm5, byte [r14 + rdx + 19], 13 - QUAD $0x131e6c203a0f4366; BYTE $0x0e // pinsrb xmm5, byte [r14 + r11 + 19], 14 - QUAD $0x132e6c203a0f4366; BYTE $0x0f // pinsrb xmm5, byte [r14 + r13 + 19], 15 + QUAD $0x132e6c203a0f4366; BYTE $0x0d // pinsrb xmm5, byte [r14 + r13 + 19], 13 + QUAD $0x13266c203a0f4366; BYTE $0x0e // pinsrb xmm5, byte [r14 + r12 + 19], 14 + QUAD $0x131e6c203a0f4166; BYTE $0x0f // pinsrb xmm5, byte [r14 + rbx + 19], 15 QUAD $0x143e5c203a0f4366; BYTE $0x03 // pinsrb xmm3, byte [r14 + r15 + 20], 3 QUAD $0x14065c203a0f4166; BYTE $0x04 // pinsrb xmm3, byte [r14 + rax + 20], 4 - QUAD $0x141e5c203a0f4166; BYTE $0x05 // pinsrb xmm3, byte [r14 + rbx + 20], 5 - QUAD $0x14265c203a0f4366; BYTE $0x06 // pinsrb xmm3, byte [r14 + r12 + 20], 6 - QUAD $0x140e5c203a0f4166; BYTE $0x07 // pinsrb xmm3, byte [r14 + rcx + 20], 7 + QUAD $0x140e5c203a0f4366; BYTE $0x05 // pinsrb xmm3, byte [r14 + r9 + 20], 5 + QUAD $0x141e5c203a0f4366; BYTE $0x06 // pinsrb xmm3, byte [r14 + r11 + 20], 6 + QUAD $0x14065c203a0f4366; BYTE $0x07 // pinsrb xmm3, byte [r14 + r8 + 20], 7 QUAD $0x14365c203a0f4166; BYTE $0x08 // pinsrb xmm3, byte [r14 + rsi + 20], 8 - QUAD $0x140e5c203a0f4366; BYTE $0x09 // pinsrb xmm3, byte [r14 + r9 + 20], 9 + QUAD $0x140e5c203a0f4166; BYTE $0x09 // pinsrb xmm3, byte [r14 + rcx + 20], 9 QUAD $0x14165c203a0f4366; BYTE $0x0a // pinsrb xmm3, byte [r14 + r10 + 20], 10 - QUAD $0x14065c203a0f4366; BYTE $0x0b // pinsrb xmm3, byte [r14 + r8 + 20], 11 + QUAD $0x14165c203a0f4166; BYTE $0x0b // pinsrb xmm3, byte [r14 + rdx + 20], 11 QUAD $0x143e5c203a0f4166; BYTE $0x0c // pinsrb xmm3, byte [r14 + rdi + 20], 12 - QUAD $0x14165c203a0f4166; BYTE $0x0d // pinsrb xmm3, byte [r14 + rdx + 20], 13 - QUAD $0x141e5c203a0f4366; BYTE $0x0e // pinsrb xmm3, byte [r14 + r11 + 20], 14 + QUAD $0x142e5c203a0f4366; BYTE $0x0d // pinsrb xmm3, byte [r14 + r13 + 20], 13 + QUAD $0x14265c203a0f4366; BYTE $0x0e // pinsrb xmm3, byte [r14 + r12 + 20], 14 LONG $0x740f4166; BYTE $0xfe // pcmpeqb xmm7, xmm14 QUAD 
$0x000110b56f0f4466; BYTE $0x00 // movdqa xmm14, oword 272[rbp] /* [rip + .LCPI5_17] */ LONG $0xdf0f4166; BYTE $0xfe // pandn xmm7, xmm14 @@ -27459,7 +28715,7 @@ LBB5_86: QUAD $0x000120b56f0f4466; BYTE $0x00 // movdqa xmm14, oword 288[rbp] /* [rip + .LCPI5_18] */ LONG $0xdf0f4166; BYTE $0xee // pandn xmm5, xmm14 LONG $0xefeb0f66 // por xmm5, xmm7 - QUAD $0x142e5c203a0f4366; BYTE $0x0f // pinsrb xmm3, byte [r14 + r13 + 20], 15 + QUAD $0x141e5c203a0f4166; BYTE $0x0f // pinsrb xmm3, byte [r14 + rbx + 20], 15 QUAD $0x00b024b46f0f4466; WORD $0x0000 // movdqa xmm14, oword [rsp + 176] LONG $0x740f4166; BYTE $0xde // pcmpeqb xmm3, xmm14 QUAD $0x00000130bd6f0f66 // movdqa xmm7, oword 304[rbp] /* [rip + .LCPI5_19] */ @@ -27470,42 +28726,42 @@ LBB5_86: LONG $0xdceb0f66 // por xmm3, xmm4 QUAD $0x153e54203a0f4366; BYTE $0x03 // pinsrb xmm2, byte [r14 + r15 + 21], 3 QUAD $0x150654203a0f4166; BYTE $0x04 // pinsrb xmm2, byte [r14 + rax + 21], 4 - QUAD $0x151e54203a0f4166; BYTE $0x05 // pinsrb xmm2, byte [r14 + rbx + 21], 5 - QUAD $0x152654203a0f4366; BYTE $0x06 // pinsrb xmm2, byte [r14 + r12 + 21], 6 - QUAD $0x150e54203a0f4166; BYTE $0x07 // pinsrb xmm2, byte [r14 + rcx + 21], 7 + QUAD $0x150e54203a0f4366; BYTE $0x05 // pinsrb xmm2, byte [r14 + r9 + 21], 5 + QUAD $0x151e54203a0f4366; BYTE $0x06 // pinsrb xmm2, byte [r14 + r11 + 21], 6 + QUAD $0x150654203a0f4366; BYTE $0x07 // pinsrb xmm2, byte [r14 + r8 + 21], 7 QUAD $0x153654203a0f4166; BYTE $0x08 // pinsrb xmm2, byte [r14 + rsi + 21], 8 - QUAD $0x150e54203a0f4366; BYTE $0x09 // pinsrb xmm2, byte [r14 + r9 + 21], 9 + QUAD $0x150e54203a0f4166; BYTE $0x09 // pinsrb xmm2, byte [r14 + rcx + 21], 9 QUAD $0x151654203a0f4366; BYTE $0x0a // pinsrb xmm2, byte [r14 + r10 + 21], 10 - QUAD $0x150654203a0f4366; BYTE $0x0b // pinsrb xmm2, byte [r14 + r8 + 21], 11 + QUAD $0x151654203a0f4166; BYTE $0x0b // pinsrb xmm2, byte [r14 + rdx + 21], 11 QUAD $0x153e54203a0f4166; BYTE $0x0c // pinsrb xmm2, byte [r14 + rdi + 21], 12 - QUAD $0x151654203a0f4166; BYTE $0x0d // pinsrb xmm2, byte [r14 + rdx + 21], 13 - QUAD $0x151e54203a0f4366; BYTE $0x0e // pinsrb xmm2, byte [r14 + r11 + 21], 14 - QUAD $0x152e54203a0f4366; BYTE $0x0f // pinsrb xmm2, byte [r14 + r13 + 21], 15 + QUAD $0x152e54203a0f4366; BYTE $0x0d // pinsrb xmm2, byte [r14 + r13 + 21], 13 + QUAD $0x152654203a0f4366; BYTE $0x0e // pinsrb xmm2, byte [r14 + r12 + 21], 14 + QUAD $0x151e54203a0f4166; BYTE $0x0f // pinsrb xmm2, byte [r14 + rbx + 21], 15 QUAD $0x163e4c203a0f4366; BYTE $0x03 // pinsrb xmm1, byte [r14 + r15 + 22], 3 QUAD $0x16064c203a0f4166; BYTE $0x04 // pinsrb xmm1, byte [r14 + rax + 22], 4 - QUAD $0x161e4c203a0f4166; BYTE $0x05 // pinsrb xmm1, byte [r14 + rbx + 22], 5 - QUAD $0x16264c203a0f4366; BYTE $0x06 // pinsrb xmm1, byte [r14 + r12 + 22], 6 - QUAD $0x160e4c203a0f4166; BYTE $0x07 // pinsrb xmm1, byte [r14 + rcx + 22], 7 + QUAD $0x160e4c203a0f4366; BYTE $0x05 // pinsrb xmm1, byte [r14 + r9 + 22], 5 + QUAD $0x161e4c203a0f4366; BYTE $0x06 // pinsrb xmm1, byte [r14 + r11 + 22], 6 + QUAD $0x16064c203a0f4366; BYTE $0x07 // pinsrb xmm1, byte [r14 + r8 + 22], 7 QUAD $0x16364c203a0f4166; BYTE $0x08 // pinsrb xmm1, byte [r14 + rsi + 22], 8 - QUAD $0x160e4c203a0f4366; BYTE $0x09 // pinsrb xmm1, byte [r14 + r9 + 22], 9 + QUAD $0x160e4c203a0f4166; BYTE $0x09 // pinsrb xmm1, byte [r14 + rcx + 22], 9 QUAD $0x16164c203a0f4366; BYTE $0x0a // pinsrb xmm1, byte [r14 + r10 + 22], 10 - QUAD $0x16064c203a0f4366; BYTE $0x0b // pinsrb xmm1, byte [r14 + r8 + 22], 11 + QUAD $0x16164c203a0f4166; BYTE $0x0b // pinsrb xmm1, byte 
[r14 + rdx + 22], 11 QUAD $0x163e4c203a0f4166; BYTE $0x0c // pinsrb xmm1, byte [r14 + rdi + 22], 12 - QUAD $0x16164c203a0f4166; BYTE $0x0d // pinsrb xmm1, byte [r14 + rdx + 22], 13 - QUAD $0x161e4c203a0f4366; BYTE $0x0e // pinsrb xmm1, byte [r14 + r11 + 22], 14 - QUAD $0x162e4c203a0f4366; BYTE $0x0f // pinsrb xmm1, byte [r14 + r13 + 22], 15 + QUAD $0x162e4c203a0f4366; BYTE $0x0d // pinsrb xmm1, byte [r14 + r13 + 22], 13 + QUAD $0x16264c203a0f4366; BYTE $0x0e // pinsrb xmm1, byte [r14 + r12 + 22], 14 + QUAD $0x161e4c203a0f4166; BYTE $0x0f // pinsrb xmm1, byte [r14 + rbx + 22], 15 QUAD $0x173e44203a0f4766; BYTE $0x03 // pinsrb xmm8, byte [r14 + r15 + 23], 3 QUAD $0x170644203a0f4566; BYTE $0x04 // pinsrb xmm8, byte [r14 + rax + 23], 4 - QUAD $0x171e44203a0f4566; BYTE $0x05 // pinsrb xmm8, byte [r14 + rbx + 23], 5 - QUAD $0x172644203a0f4766; BYTE $0x06 // pinsrb xmm8, byte [r14 + r12 + 23], 6 - QUAD $0x170e44203a0f4566; BYTE $0x07 // pinsrb xmm8, byte [r14 + rcx + 23], 7 + QUAD $0x170e44203a0f4766; BYTE $0x05 // pinsrb xmm8, byte [r14 + r9 + 23], 5 + QUAD $0x171e44203a0f4766; BYTE $0x06 // pinsrb xmm8, byte [r14 + r11 + 23], 6 + QUAD $0x170644203a0f4766; BYTE $0x07 // pinsrb xmm8, byte [r14 + r8 + 23], 7 QUAD $0x173644203a0f4566; BYTE $0x08 // pinsrb xmm8, byte [r14 + rsi + 23], 8 - QUAD $0x170e44203a0f4766; BYTE $0x09 // pinsrb xmm8, byte [r14 + r9 + 23], 9 + QUAD $0x170e44203a0f4566; BYTE $0x09 // pinsrb xmm8, byte [r14 + rcx + 23], 9 QUAD $0x171644203a0f4766; BYTE $0x0a // pinsrb xmm8, byte [r14 + r10 + 23], 10 - QUAD $0x170644203a0f4766; BYTE $0x0b // pinsrb xmm8, byte [r14 + r8 + 23], 11 + QUAD $0x171644203a0f4566; BYTE $0x0b // pinsrb xmm8, byte [r14 + rdx + 23], 11 QUAD $0x173e44203a0f4566; BYTE $0x0c // pinsrb xmm8, byte [r14 + rdi + 23], 12 - QUAD $0x171644203a0f4566; BYTE $0x0d // pinsrb xmm8, byte [r14 + rdx + 23], 13 - QUAD $0x171e44203a0f4766; BYTE $0x0e // pinsrb xmm8, byte [r14 + r11 + 23], 14 + QUAD $0x172e44203a0f4766; BYTE $0x0d // pinsrb xmm8, byte [r14 + r13 + 23], 13 + QUAD $0x172644203a0f4766; BYTE $0x0e // pinsrb xmm8, byte [r14 + r12 + 23], 14 LONG $0x740f4166; BYTE $0xd6 // pcmpeqb xmm2, xmm14 QUAD $0x00000140ad6f0f66 // movdqa xmm5, oword 320[rbp] /* [rip + .LCPI5_20] */ LONG $0xd5df0f66 // pandn xmm2, xmm5 @@ -27513,68 +28769,68 @@ LBB5_86: QUAD $0x00000150bd6f0f66 // movdqa xmm7, oword 336[rbp] /* [rip + .LCPI5_21] */ LONG $0xcfdf0f66 // pandn xmm1, xmm7 LONG $0xcaeb0f66 // por xmm1, xmm2 - QUAD $0x172e44203a0f4766; BYTE $0x0f // pinsrb xmm8, byte [r14 + r13 + 23], 15 + QUAD $0x171e44203a0f4566; BYTE $0x0f // pinsrb xmm8, byte [r14 + rbx + 23], 15 LONG $0x740f4566; BYTE $0xc6 // pcmpeqb xmm8, xmm14 LONG $0x656f0f66; BYTE $0x60 // movdqa xmm4, oword 96[rbp] /* [rip + .LCPI5_6] */ LONG $0xdf0f4466; BYTE $0xc4 // pandn xmm8, xmm4 LONG $0xeb0f4466; BYTE $0xc1 // por xmm8, xmm1 QUAD $0x183e64203a0f4766; BYTE $0x03 // pinsrb xmm12, byte [r14 + r15 + 24], 3 QUAD $0x180664203a0f4566; BYTE $0x04 // pinsrb xmm12, byte [r14 + rax + 24], 4 - QUAD $0x181e64203a0f4566; BYTE $0x05 // pinsrb xmm12, byte [r14 + rbx + 24], 5 - QUAD $0x182664203a0f4766; BYTE $0x06 // pinsrb xmm12, byte [r14 + r12 + 24], 6 - QUAD $0x180e64203a0f4566; BYTE $0x07 // pinsrb xmm12, byte [r14 + rcx + 24], 7 + QUAD $0x180e64203a0f4766; BYTE $0x05 // pinsrb xmm12, byte [r14 + r9 + 24], 5 + QUAD $0x181e64203a0f4766; BYTE $0x06 // pinsrb xmm12, byte [r14 + r11 + 24], 6 + QUAD $0x180664203a0f4766; BYTE $0x07 // pinsrb xmm12, byte [r14 + r8 + 24], 7 QUAD $0x183664203a0f4566; BYTE $0x08 // pinsrb xmm12, byte [r14 
+ rsi + 24], 8 - QUAD $0x180e64203a0f4766; BYTE $0x09 // pinsrb xmm12, byte [r14 + r9 + 24], 9 + QUAD $0x180e64203a0f4566; BYTE $0x09 // pinsrb xmm12, byte [r14 + rcx + 24], 9 QUAD $0x181664203a0f4766; BYTE $0x0a // pinsrb xmm12, byte [r14 + r10 + 24], 10 - QUAD $0x180664203a0f4766; BYTE $0x0b // pinsrb xmm12, byte [r14 + r8 + 24], 11 + QUAD $0x181664203a0f4566; BYTE $0x0b // pinsrb xmm12, byte [r14 + rdx + 24], 11 QUAD $0x183e64203a0f4566; BYTE $0x0c // pinsrb xmm12, byte [r14 + rdi + 24], 12 - QUAD $0x181664203a0f4566; BYTE $0x0d // pinsrb xmm12, byte [r14 + rdx + 24], 13 - QUAD $0x181e64203a0f4766; BYTE $0x0e // pinsrb xmm12, byte [r14 + r11 + 24], 14 - QUAD $0x182e64203a0f4766; BYTE $0x0f // pinsrb xmm12, byte [r14 + r13 + 24], 15 + QUAD $0x182e64203a0f4766; BYTE $0x0d // pinsrb xmm12, byte [r14 + r13 + 24], 13 + QUAD $0x182664203a0f4766; BYTE $0x0e // pinsrb xmm12, byte [r14 + r12 + 24], 14 + QUAD $0x181e64203a0f4566; BYTE $0x0f // pinsrb xmm12, byte [r14 + rbx + 24], 15 LONG $0xeb0f4466; BYTE $0xc3 // por xmm8, xmm3 LONG $0x740f4566; BYTE $0xe6 // pcmpeqb xmm12, xmm14 QUAD $0x193e6c203a0f4766; BYTE $0x03 // pinsrb xmm13, byte [r14 + r15 + 25], 3 QUAD $0x19066c203a0f4566; BYTE $0x04 // pinsrb xmm13, byte [r14 + rax + 25], 4 - QUAD $0x191e6c203a0f4566; BYTE $0x05 // pinsrb xmm13, byte [r14 + rbx + 25], 5 - QUAD $0x19266c203a0f4766; BYTE $0x06 // pinsrb xmm13, byte [r14 + r12 + 25], 6 - QUAD $0x190e6c203a0f4566; BYTE $0x07 // pinsrb xmm13, byte [r14 + rcx + 25], 7 + QUAD $0x190e6c203a0f4766; BYTE $0x05 // pinsrb xmm13, byte [r14 + r9 + 25], 5 + QUAD $0x191e6c203a0f4766; BYTE $0x06 // pinsrb xmm13, byte [r14 + r11 + 25], 6 + QUAD $0x19066c203a0f4766; BYTE $0x07 // pinsrb xmm13, byte [r14 + r8 + 25], 7 QUAD $0x19366c203a0f4566; BYTE $0x08 // pinsrb xmm13, byte [r14 + rsi + 25], 8 - QUAD $0x190e6c203a0f4766; BYTE $0x09 // pinsrb xmm13, byte [r14 + r9 + 25], 9 + QUAD $0x190e6c203a0f4566; BYTE $0x09 // pinsrb xmm13, byte [r14 + rcx + 25], 9 QUAD $0x19166c203a0f4766; BYTE $0x0a // pinsrb xmm13, byte [r14 + r10 + 25], 10 - QUAD $0x19066c203a0f4766; BYTE $0x0b // pinsrb xmm13, byte [r14 + r8 + 25], 11 + QUAD $0x19166c203a0f4566; BYTE $0x0b // pinsrb xmm13, byte [r14 + rdx + 25], 11 QUAD $0x193e6c203a0f4566; BYTE $0x0c // pinsrb xmm13, byte [r14 + rdi + 25], 12 - QUAD $0x19166c203a0f4566; BYTE $0x0d // pinsrb xmm13, byte [r14 + rdx + 25], 13 - QUAD $0x191e6c203a0f4766; BYTE $0x0e // pinsrb xmm13, byte [r14 + r11 + 25], 14 - QUAD $0x192e6c203a0f4766; BYTE $0x0f // pinsrb xmm13, byte [r14 + r13 + 25], 15 + QUAD $0x192e6c203a0f4766; BYTE $0x0d // pinsrb xmm13, byte [r14 + r13 + 25], 13 + QUAD $0x19266c203a0f4766; BYTE $0x0e // pinsrb xmm13, byte [r14 + r12 + 25], 14 + QUAD $0x191e6c203a0f4566; BYTE $0x0f // pinsrb xmm13, byte [r14 + rbx + 25], 15 QUAD $0x1a3e44203a0f4366; BYTE $0x03 // pinsrb xmm0, byte [r14 + r15 + 26], 3 QUAD $0x1a0644203a0f4166; BYTE $0x04 // pinsrb xmm0, byte [r14 + rax + 26], 4 - QUAD $0x1a1e44203a0f4166; BYTE $0x05 // pinsrb xmm0, byte [r14 + rbx + 26], 5 - QUAD $0x1a2644203a0f4366; BYTE $0x06 // pinsrb xmm0, byte [r14 + r12 + 26], 6 - QUAD $0x1a0e44203a0f4166; BYTE $0x07 // pinsrb xmm0, byte [r14 + rcx + 26], 7 + QUAD $0x1a0e44203a0f4366; BYTE $0x05 // pinsrb xmm0, byte [r14 + r9 + 26], 5 + QUAD $0x1a1e44203a0f4366; BYTE $0x06 // pinsrb xmm0, byte [r14 + r11 + 26], 6 + QUAD $0x1a0644203a0f4366; BYTE $0x07 // pinsrb xmm0, byte [r14 + r8 + 26], 7 QUAD $0x1a3644203a0f4166; BYTE $0x08 // pinsrb xmm0, byte [r14 + rsi + 26], 8 - QUAD $0x1a0e44203a0f4366; BYTE $0x09 // pinsrb xmm0, 
byte [r14 + r9 + 26], 9 + QUAD $0x1a0e44203a0f4166; BYTE $0x09 // pinsrb xmm0, byte [r14 + rcx + 26], 9 QUAD $0x1a1644203a0f4366; BYTE $0x0a // pinsrb xmm0, byte [r14 + r10 + 26], 10 - QUAD $0x1a0644203a0f4366; BYTE $0x0b // pinsrb xmm0, byte [r14 + r8 + 26], 11 + QUAD $0x1a1644203a0f4166; BYTE $0x0b // pinsrb xmm0, byte [r14 + rdx + 26], 11 QUAD $0x1a3e44203a0f4166; BYTE $0x0c // pinsrb xmm0, byte [r14 + rdi + 26], 12 - QUAD $0x1a1644203a0f4166; BYTE $0x0d // pinsrb xmm0, byte [r14 + rdx + 26], 13 - QUAD $0x1a1e44203a0f4366; BYTE $0x0e // pinsrb xmm0, byte [r14 + r11 + 26], 14 - QUAD $0x1a2e44203a0f4366; BYTE $0x0f // pinsrb xmm0, byte [r14 + r13 + 26], 15 + QUAD $0x1a2e44203a0f4366; BYTE $0x0d // pinsrb xmm0, byte [r14 + r13 + 26], 13 + QUAD $0x1a2644203a0f4366; BYTE $0x0e // pinsrb xmm0, byte [r14 + r12 + 26], 14 + QUAD $0x1a1e44203a0f4166; BYTE $0x0f // pinsrb xmm0, byte [r14 + rbx + 26], 15 QUAD $0x1b3e5c203a0f4766; BYTE $0x03 // pinsrb xmm11, byte [r14 + r15 + 27], 3 QUAD $0x1b065c203a0f4566; BYTE $0x04 // pinsrb xmm11, byte [r14 + rax + 27], 4 - QUAD $0x1b1e5c203a0f4566; BYTE $0x05 // pinsrb xmm11, byte [r14 + rbx + 27], 5 - QUAD $0x1b265c203a0f4766; BYTE $0x06 // pinsrb xmm11, byte [r14 + r12 + 27], 6 - QUAD $0x1b0e5c203a0f4566; BYTE $0x07 // pinsrb xmm11, byte [r14 + rcx + 27], 7 + QUAD $0x1b0e5c203a0f4766; BYTE $0x05 // pinsrb xmm11, byte [r14 + r9 + 27], 5 + QUAD $0x1b1e5c203a0f4766; BYTE $0x06 // pinsrb xmm11, byte [r14 + r11 + 27], 6 + QUAD $0x1b065c203a0f4766; BYTE $0x07 // pinsrb xmm11, byte [r14 + r8 + 27], 7 QUAD $0x1b365c203a0f4566; BYTE $0x08 // pinsrb xmm11, byte [r14 + rsi + 27], 8 - QUAD $0x1b0e5c203a0f4766; BYTE $0x09 // pinsrb xmm11, byte [r14 + r9 + 27], 9 + QUAD $0x1b0e5c203a0f4566; BYTE $0x09 // pinsrb xmm11, byte [r14 + rcx + 27], 9 QUAD $0x1b165c203a0f4766; BYTE $0x0a // pinsrb xmm11, byte [r14 + r10 + 27], 10 - QUAD $0x1b065c203a0f4766; BYTE $0x0b // pinsrb xmm11, byte [r14 + r8 + 27], 11 + QUAD $0x1b165c203a0f4566; BYTE $0x0b // pinsrb xmm11, byte [r14 + rdx + 27], 11 QUAD $0x1b3e5c203a0f4566; BYTE $0x0c // pinsrb xmm11, byte [r14 + rdi + 27], 12 - QUAD $0x1b165c203a0f4566; BYTE $0x0d // pinsrb xmm11, byte [r14 + rdx + 27], 13 - QUAD $0x1b1e5c203a0f4766; BYTE $0x0e // pinsrb xmm11, byte [r14 + r11 + 27], 14 + QUAD $0x1b2e5c203a0f4766; BYTE $0x0d // pinsrb xmm11, byte [r14 + r13 + 27], 13 + QUAD $0x1b265c203a0f4766; BYTE $0x0e // pinsrb xmm11, byte [r14 + r12 + 27], 14 LONG $0x740f4566; BYTE $0xee // pcmpeqb xmm13, xmm14 QUAD $0x000100addf0f4466; BYTE $0x00 // pandn xmm13, oword 256[rbp] /* [rip + .LCPI5_16] */ LONG $0xfc0f4566; BYTE $0xec // paddb xmm13, xmm12 - QUAD $0x1b2e5c203a0f4766; BYTE $0x0f // pinsrb xmm11, byte [r14 + r13 + 27], 15 + QUAD $0x1b1e5c203a0f4566; BYTE $0x0f // pinsrb xmm11, byte [r14 + rbx + 27], 15 LONG $0x740f4166; BYTE $0xc6 // pcmpeqb xmm0, xmm14 QUAD $0x0000011085df0f66 // pandn xmm0, oword 272[rbp] /* [rip + .LCPI5_17] */ LONG $0x740f4566; BYTE $0xde // pcmpeqb xmm11, xmm14 @@ -27588,61 +28844,60 @@ LBB5_86: QUAD $0x1d064c203a0f4566; BYTE $0x04 // pinsrb xmm9, byte [r14 + rax + 29], 4 QUAD $0x1e0654203a0f4566; BYTE $0x04 // pinsrb xmm10, byte [r14 + rax + 30], 4 QUAD $0x1f0674203a0f4166; BYTE $0x04 // pinsrb xmm6, byte [r14 + rax + 31], 4 - QUAD $0x1c1e7c203a0f4566; BYTE $0x05 // pinsrb xmm15, byte [r14 + rbx + 28], 5 - QUAD $0x1d1e4c203a0f4566; BYTE $0x05 // pinsrb xmm9, byte [r14 + rbx + 29], 5 - QUAD $0x1e1e54203a0f4566; BYTE $0x05 // pinsrb xmm10, byte [r14 + rbx + 30], 5 - QUAD $0x1f1e74203a0f4166; BYTE $0x05 // pinsrb xmm6, 
byte [r14 + rbx + 31], 5 - QUAD $0x1c267c203a0f4766; BYTE $0x06 // pinsrb xmm15, byte [r14 + r12 + 28], 6 - QUAD $0x1d264c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r12 + 29], 6 - QUAD $0x1e2654203a0f4766; BYTE $0x06 // pinsrb xmm10, byte [r14 + r12 + 30], 6 - QUAD $0x1f2674203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r12 + 31], 6 - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x1c0e7c203a0f4566; BYTE $0x07 // pinsrb xmm15, byte [r14 + rcx + 28], 7 - QUAD $0x1d0e4c203a0f4566; BYTE $0x07 // pinsrb xmm9, byte [r14 + rcx + 29], 7 - QUAD $0x1e0e54203a0f4566; BYTE $0x07 // pinsrb xmm10, byte [r14 + rcx + 30], 7 - QUAD $0x1f0e74203a0f4166; BYTE $0x07 // pinsrb xmm6, byte [r14 + rcx + 31], 7 + WORD $0x894c; BYTE $0xc8 // mov rax, r9 + QUAD $0x1c0e7c203a0f4766; BYTE $0x05 // pinsrb xmm15, byte [r14 + r9 + 28], 5 + QUAD $0x1d0e4c203a0f4766; BYTE $0x05 // pinsrb xmm9, byte [r14 + r9 + 29], 5 + QUAD $0x1e0e54203a0f4766; BYTE $0x05 // pinsrb xmm10, byte [r14 + r9 + 30], 5 + QUAD $0x1f0e74203a0f4366; BYTE $0x05 // pinsrb xmm6, byte [r14 + r9 + 31], 5 + QUAD $0x1c1e7c203a0f4766; BYTE $0x06 // pinsrb xmm15, byte [r14 + r11 + 28], 6 + QUAD $0x1d1e4c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r11 + 29], 6 + QUAD $0x1e1e54203a0f4766; BYTE $0x06 // pinsrb xmm10, byte [r14 + r11 + 30], 6 + QUAD $0x1f1e74203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r11 + 31], 6 + WORD $0x894c; BYTE $0xc0 // mov rax, r8 + QUAD $0x1c067c203a0f4766; BYTE $0x07 // pinsrb xmm15, byte [r14 + r8 + 28], 7 + QUAD $0x1d064c203a0f4766; BYTE $0x07 // pinsrb xmm9, byte [r14 + r8 + 29], 7 + QUAD $0x1e0654203a0f4766; BYTE $0x07 // pinsrb xmm10, byte [r14 + r8 + 30], 7 + QUAD $0x1f0674203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r8 + 31], 7 WORD $0x8948; BYTE $0xf0 // mov rax, rsi QUAD $0x1c367c203a0f4566; BYTE $0x08 // pinsrb xmm15, byte [r14 + rsi + 28], 8 QUAD $0x1d364c203a0f4566; BYTE $0x08 // pinsrb xmm9, byte [r14 + rsi + 29], 8 QUAD $0x1e3654203a0f4566; BYTE $0x08 // pinsrb xmm10, byte [r14 + rsi + 30], 8 QUAD $0x1f3674203a0f4166; BYTE $0x08 // pinsrb xmm6, byte [r14 + rsi + 31], 8 - WORD $0x894c; BYTE $0xc8 // mov rax, r9 - QUAD $0x1c0e7c203a0f4766; BYTE $0x09 // pinsrb xmm15, byte [r14 + r9 + 28], 9 - QUAD $0x1d0e4c203a0f4766; BYTE $0x09 // pinsrb xmm9, byte [r14 + r9 + 29], 9 - QUAD $0x1e0e54203a0f4766; BYTE $0x09 // pinsrb xmm10, byte [r14 + r9 + 30], 9 - QUAD $0x1f0e74203a0f4366; BYTE $0x09 // pinsrb xmm6, byte [r14 + r9 + 31], 9 + WORD $0x8948; BYTE $0xc8 // mov rax, rcx + QUAD $0x1c0e7c203a0f4566; BYTE $0x09 // pinsrb xmm15, byte [r14 + rcx + 28], 9 + QUAD $0x1d0e4c203a0f4566; BYTE $0x09 // pinsrb xmm9, byte [r14 + rcx + 29], 9 + QUAD $0x1e0e54203a0f4566; BYTE $0x09 // pinsrb xmm10, byte [r14 + rcx + 30], 9 + QUAD $0x1f0e74203a0f4166; BYTE $0x09 // pinsrb xmm6, byte [r14 + rcx + 31], 9 WORD $0x894c; BYTE $0xd0 // mov rax, r10 QUAD $0x1c167c203a0f4766; BYTE $0x0a // pinsrb xmm15, byte [r14 + r10 + 28], 10 QUAD $0x1d164c203a0f4766; BYTE $0x0a // pinsrb xmm9, byte [r14 + r10 + 29], 10 QUAD $0x1e1654203a0f4766; BYTE $0x0a // pinsrb xmm10, byte [r14 + r10 + 30], 10 QUAD $0x1f1674203a0f4366; BYTE $0x0a // pinsrb xmm6, byte [r14 + r10 + 31], 10 - WORD $0x894c; BYTE $0xc0 // mov rax, r8 - QUAD $0x1c067c203a0f4766; BYTE $0x0b // pinsrb xmm15, byte [r14 + r8 + 28], 11 - QUAD $0x1d064c203a0f4766; BYTE $0x0b // pinsrb xmm9, byte [r14 + r8 + 29], 11 - QUAD $0x1e0654203a0f4766; BYTE $0x0b // pinsrb xmm10, byte [r14 + r8 + 30], 11 - QUAD $0x1f0674203a0f4366; BYTE $0x0b // pinsrb xmm6, byte [r14 + r8 + 31], 
11 + WORD $0x8948; BYTE $0xd0 // mov rax, rdx + QUAD $0x1c167c203a0f4566; BYTE $0x0b // pinsrb xmm15, byte [r14 + rdx + 28], 11 + QUAD $0x1d164c203a0f4566; BYTE $0x0b // pinsrb xmm9, byte [r14 + rdx + 29], 11 + QUAD $0x1e1654203a0f4566; BYTE $0x0b // pinsrb xmm10, byte [r14 + rdx + 30], 11 + QUAD $0x1f1674203a0f4166; BYTE $0x0b // pinsrb xmm6, byte [r14 + rdx + 31], 11 WORD $0x8948; BYTE $0xf8 // mov rax, rdi QUAD $0x1c3e7c203a0f4566; BYTE $0x0c // pinsrb xmm15, byte [r14 + rdi + 28], 12 QUAD $0x1d3e4c203a0f4566; BYTE $0x0c // pinsrb xmm9, byte [r14 + rdi + 29], 12 QUAD $0x1e3e54203a0f4566; BYTE $0x0c // pinsrb xmm10, byte [r14 + rdi + 30], 12 QUAD $0x1f3e74203a0f4166; BYTE $0x0c // pinsrb xmm6, byte [r14 + rdi + 31], 12 - WORD $0x8948; BYTE $0xd0 // mov rax, rdx - QUAD $0x1c167c203a0f4566; BYTE $0x0d // pinsrb xmm15, byte [r14 + rdx + 28], 13 - QUAD $0x1d164c203a0f4566; BYTE $0x0d // pinsrb xmm9, byte [r14 + rdx + 29], 13 - QUAD $0x1e1654203a0f4566; BYTE $0x0d // pinsrb xmm10, byte [r14 + rdx + 30], 13 - QUAD $0x1f1674203a0f4166; BYTE $0x0d // pinsrb xmm6, byte [r14 + rdx + 31], 13 - WORD $0x894c; BYTE $0xd8 // mov rax, r11 - QUAD $0x1c1e7c203a0f4766; BYTE $0x0e // pinsrb xmm15, byte [r14 + r11 + 28], 14 - QUAD $0x1d1e4c203a0f4766; BYTE $0x0e // pinsrb xmm9, byte [r14 + r11 + 29], 14 - QUAD $0x1e1e54203a0f4766; BYTE $0x0e // pinsrb xmm10, byte [r14 + r11 + 30], 14 - QUAD $0x1f1e74203a0f4366; BYTE $0x0e // pinsrb xmm6, byte [r14 + r11 + 31], 14 - QUAD $0x1c2e7c203a0f4766; BYTE $0x0f // pinsrb xmm15, byte [r14 + r13 + 28], 15 - QUAD $0x1d2e4c203a0f4766; BYTE $0x0f // pinsrb xmm9, byte [r14 + r13 + 29], 15 - QUAD $0x1e2e54203a0f4766; BYTE $0x0f // pinsrb xmm10, byte [r14 + r13 + 30], 15 + QUAD $0x1c2e7c203a0f4766; BYTE $0x0d // pinsrb xmm15, byte [r14 + r13 + 28], 13 + QUAD $0x1d2e4c203a0f4766; BYTE $0x0d // pinsrb xmm9, byte [r14 + r13 + 29], 13 + QUAD $0x1e2e54203a0f4766; BYTE $0x0d // pinsrb xmm10, byte [r14 + r13 + 30], 13 + QUAD $0x1f2e74203a0f4366; BYTE $0x0d // pinsrb xmm6, byte [r14 + r13 + 31], 13 + QUAD $0x1c267c203a0f4766; BYTE $0x0e // pinsrb xmm15, byte [r14 + r12 + 28], 14 + QUAD $0x1d264c203a0f4766; BYTE $0x0e // pinsrb xmm9, byte [r14 + r12 + 29], 14 + QUAD $0x1e2654203a0f4766; BYTE $0x0e // pinsrb xmm10, byte [r14 + r12 + 30], 14 + QUAD $0x1f2674203a0f4366; BYTE $0x0e // pinsrb xmm6, byte [r14 + r12 + 31], 14 + QUAD $0x1c1e7c203a0f4566; BYTE $0x0f // pinsrb xmm15, byte [r14 + rbx + 28], 15 + QUAD $0x1d1e4c203a0f4566; BYTE $0x0f // pinsrb xmm9, byte [r14 + rbx + 29], 15 + QUAD $0x1e1e54203a0f4566; BYTE $0x0f // pinsrb xmm10, byte [r14 + rbx + 30], 15 LONG $0x740f4566; BYTE $0xfe // pcmpeqb xmm15, xmm14 QUAD $0x000130bddf0f4466; BYTE $0x00 // pandn xmm15, oword 304[rbp] /* [rip + .LCPI5_19] */ LONG $0xeb0f4566; BYTE $0xfb // por xmm15, xmm11 - QUAD $0x1f2e74203a0f4366; BYTE $0x0f // pinsrb xmm6, byte [r14 + r13 + 31], 15 + QUAD $0x1f1e74203a0f4166; BYTE $0x0f // pinsrb xmm6, byte [r14 + rbx + 31], 15 QUAD $0x000160adf80f4466; BYTE $0x00 // psubb xmm13, oword 352[rbp] /* [rip + .LCPI5_22] */ LONG $0xeb0f4566; BYTE $0xfd // por xmm15, xmm13 LONG $0x740f4566; BYTE $0xce // pcmpeqb xmm9, xmm14 @@ -27656,7 +28911,7 @@ LBB5_86: LONG $0xeb0f4166; BYTE $0xf7 // por xmm6, xmm15 LONG $0x6f0f4166; BYTE $0xc0 // movdqa xmm0, xmm8 LONG $0xc6600f66 // punpcklbw xmm0, xmm6 - QUAD $0x000100249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 256] + QUAD $0x0000f0249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 240] LONG $0xcb6f0f66 // movdqa xmm1, xmm3 QUAD $0x0000c024a46f0f66; BYTE $0x00 
// movdqa xmm4, oword [rsp + 192] LONG $0xcc600f66 // punpcklbw xmm1, xmm4 @@ -27676,46 +28931,46 @@ LBB5_86: LONG $0x147f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm2 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x000000d8248c3b48 // cmp rcx, qword [rsp + 216] - JNE LBB5_86 - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x000000d824943b4c // cmp r10, qword [rsp + 216] + QUAD $0x000000e8248c3b48 // cmp rcx, qword [rsp + 232] + JNE LBB5_87 + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + QUAD $0x000000e824943b4c // cmp r10, qword [rsp + 232] QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - JNE LBB5_88 - JMP LBB5_91 + JNE LBB5_89 + JMP LBB5_92 -LBB5_66: +LBB5_67: LONG $0xf0e28349 // and r10, -16 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x05e0c148 // shl rax, 5 WORD $0x014c; BYTE $0xf0 // add rax, r14 QUAD $0x0000011024848948 // mov qword [rsp + 272], rax - QUAD $0x000000d82494894c // mov qword [rsp + 216], r10 + QUAD $0x000000e82494894c // mov qword [rsp + 232], r10 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] LONG $0x90048d4a // lea rax, [rax + 4*r10] - LONG $0x24448948; BYTE $0x58 // mov qword [rsp + 88], rax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax + LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 WORD $0xc031 // xor eax, eax -LBB5_67: +LBB5_68: QUAD $0x000000a824848948 // mov qword [rsp + 168], rax LONG $0x05e0c148 // shl rax, 5 - WORD $0x8949; BYTE $0xc0 // mov r8, rax - WORD $0x8949; BYTE $0xc3 // mov r11, rax - WORD $0x8949; BYTE $0xc1 // mov r9, rax WORD $0x8949; BYTE $0xc5 // mov r13, rax - WORD $0x8949; BYTE $0xc7 // mov r15, rax WORD $0x8948; BYTE $0xc7 // mov rdi, rax + WORD $0x8949; BYTE $0xc7 // mov r15, rax + WORD $0x8948; BYTE $0xc3 // mov rbx, rax WORD $0x8949; BYTE $0xc2 // mov r10, rax + WORD $0x8949; BYTE $0xc1 // mov r9, rax + WORD $0x8949; BYTE $0xc0 // mov r8, rax WORD $0x8949; BYTE $0xc4 // mov r12, rax - WORD $0x8948; BYTE $0xc3 // mov rbx, rax WORD $0x8948; BYTE $0xc2 // mov rdx, rax WORD $0x8948; BYTE $0xc6 // mov rsi, rax + LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax LONG $0x0cb60f41; BYTE $0x06 // movzx ecx, byte [r14 + rax] LONG $0xe16e0f66 // movd xmm4, ecx LONG $0x4cb60f41; WORD $0x0106 // movzx ecx, byte [r14 + rax + 1] @@ -27734,7 +28989,7 @@ LBB5_67: LONG $0x6e0f4466; BYTE $0xf1 // movd xmm14, ecx LONG $0x4cb60f41; WORD $0x0806 // movzx ecx, byte [r14 + rax + 8] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00010024847f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm0 + QUAD $0x0000f024847f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm0 LONG $0x4cb60f41; WORD $0x0906 // movzx ecx, byte [r14 + rax + 9] LONG $0x6e0f4466; BYTE $0xd9 // movd xmm11, ecx LONG $0x4cb60f41; WORD $0x0a06 // movzx ecx, byte [r14 + rax + 10] @@ -27743,7 +28998,7 @@ LBB5_67: LONG $0x6e0f4466; BYTE $0xe9 // movd xmm13, ecx LONG $0x4cb60f41; WORD $0x0c06 // movzx ecx, byte [r14 + rax + 12] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x0000e024847f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm0 + QUAD $0x0000d024847f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm0 LONG $0x4cb60f41; WORD $0x0d06 // movzx ecx, byte [r14 + rax + 13] LONG 
$0xf16e0f66 // movd xmm6, ecx LONG $0x4cb60f41; WORD $0x0e06 // movzx ecx, byte [r14 + rax + 14] @@ -27751,152 +29006,153 @@ LBB5_67: LONG $0x4cb60f41; WORD $0x0f06 // movzx ecx, byte [r14 + rax + 15] LONG $0xc16e0f66 // movd xmm0, ecx QUAD $0x0000c024847f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm0 - QUAD $0x0000008024848948 // mov qword [rsp + 128], rax - WORD $0x8948; BYTE $0xc1 // mov rcx, rax - LONG $0x20c98348 // or rcx, 32 - LONG $0x244c8948; BYTE $0x10 // mov qword [rsp + 16], rcx - LONG $0x40c88349 // or r8, 64 - LONG $0x2444894c; BYTE $0x20 // mov qword [rsp + 32], r8 - LONG $0x60cb8349 // or r11, 96 - LONG $0x245c894c; BYTE $0x50 // mov qword [rsp + 80], r11 - LONG $0x80c98149; WORD $0x0000; BYTE $0x00 // or r9, 128 - LONG $0x244c894c; BYTE $0x18 // mov qword [rsp + 24], r9 - LONG $0xa0cd8149; WORD $0x0000; BYTE $0x00 // or r13, 160 - LONG $0xc0cf8149; WORD $0x0000; BYTE $0x00 // or r15, 192 - LONG $0xe0cf8148; WORD $0x0000; BYTE $0x00 // or rdi, 224 - LONG $0x247c8948; BYTE $0x68 // mov qword [rsp + 104], rdi - LONG $0x00ca8149; WORD $0x0001; BYTE $0x00 // or r10, 256 - QUAD $0x000000982494894c // mov qword [rsp + 152], r10 + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + WORD $0x8949; BYTE $0xc3 // mov r11, rax + LONG $0x20cb8349 // or r11, 32 + LONG $0x245c894c; BYTE $0x10 // mov qword [rsp + 16], r11 + LONG $0x40cd8349 // or r13, 64 + LONG $0x60cf8348 // or rdi, 96 + LONG $0x80cf8149; WORD $0x0000; BYTE $0x00 // or r15, 128 + LONG $0xa0cb8148; WORD $0x0000; BYTE $0x00 // or rbx, 160 + LONG $0xc0ca8149; WORD $0x0000; BYTE $0x00 // or r10, 192 + LONG $0xe0c98149; WORD $0x0000; BYTE $0x00 // or r9, 224 + LONG $0x00c88149; WORD $0x0001; BYTE $0x00 // or r8, 256 LONG $0x20cc8149; WORD $0x0001; BYTE $0x00 // or r12, 288 - LONG $0x40cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 320 - QUAD $0x00000090249c8948 // mov qword [rsp + 144], rbx - LONG $0x60ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 352 + LONG $0x40ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 320 LONG $0x24548948; BYTE $0x70 // mov qword [rsp + 112], rdx - WORD $0x8948; BYTE $0xc3 // mov rbx, rax - LONG $0x80cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 384 - LONG $0x245c8948; BYTE $0x78 // mov qword [rsp + 120], rbx + LONG $0x60ce8148; WORD $0x0001; BYTE $0x00 // or rsi, 352 + QUAD $0x0000009824b48948 // mov qword [rsp + 152], rsi + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + LONG $0x80c98148; WORD $0x0001; BYTE $0x00 // or rcx, 384 + LONG $0x244c8948; BYTE $0x60 // mov qword [rsp + 96], rcx WORD $0x8948; BYTE $0xc2 // mov rdx, rax LONG $0xa0ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 416 - WORD $0x8948; BYTE $0xc1 // mov rcx, rax - LONG $0xc0c98148; WORD $0x0001; BYTE $0x00 // or rcx, 448 - LONG $0x244c8948; BYTE $0x40 // mov qword [rsp + 64], rcx - LONG $0xe0ce8148; WORD $0x0001; BYTE $0x00 // or rsi, 480 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] - QUAD $0x010624203a0f4166 // pinsrb xmm4, byte [r14 + rax], 1 - QUAD $0x020624203a0f4366 // pinsrb xmm4, byte [r14 + r8], 2 - QUAD $0x031e24203a0f4366 // pinsrb xmm4, byte [r14 + r11], 3 - QUAD $0x040e24203a0f4366 // pinsrb xmm4, byte [r14 + r9], 4 - QUAD $0x052e24203a0f4366 // pinsrb xmm4, byte [r14 + r13], 5 - QUAD $0x063e24203a0f4366 // pinsrb xmm4, byte [r14 + r15], 6 - QUAD $0x073e24203a0f4166 // pinsrb xmm4, byte [r14 + rdi], 7 - QUAD $0x081624203a0f4366 // pinsrb xmm4, byte [r14 + r10], 8 + LONG $0x24548948; BYTE $0x18 // mov qword [rsp + 24], rdx + WORD $0x8948; BYTE $0xc2 // mov rdx, rax + LONG $0xc0ca8148; WORD 
$0x0001; BYTE $0x00 // or rdx, 448 + LONG $0x24548948; BYTE $0x20 // mov qword [rsp + 32], rdx + WORD $0x8948; BYTE $0xc2 // mov rdx, rax + LONG $0xe0ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 480 + LONG $0x24548948; BYTE $0x50 // mov qword [rsp + 80], rdx + QUAD $0x011e24203a0f4366 // pinsrb xmm4, byte [r14 + r11], 1 + LONG $0x246c894c; BYTE $0x30 // mov qword [rsp + 48], r13 + QUAD $0x022e24203a0f4366 // pinsrb xmm4, byte [r14 + r13], 2 + LONG $0x247c8948; BYTE $0x28 // mov qword [rsp + 40], rdi + QUAD $0x033e24203a0f4166 // pinsrb xmm4, byte [r14 + rdi], 3 + LONG $0x247c894c; BYTE $0x78 // mov qword [rsp + 120], r15 + QUAD $0x043e24203a0f4366 // pinsrb xmm4, byte [r14 + r15], 4 + QUAD $0x00000080249c8948 // mov qword [rsp + 128], rbx + QUAD $0x051e24203a0f4166 // pinsrb xmm4, byte [r14 + rbx], 5 + QUAD $0x000000902494894c // mov qword [rsp + 144], r10 + QUAD $0x061624203a0f4366 // pinsrb xmm4, byte [r14 + r10], 6 + LONG $0x244c894c; BYTE $0x58 // mov qword [rsp + 88], r9 + QUAD $0x070e24203a0f4366 // pinsrb xmm4, byte [r14 + r9], 7 + LONG $0x2444894c; BYTE $0x48 // mov qword [rsp + 72], r8 + QUAD $0x080624203a0f4366 // pinsrb xmm4, byte [r14 + r8], 8 + WORD $0x894d; BYTE $0xe0 // mov r8, r12 QUAD $0x092624203a0f4366 // pinsrb xmm4, byte [r14 + r12], 9 - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] - QUAD $0x0a0624203a0f4166 // pinsrb xmm4, byte [r14 + rax], 10 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x0b0624203a0f4166 // pinsrb xmm4, byte [r14 + rax], 11 - QUAD $0x0c1e24203a0f4166 // pinsrb xmm4, byte [r14 + rbx], 12 - QUAD $0x0d1624203a0f4166 // pinsrb xmm4, byte [r14 + rdx], 13 - QUAD $0x0e0e24203a0f4166 // pinsrb xmm4, byte [r14 + rcx], 14 - QUAD $0x0f3624203a0f4166 // pinsrb xmm4, byte [r14 + rsi], 15 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] - QUAD $0x01065c203a0f4166; BYTE $0x01 // pinsrb xmm3, byte [r14 + rax + 1], 1 - QUAD $0x01065c203a0f4366; BYTE $0x02 // pinsrb xmm3, byte [r14 + r8 + 1], 2 - QUAD $0x011e5c203a0f4366; BYTE $0x03 // pinsrb xmm3, byte [r14 + r11 + 1], 3 - QUAD $0x010e5c203a0f4366; BYTE $0x04 // pinsrb xmm3, byte [r14 + r9 + 1], 4 - QUAD $0x012e5c203a0f4366; BYTE $0x05 // pinsrb xmm3, byte [r14 + r13 + 1], 5 - WORD $0x894d; BYTE $0xe9 // mov r9, r13 - QUAD $0x013e5c203a0f4366; BYTE $0x06 // pinsrb xmm3, byte [r14 + r15 + 1], 6 - WORD $0x894d; BYTE $0xfb // mov r11, r15 - QUAD $0x013e5c203a0f4166; BYTE $0x07 // pinsrb xmm3, byte [r14 + rdi + 1], 7 - QUAD $0x01165c203a0f4366; BYTE $0x08 // pinsrb xmm3, byte [r14 + r10 + 1], 8 - QUAD $0x01265c203a0f4366; BYTE $0x09 // pinsrb xmm3, byte [r14 + r12 + 1], 9 - WORD $0x894c; BYTE $0xe7 // mov rdi, r12 - QUAD $0x0000009024a48b4c // mov r12, qword [rsp + 144] + LONG $0x24648b4c; BYTE $0x70 // mov r12, qword [rsp + 112] + QUAD $0x0a2624203a0f4366 // pinsrb xmm4, byte [r14 + r12], 10 + QUAD $0x0b3624203a0f4166 // pinsrb xmm4, byte [r14 + rsi], 11 + QUAD $0x0c0e24203a0f4166 // pinsrb xmm4, byte [r14 + rcx], 12 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0d0624203a0f4166 // pinsrb xmm4, byte [r14 + rax], 13 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0e0624203a0f4166 // pinsrb xmm4, byte [r14 + rax], 14 + QUAD $0x0f1624203a0f4166 // pinsrb xmm4, byte [r14 + rdx], 15 + QUAD $0x011e5c203a0f4366; BYTE $0x01 // pinsrb xmm3, byte [r14 + r11 + 1], 1 + QUAD $0x012e5c203a0f4366; BYTE $0x02 // pinsrb xmm3, byte [r14 + r13 + 1], 2 + QUAD $0x013e5c203a0f4166; BYTE $0x03 // pinsrb xmm3, byte [r14 + rdi + 1], 3 + QUAD 
$0x013e5c203a0f4366; BYTE $0x04 // pinsrb xmm3, byte [r14 + r15 + 1], 4 + QUAD $0x011e5c203a0f4166; BYTE $0x05 // pinsrb xmm3, byte [r14 + rbx + 1], 5 + QUAD $0x01165c203a0f4366; BYTE $0x06 // pinsrb xmm3, byte [r14 + r10 + 1], 6 + QUAD $0x010e5c203a0f4366; BYTE $0x07 // pinsrb xmm3, byte [r14 + r9 + 1], 7 + LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] + QUAD $0x012e5c203a0f4366; BYTE $0x08 // pinsrb xmm3, byte [r14 + r13 + 1], 8 + QUAD $0x01065c203a0f4366; BYTE $0x09 // pinsrb xmm3, byte [r14 + r8 + 1], 9 + WORD $0x894c; BYTE $0xc7 // mov rdi, r8 QUAD $0x01265c203a0f4366; BYTE $0x0a // pinsrb xmm3, byte [r14 + r12 + 1], 10 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x01065c203a0f4166; BYTE $0x0b // pinsrb xmm3, byte [r14 + rax + 1], 11 - QUAD $0x011e5c203a0f4166; BYTE $0x0c // pinsrb xmm3, byte [r14 + rbx + 1], 12 - QUAD $0x01165c203a0f4166; BYTE $0x0d // pinsrb xmm3, byte [r14 + rdx + 1], 13 - LONG $0x24548948; BYTE $0x30 // mov qword [rsp + 48], rdx - QUAD $0x010e5c203a0f4166; BYTE $0x0e // pinsrb xmm3, byte [r14 + rcx + 1], 14 + WORD $0x894d; BYTE $0xe7 // mov r15, r12 + QUAD $0x01365c203a0f4166; BYTE $0x0b // pinsrb xmm3, byte [r14 + rsi + 1], 11 + QUAD $0x010e5c203a0f4166; BYTE $0x0c // pinsrb xmm3, byte [r14 + rcx + 1], 12 + LONG $0x245c8b4c; BYTE $0x18 // mov r11, qword [rsp + 24] + QUAD $0x011e5c203a0f4366; BYTE $0x0d // pinsrb xmm3, byte [r14 + r11 + 1], 13 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x01065c203a0f4166; BYTE $0x0e // pinsrb xmm3, byte [r14 + rax + 1], 14 QUAD $0x0000b0248c6f0f66; BYTE $0x00 // movdqa xmm1, oword [rsp + 176] LONG $0xe1740f66 // pcmpeqb xmm4, xmm1 - QUAD $0x01365c203a0f4166; BYTE $0x0f // pinsrb xmm3, byte [r14 + rsi + 1], 15 - WORD $0x8949; BYTE $0xf0 // mov r8, rsi + QUAD $0x01165c203a0f4166; BYTE $0x0f // pinsrb xmm3, byte [r14 + rdx + 1], 15 LONG $0xd9740f66 // pcmpeqb xmm3, xmm1 QUAD $0x00000100856f0f66 // movdqa xmm0, oword 256[rbp] /* [rip + .LCPI5_16] */ LONG $0xd8df0f66 // pandn xmm3, xmm0 LONG $0xdcfc0f66 // paddb xmm3, xmm4 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] LONG $0x74b60f41; WORD $0x1006 // movzx esi, byte [r14 + rax + 16] LONG $0x6e0f4466; BYTE $0xd6 // movd xmm10, esi LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x02066c203a0f4166; BYTE $0x01 // pinsrb xmm5, byte [r14 + rax + 2], 1 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] - QUAD $0x022e6c203a0f4366; BYTE $0x02 // pinsrb xmm5, byte [r14 + r13 + 2], 2 - LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] - QUAD $0x023e6c203a0f4366; BYTE $0x03 // pinsrb xmm5, byte [r14 + r15 + 2], 3 - LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + QUAD $0x02266c203a0f4366; BYTE $0x02 // pinsrb xmm5, byte [r14 + r12 + 2], 2 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x02066c203a0f4166; BYTE $0x03 // pinsrb xmm5, byte [r14 + rax + 2], 3 + LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] QUAD $0x021e6c203a0f4166; BYTE $0x04 // pinsrb xmm5, byte [r14 + rbx + 2], 4 - QUAD $0x020e6c203a0f4366; BYTE $0x05 // pinsrb xmm5, byte [r14 + r9 + 2], 5 - QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 - QUAD $0x021e6c203a0f4366; BYTE $0x06 // pinsrb xmm5, byte [r14 + r11 + 2], 6 - LONG $0x24548b4c; BYTE $0x68 // mov r10, qword [rsp + 104] - QUAD $0x02166c203a0f4366; BYTE $0x07 // pinsrb xmm5, byte [r14 + r10 + 2], 7 - 
QUAD $0x0000009824848b48 // mov rax, qword [rsp + 152] - QUAD $0x02066c203a0f4166; BYTE $0x08 // pinsrb xmm5, byte [r14 + rax + 2], 8 - LONG $0x247c8948; BYTE $0x60 // mov qword [rsp + 96], rdi + QUAD $0x0000008024948b4c // mov r10, qword [rsp + 128] + QUAD $0x02166c203a0f4366; BYTE $0x05 // pinsrb xmm5, byte [r14 + r10 + 2], 5 + QUAD $0x00000090248c8b4c // mov r9, qword [rsp + 144] + QUAD $0x020e6c203a0f4366; BYTE $0x06 // pinsrb xmm5, byte [r14 + r9 + 2], 6 + LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] + QUAD $0x02066c203a0f4366; BYTE $0x07 // pinsrb xmm5, byte [r14 + r8 + 2], 7 + QUAD $0x022e6c203a0f4366; BYTE $0x08 // pinsrb xmm5, byte [r14 + r13 + 2], 8 + WORD $0x8948; BYTE $0xf9 // mov rcx, rdi QUAD $0x023e6c203a0f4166; BYTE $0x09 // pinsrb xmm5, byte [r14 + rdi + 2], 9 - QUAD $0x02266c203a0f4366; BYTE $0x0a // pinsrb xmm5, byte [r14 + r12 + 2], 10 - LONG $0x24748b48; BYTE $0x70 // mov rsi, qword [rsp + 112] - QUAD $0x02366c203a0f4166; BYTE $0x0b // pinsrb xmm5, byte [r14 + rsi + 2], 11 - LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x020e6c203a0f4166; BYTE $0x0c // pinsrb xmm5, byte [r14 + rcx + 2], 12 - QUAD $0x02166c203a0f4166; BYTE $0x0d // pinsrb xmm5, byte [r14 + rdx + 2], 13 - LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] - QUAD $0x02166c203a0f4166; BYTE $0x0e // pinsrb xmm5, byte [r14 + rdx + 2], 14 - QUAD $0x02066c203a0f4366; BYTE $0x0f // pinsrb xmm5, byte [r14 + r8 + 2], 15 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] - QUAD $0x03167c203a0f4166; BYTE $0x01 // pinsrb xmm7, byte [r14 + rdx + 3], 1 - QUAD $0x032e7c203a0f4366; BYTE $0x02 // pinsrb xmm7, byte [r14 + r13 + 3], 2 - QUAD $0x033e7c203a0f4366; BYTE $0x03 // pinsrb xmm7, byte [r14 + r15 + 3], 3 + QUAD $0x023e6c203a0f4366; BYTE $0x0a // pinsrb xmm5, byte [r14 + r15 + 2], 10 + QUAD $0x0000009824948b48 // mov rdx, qword [rsp + 152] + QUAD $0x02166c203a0f4166; BYTE $0x0b // pinsrb xmm5, byte [r14 + rdx + 2], 11 + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + QUAD $0x023e6c203a0f4166; BYTE $0x0c // pinsrb xmm5, byte [r14 + rdi + 2], 12 + QUAD $0x021e6c203a0f4366; BYTE $0x0d // pinsrb xmm5, byte [r14 + r11 + 2], 13 + LONG $0x24748b48; BYTE $0x20 // mov rsi, qword [rsp + 32] + QUAD $0x02366c203a0f4166; BYTE $0x0e // pinsrb xmm5, byte [r14 + rsi + 2], 14 + LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] + QUAD $0x021e6c203a0f4366; BYTE $0x0f // pinsrb xmm5, byte [r14 + r11 + 2], 15 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + QUAD $0x03067c203a0f4166; BYTE $0x01 // pinsrb xmm7, byte [r14 + rax + 3], 1 + QUAD $0x03267c203a0f4366; BYTE $0x02 // pinsrb xmm7, byte [r14 + r12 + 3], 2 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x03067c203a0f4166; BYTE $0x03 // pinsrb xmm7, byte [r14 + rax + 3], 3 QUAD $0x031e7c203a0f4166; BYTE $0x04 // pinsrb xmm7, byte [r14 + rbx + 3], 4 - QUAD $0x030e7c203a0f4366; BYTE $0x05 // pinsrb xmm7, byte [r14 + r9 + 3], 5 - QUAD $0x031e7c203a0f4366; BYTE $0x06 // pinsrb xmm7, byte [r14 + r11 + 3], 6 - QUAD $0x03167c203a0f4366; BYTE $0x07 // pinsrb xmm7, byte [r14 + r10 + 3], 7 - QUAD $0x03067c203a0f4166; BYTE $0x08 // pinsrb xmm7, byte [r14 + rax + 3], 8 - QUAD $0x033e7c203a0f4166; BYTE $0x09 // pinsrb xmm7, byte [r14 + rdi + 3], 9 - QUAD $0x03267c203a0f4366; BYTE $0x0a // pinsrb xmm7, byte [r14 + r12 + 3], 10 - QUAD $0x03367c203a0f4166; BYTE $0x0b // pinsrb xmm7, byte [r14 + rsi + 3], 11 - QUAD $0x030e7c203a0f4166; BYTE $0x0c // pinsrb xmm7, byte [r14 + rcx + 3], 12 - LONG 
$0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] - QUAD $0x03167c203a0f4166; BYTE $0x0d // pinsrb xmm7, byte [r14 + rdx + 3], 13 - LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] - QUAD $0x03167c203a0f4166; BYTE $0x0e // pinsrb xmm7, byte [r14 + rdx + 3], 14 - QUAD $0x03067c203a0f4366; BYTE $0x0f // pinsrb xmm7, byte [r14 + r8 + 3], 15 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] - QUAD $0x04164c203a0f4566; BYTE $0x01 // pinsrb xmm9, byte [r14 + rdx + 4], 1 - QUAD $0x042e4c203a0f4766; BYTE $0x02 // pinsrb xmm9, byte [r14 + r13 + 4], 2 - QUAD $0x043e4c203a0f4766; BYTE $0x03 // pinsrb xmm9, byte [r14 + r15 + 4], 3 + QUAD $0x03167c203a0f4366; BYTE $0x05 // pinsrb xmm7, byte [r14 + r10 + 3], 5 + QUAD $0x030e7c203a0f4366; BYTE $0x06 // pinsrb xmm7, byte [r14 + r9 + 3], 6 + QUAD $0x03067c203a0f4366; BYTE $0x07 // pinsrb xmm7, byte [r14 + r8 + 3], 7 + QUAD $0x032e7c203a0f4366; BYTE $0x08 // pinsrb xmm7, byte [r14 + r13 + 3], 8 + QUAD $0x030e7c203a0f4166; BYTE $0x09 // pinsrb xmm7, byte [r14 + rcx + 3], 9 + QUAD $0x033e7c203a0f4366; BYTE $0x0a // pinsrb xmm7, byte [r14 + r15 + 3], 10 + QUAD $0x03167c203a0f4166; BYTE $0x0b // pinsrb xmm7, byte [r14 + rdx + 3], 11 + QUAD $0x033e7c203a0f4166; BYTE $0x0c // pinsrb xmm7, byte [r14 + rdi + 3], 12 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x03067c203a0f4166; BYTE $0x0d // pinsrb xmm7, byte [r14 + rax + 3], 13 + QUAD $0x03367c203a0f4166; BYTE $0x0e // pinsrb xmm7, byte [r14 + rsi + 3], 14 + QUAD $0x031e7c203a0f4366; BYTE $0x0f // pinsrb xmm7, byte [r14 + r11 + 3], 15 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + QUAD $0x04064c203a0f4566; BYTE $0x01 // pinsrb xmm9, byte [r14 + rax + 4], 1 + QUAD $0x04264c203a0f4766; BYTE $0x02 // pinsrb xmm9, byte [r14 + r12 + 4], 2 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x04064c203a0f4566; BYTE $0x03 // pinsrb xmm9, byte [r14 + rax + 4], 3 QUAD $0x041e4c203a0f4566; BYTE $0x04 // pinsrb xmm9, byte [r14 + rbx + 4], 4 - QUAD $0x040e4c203a0f4766; BYTE $0x05 // pinsrb xmm9, byte [r14 + r9 + 4], 5 - WORD $0x894d; BYTE $0xcf // mov r15, r9 - LONG $0x244c894c; BYTE $0x38 // mov qword [rsp + 56], r9 - QUAD $0x041e4c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r11 + 4], 6 - QUAD $0x04164c203a0f4766; BYTE $0x07 // pinsrb xmm9, byte [r14 + r10 + 4], 7 - WORD $0x894d; BYTE $0xd1 // mov r9, r10 - QUAD $0x04064c203a0f4566; BYTE $0x08 // pinsrb xmm9, byte [r14 + rax + 4], 8 - QUAD $0x043e4c203a0f4566; BYTE $0x09 // pinsrb xmm9, byte [r14 + rdi + 4], 9 - QUAD $0x04264c203a0f4766; BYTE $0x0a // pinsrb xmm9, byte [r14 + r12 + 4], 10 - QUAD $0x04364c203a0f4566; BYTE $0x0b // pinsrb xmm9, byte [r14 + rsi + 4], 11 - WORD $0x8948; BYTE $0xf7 // mov rdi, rsi - QUAD $0x040e4c203a0f4566; BYTE $0x0c // pinsrb xmm9, byte [r14 + rcx + 4], 12 - LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] - QUAD $0x04264c203a0f4766; BYTE $0x0d // pinsrb xmm9, byte [r14 + r12 + 4], 13 - LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] - QUAD $0x04164c203a0f4566; BYTE $0x0e // pinsrb xmm9, byte [r14 + rdx + 4], 14 - QUAD $0x04064c203a0f4766; BYTE $0x0f // pinsrb xmm9, byte [r14 + r8 + 4], 15 + QUAD $0x04164c203a0f4766; BYTE $0x05 // pinsrb xmm9, byte [r14 + r10 + 4], 5 + QUAD $0x040e4c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r9 + 4], 6 + QUAD $0x04064c203a0f4766; BYTE $0x07 // pinsrb xmm9, byte [r14 + r8 + 4], 7 + QUAD $0x042e4c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r13 + 4], 8 + WORD $0x894c; BYTE $0xe8 // mov rax, r13 + 
QUAD $0x040e4c203a0f4566; BYTE $0x09 // pinsrb xmm9, byte [r14 + rcx + 4], 9 + QUAD $0x043e4c203a0f4766; BYTE $0x0a // pinsrb xmm9, byte [r14 + r15 + 4], 10 + QUAD $0x04164c203a0f4566; BYTE $0x0b // pinsrb xmm9, byte [r14 + rdx + 4], 11 + QUAD $0x043e4c203a0f4566; BYTE $0x0c // pinsrb xmm9, byte [r14 + rdi + 4], 12 + LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] + QUAD $0x042e4c203a0f4766; BYTE $0x0d // pinsrb xmm9, byte [r14 + r13 + 4], 13 + QUAD $0x04364c203a0f4566; BYTE $0x0e // pinsrb xmm9, byte [r14 + rsi + 4], 14 + QUAD $0x041e4c203a0f4766; BYTE $0x0f // pinsrb xmm9, byte [r14 + r11 + 4], 15 LONG $0xe9740f66 // pcmpeqb xmm5, xmm1 QUAD $0x00000110856f0f66 // movdqa xmm0, oword 272[rbp] /* [rip + .LCPI5_17] */ LONG $0xe8df0f66 // pandn xmm5, xmm0 @@ -27904,83 +29160,80 @@ LBB5_67: QUAD $0x00000120856f0f66 // movdqa xmm0, oword 288[rbp] /* [rip + .LCPI5_18] */ LONG $0xf8df0f66 // pandn xmm7, xmm0 LONG $0xfdeb0f66 // por xmm7, xmm5 - QUAD $0x0000008024948b48 // mov rdx, qword [rsp + 128] - LONG $0x74b60f41; WORD $0x1116 // movzx esi, byte [r14 + rdx + 17] + LONG $0x245c8b4c; BYTE $0x38 // mov r11, qword [rsp + 56] + LONG $0x74b60f43; WORD $0x111e // movzx esi, byte [r14 + r11 + 17] LONG $0xe66e0f66 // movd xmm4, esi LONG $0x740f4466; BYTE $0xc9 // pcmpeqb xmm9, xmm1 QUAD $0x00000130856f0f66 // movdqa xmm0, oword 304[rbp] /* [rip + .LCPI5_19] */ LONG $0xdf0f4466; BYTE $0xc8 // pandn xmm9, xmm0 LONG $0xeb0f4466; BYTE $0xcf // por xmm9, xmm7 - LONG $0x74b60f41; WORD $0x1216 // movzx esi, byte [r14 + rdx + 18] + LONG $0x74b60f43; WORD $0x121e // movzx esi, byte [r14 + r11 + 18] LONG $0xfe6e0f66 // movd xmm7, esi LONG $0xc0760f66 // pcmpeqd xmm0, xmm0 LONG $0xd8f80f66 // psubb xmm3, xmm0 LONG $0xeb0f4466; BYTE $0xcb // por xmm9, xmm3 - LONG $0x74b60f41; WORD $0x1316 // movzx esi, byte [r14 + rdx + 19] + LONG $0x74b60f43; WORD $0x131e // movzx esi, byte [r14 + r11 + 19] LONG $0xee6e0f66 // movd xmm5, esi - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] - QUAD $0x051654203a0f4166; BYTE $0x01 // pinsrb xmm2, byte [r14 + rdx + 5], 1 - LONG $0x245c8b4c; BYTE $0x20 // mov r11, qword [rsp + 32] - QUAD $0x051e54203a0f4366; BYTE $0x02 // pinsrb xmm2, byte [r14 + r11 + 5], 2 - LONG $0x246c8b4c; BYTE $0x50 // mov r13, qword [rsp + 80] - QUAD $0x052e54203a0f4366; BYTE $0x03 // pinsrb xmm2, byte [r14 + r13 + 5], 3 + LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] + QUAD $0x053654203a0f4166; BYTE $0x01 // pinsrb xmm2, byte [r14 + rsi + 5], 1 + QUAD $0x052654203a0f4366; BYTE $0x02 // pinsrb xmm2, byte [r14 + r12 + 5], 2 + LONG $0x247c8b4c; BYTE $0x28 // mov r15, qword [rsp + 40] + QUAD $0x053e54203a0f4366; BYTE $0x03 // pinsrb xmm2, byte [r14 + r15 + 5], 3 QUAD $0x051e54203a0f4166; BYTE $0x04 // pinsrb xmm2, byte [r14 + rbx + 5], 4 - QUAD $0x053e54203a0f4366; BYTE $0x05 // pinsrb xmm2, byte [r14 + r15 + 5], 5 - QUAD $0x0000008824948b4c // mov r10, qword [rsp + 136] - QUAD $0x051654203a0f4366; BYTE $0x06 // pinsrb xmm2, byte [r14 + r10 + 5], 6 - QUAD $0x050e54203a0f4366; BYTE $0x07 // pinsrb xmm2, byte [r14 + r9 + 5], 7 + QUAD $0x051654203a0f4366; BYTE $0x05 // pinsrb xmm2, byte [r14 + r10 + 5], 5 + QUAD $0x050e54203a0f4366; BYTE $0x06 // pinsrb xmm2, byte [r14 + r9 + 5], 6 + QUAD $0x050654203a0f4366; BYTE $0x07 // pinsrb xmm2, byte [r14 + r8 + 5], 7 QUAD $0x050654203a0f4166; BYTE $0x08 // pinsrb xmm2, byte [r14 + rax + 5], 8 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] - QUAD $0x053654203a0f4166; BYTE $0x09 // pinsrb xmm2, byte [r14 + rsi + 5], 9 - QUAD 
$0x0000009024bc8b4c // mov r15, qword [rsp + 144] - QUAD $0x053e54203a0f4366; BYTE $0x0a // pinsrb xmm2, byte [r14 + r15 + 5], 10 - WORD $0x8949; BYTE $0xf9 // mov r9, rdi - QUAD $0x053e54203a0f4166; BYTE $0x0b // pinsrb xmm2, byte [r14 + rdi + 5], 11 - QUAD $0x050e54203a0f4166; BYTE $0x0c // pinsrb xmm2, byte [r14 + rcx + 5], 12 - QUAD $0x052654203a0f4366; BYTE $0x0d // pinsrb xmm2, byte [r14 + r12 + 5], 13 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] - QUAD $0x053e54203a0f4166; BYTE $0x0e // pinsrb xmm2, byte [r14 + rdi + 5], 14 - LONG $0x2444894c; BYTE $0x48 // mov qword [rsp + 72], r8 - QUAD $0x050654203a0f4366; BYTE $0x0f // pinsrb xmm2, byte [r14 + r8 + 5], 15 - QUAD $0x061644203a0f4566; BYTE $0x01 // pinsrb xmm8, byte [r14 + rdx + 6], 1 - QUAD $0x061e44203a0f4766; BYTE $0x02 // pinsrb xmm8, byte [r14 + r11 + 6], 2 + QUAD $0x00000088248c8948 // mov qword [rsp + 136], rcx + QUAD $0x050e54203a0f4166; BYTE $0x09 // pinsrb xmm2, byte [r14 + rcx + 5], 9 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x050654203a0f4166; BYTE $0x0a // pinsrb xmm2, byte [r14 + rax + 5], 10 + QUAD $0x051654203a0f4166; BYTE $0x0b // pinsrb xmm2, byte [r14 + rdx + 5], 11 + QUAD $0x053e54203a0f4166; BYTE $0x0c // pinsrb xmm2, byte [r14 + rdi + 5], 12 WORD $0x894d; BYTE $0xeb // mov r11, r13 - QUAD $0x062e44203a0f4766; BYTE $0x03 // pinsrb xmm8, byte [r14 + r13 + 6], 3 + QUAD $0x052e54203a0f4366; BYTE $0x0d // pinsrb xmm2, byte [r14 + r13 + 5], 13 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x050654203a0f4166; BYTE $0x0e // pinsrb xmm2, byte [r14 + rax + 5], 14 + LONG $0x246c8b4c; BYTE $0x50 // mov r13, qword [rsp + 80] + QUAD $0x052e54203a0f4366; BYTE $0x0f // pinsrb xmm2, byte [r14 + r13 + 5], 15 + QUAD $0x063644203a0f4566; BYTE $0x01 // pinsrb xmm8, byte [r14 + rsi + 6], 1 + QUAD $0x062644203a0f4766; BYTE $0x02 // pinsrb xmm8, byte [r14 + r12 + 6], 2 + QUAD $0x063e44203a0f4766; BYTE $0x03 // pinsrb xmm8, byte [r14 + r15 + 6], 3 QUAD $0x061e44203a0f4566; BYTE $0x04 // pinsrb xmm8, byte [r14 + rbx + 6], 4 - LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] - QUAD $0x062e44203a0f4766; BYTE $0x05 // pinsrb xmm8, byte [r14 + r13 + 6], 5 - QUAD $0x061644203a0f4766; BYTE $0x06 // pinsrb xmm8, byte [r14 + r10 + 6], 6 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] - QUAD $0x061e44203a0f4566; BYTE $0x07 // pinsrb xmm8, byte [r14 + rbx + 6], 7 + QUAD $0x061644203a0f4766; BYTE $0x05 // pinsrb xmm8, byte [r14 + r10 + 6], 5 + QUAD $0x060e44203a0f4766; BYTE $0x06 // pinsrb xmm8, byte [r14 + r9 + 6], 6 + QUAD $0x060644203a0f4766; BYTE $0x07 // pinsrb xmm8, byte [r14 + r8 + 6], 7 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x060644203a0f4566; BYTE $0x08 // pinsrb xmm8, byte [r14 + rax + 6], 8 - QUAD $0x063644203a0f4566; BYTE $0x09 // pinsrb xmm8, byte [r14 + rsi + 6], 9 - QUAD $0x063e44203a0f4766; BYTE $0x0a // pinsrb xmm8, byte [r14 + r15 + 6], 10 - QUAD $0x060e44203a0f4766; BYTE $0x0b // pinsrb xmm8, byte [r14 + r9 + 6], 11 - QUAD $0x060e44203a0f4566; BYTE $0x0c // pinsrb xmm8, byte [r14 + rcx + 6], 12 - QUAD $0x062644203a0f4766; BYTE $0x0d // pinsrb xmm8, byte [r14 + r12 + 6], 13 - QUAD $0x063e44203a0f4566; BYTE $0x0e // pinsrb xmm8, byte [r14 + rdi + 6], 14 - QUAD $0x060644203a0f4766; BYTE $0x0f // pinsrb xmm8, byte [r14 + r8 + 6], 15 - QUAD $0x071674203a0f4566; BYTE $0x01 // pinsrb xmm14, byte [r14 + rdx + 7], 1 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x071e74203a0f4566; BYTE $0x02 // pinsrb 
xmm14, byte [r14 + rbx + 7], 2 - WORD $0x894d; BYTE $0xd8 // mov r8, r11 - QUAD $0x071e74203a0f4766; BYTE $0x03 // pinsrb xmm14, byte [r14 + r11 + 7], 3 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] - QUAD $0x071674203a0f4566; BYTE $0x04 // pinsrb xmm14, byte [r14 + rdx + 7], 4 - QUAD $0x072e74203a0f4766; BYTE $0x05 // pinsrb xmm14, byte [r14 + r13 + 7], 5 - QUAD $0x0000008824948b48 // mov rdx, qword [rsp + 136] - QUAD $0x071674203a0f4566; BYTE $0x06 // pinsrb xmm14, byte [r14 + rdx + 7], 6 - LONG $0x244c8b4c; BYTE $0x68 // mov r9, qword [rsp + 104] - QUAD $0x070e74203a0f4766; BYTE $0x07 // pinsrb xmm14, byte [r14 + r9 + 7], 7 - QUAD $0x070674203a0f4566; BYTE $0x08 // pinsrb xmm14, byte [r14 + rax + 7], 8 - WORD $0x8949; BYTE $0xc5 // mov r13, rax - QUAD $0x073674203a0f4566; BYTE $0x09 // pinsrb xmm14, byte [r14 + rsi + 7], 9 + QUAD $0x060e44203a0f4566; BYTE $0x09 // pinsrb xmm8, byte [r14 + rcx + 6], 9 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x060644203a0f4566; BYTE $0x0a // pinsrb xmm8, byte [r14 + rax + 6], 10 + QUAD $0x061644203a0f4566; BYTE $0x0b // pinsrb xmm8, byte [r14 + rdx + 6], 11 + QUAD $0x063e44203a0f4566; BYTE $0x0c // pinsrb xmm8, byte [r14 + rdi + 6], 12 + QUAD $0x061e44203a0f4766; BYTE $0x0d // pinsrb xmm8, byte [r14 + r11 + 6], 13 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x060644203a0f4566; BYTE $0x0e // pinsrb xmm8, byte [r14 + rax + 6], 14 + QUAD $0x062e44203a0f4766; BYTE $0x0f // pinsrb xmm8, byte [r14 + r13 + 6], 15 + WORD $0x894c; BYTE $0xe8 // mov rax, r13 + QUAD $0x073674203a0f4566; BYTE $0x01 // pinsrb xmm14, byte [r14 + rsi + 7], 1 + QUAD $0x072674203a0f4766; BYTE $0x02 // pinsrb xmm14, byte [r14 + r12 + 7], 2 + QUAD $0x073e74203a0f4766; BYTE $0x03 // pinsrb xmm14, byte [r14 + r15 + 7], 3 + QUAD $0x071e74203a0f4566; BYTE $0x04 // pinsrb xmm14, byte [r14 + rbx + 7], 4 + WORD $0x8949; BYTE $0xdc // mov r12, rbx + QUAD $0x071674203a0f4766; BYTE $0x05 // pinsrb xmm14, byte [r14 + r10 + 7], 5 + QUAD $0x070e74203a0f4766; BYTE $0x06 // pinsrb xmm14, byte [r14 + r9 + 7], 6 + QUAD $0x070674203a0f4766; BYTE $0x07 // pinsrb xmm14, byte [r14 + r8 + 7], 7 + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] + QUAD $0x072e74203a0f4766; BYTE $0x08 // pinsrb xmm14, byte [r14 + r13 + 7], 8 + QUAD $0x070e74203a0f4566; BYTE $0x09 // pinsrb xmm14, byte [r14 + rcx + 7], 9 + LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] QUAD $0x073e74203a0f4766; BYTE $0x0a // pinsrb xmm14, byte [r14 + r15 + 7], 10 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] - QUAD $0x071674203a0f4766; BYTE $0x0b // pinsrb xmm14, byte [r14 + r10 + 7], 11 - QUAD $0x070e74203a0f4566; BYTE $0x0c // pinsrb xmm14, byte [r14 + rcx + 7], 12 - QUAD $0x072674203a0f4766; BYTE $0x0d // pinsrb xmm14, byte [r14 + r12 + 7], 13 - QUAD $0x073e74203a0f4566; BYTE $0x0e // pinsrb xmm14, byte [r14 + rdi + 7], 14 + QUAD $0x071674203a0f4566; BYTE $0x0b // pinsrb xmm14, byte [r14 + rdx + 7], 11 + QUAD $0x073e74203a0f4566; BYTE $0x0c // pinsrb xmm14, byte [r14 + rdi + 7], 12 + QUAD $0x071e74203a0f4766; BYTE $0x0d // pinsrb xmm14, byte [r14 + r11 + 7], 13 + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + QUAD $0x070e74203a0f4566; BYTE $0x0e // pinsrb xmm14, byte [r14 + rcx + 7], 14 LONG $0x6f0f4166; BYTE $0xce // movdqa xmm1, xmm14 QUAD $0x00b024b46f0f4466; WORD $0x0000 // movdqa xmm14, oword [rsp + 176] LONG $0x740f4166; BYTE $0xd6 // pcmpeqb xmm2, xmm14 @@ -27990,287 +29243,292 @@ 
LBB5_67: QUAD $0x00000150856f0f66 // movdqa xmm0, oword 336[rbp] /* [rip + .LCPI5_21] */ LONG $0xdf0f4466; BYTE $0xc0 // pandn xmm8, xmm0 LONG $0xeb0f4466; BYTE $0xc2 // por xmm8, xmm2 - QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] - LONG $0x74b60f43; WORD $0x143e // movzx esi, byte [r14 + r15 + 20] + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] + LONG $0x74b60f43; WORD $0x1406 // movzx esi, byte [r14 + r8 + 20] LONG $0xde6e0f66 // movd xmm3, esi - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] QUAD $0x07064c203a0f4166; BYTE $0x0f // pinsrb xmm1, byte [r14 + rax + 7], 15 LONG $0x740f4166; BYTE $0xce // pcmpeqb xmm1, xmm14 LONG $0x456f0f66; BYTE $0x60 // movdqa xmm0, oword 96[rbp] /* [rip + .LCPI5_6] */ LONG $0xc8df0f66 // pandn xmm1, xmm0 LONG $0xeb0f4166; BYTE $0xc8 // por xmm1, xmm8 - LONG $0x74b60f43; WORD $0x153e // movzx esi, byte [r14 + r15 + 21] + LONG $0x74b60f43; WORD $0x1506 // movzx esi, byte [r14 + r8 + 21] LONG $0xd66e0f66 // movd xmm2, esi - QUAD $0x00010024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 256] + QUAD $0x0000f024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 240] LONG $0x245c8b4c; BYTE $0x10 // mov r11, qword [rsp + 16] QUAD $0x081e44203a0f4366; BYTE $0x01 // pinsrb xmm0, byte [r14 + r11 + 8], 1 + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] QUAD $0x081e44203a0f4166; BYTE $0x02 // pinsrb xmm0, byte [r14 + rbx + 8], 2 - QUAD $0x080644203a0f4366; BYTE $0x03 // pinsrb xmm0, byte [r14 + r8 + 8], 3 - LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] - QUAD $0x080e44203a0f4166; BYTE $0x04 // pinsrb xmm0, byte [r14 + rcx + 8], 4 - LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] - QUAD $0x083644203a0f4166; BYTE $0x05 // pinsrb xmm0, byte [r14 + rsi + 8], 5 - QUAD $0x081644203a0f4166; BYTE $0x06 // pinsrb xmm0, byte [r14 + rdx + 8], 6 + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + QUAD $0x083e44203a0f4166; BYTE $0x03 // pinsrb xmm0, byte [r14 + rdi + 8], 3 + QUAD $0x082644203a0f4366; BYTE $0x04 // pinsrb xmm0, byte [r14 + r12 + 8], 4 + WORD $0x894c; BYTE $0xe2 // mov rdx, r12 + QUAD $0x081644203a0f4366; BYTE $0x05 // pinsrb xmm0, byte [r14 + r10 + 8], 5 + QUAD $0x0000009024b48b48 // mov rsi, qword [rsp + 144] + QUAD $0x083644203a0f4166; BYTE $0x06 // pinsrb xmm0, byte [r14 + rsi + 8], 6 QUAD $0x080e44203a0f4366; BYTE $0x07 // pinsrb xmm0, byte [r14 + r9 + 8], 7 QUAD $0x082e44203a0f4366; BYTE $0x08 // pinsrb xmm0, byte [r14 + r13 + 8], 8 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] QUAD $0x083644203a0f4166; BYTE $0x09 // pinsrb xmm0, byte [r14 + rsi + 8], 9 - QUAD $0x0000009024a48b4c // mov r12, qword [rsp + 144] - QUAD $0x082644203a0f4366; BYTE $0x0a // pinsrb xmm0, byte [r14 + r12 + 8], 10 - QUAD $0x081644203a0f4366; BYTE $0x0b // pinsrb xmm0, byte [r14 + r10 + 8], 11 - LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] - QUAD $0x083644203a0f4166; BYTE $0x0c // pinsrb xmm0, byte [r14 + rsi + 8], 12 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] - QUAD $0x081e44203a0f4166; BYTE $0x0d // pinsrb xmm0, byte [r14 + rbx + 8], 13 - QUAD $0x083e44203a0f4166; BYTE $0x0e // pinsrb xmm0, byte [r14 + rdi + 8], 14 + QUAD $0x083e44203a0f4366; BYTE $0x0a // pinsrb xmm0, byte [r14 + r15 + 8], 10 + QUAD $0x00000098248c8b4c // mov r9, qword [rsp + 152] + QUAD $0x080e44203a0f4366; BYTE $0x0b // pinsrb xmm0, byte [r14 + r9 + 8], 11 + LONG $0x24648b4c; BYTE $0x60 // mov r12, qword [rsp + 96] + QUAD 
$0x082644203a0f4366; BYTE $0x0c // pinsrb xmm0, byte [r14 + r12 + 8], 12 + LONG $0x24748b48; BYTE $0x18 // mov rsi, qword [rsp + 24] + QUAD $0x083644203a0f4166; BYTE $0x0d // pinsrb xmm0, byte [r14 + rsi + 8], 13 + QUAD $0x080e44203a0f4166; BYTE $0x0e // pinsrb xmm0, byte [r14 + rcx + 8], 14 QUAD $0x080644203a0f4166; BYTE $0x0f // pinsrb xmm0, byte [r14 + rax + 8], 15 LONG $0xeb0f4166; BYTE $0xc9 // por xmm1, xmm9 - QUAD $0x000100248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm1 - LONG $0x74b60f43; WORD $0x163e // movzx esi, byte [r14 + r15 + 22] + QUAD $0x0000f0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm1 + LONG $0x74b60f43; WORD $0x1606 // movzx esi, byte [r14 + r8 + 22] LONG $0xce6e0f66 // movd xmm1, esi LONG $0x740f4166; BYTE $0xc6 // pcmpeqb xmm0, xmm14 QUAD $0x091e5c203a0f4766; BYTE $0x01 // pinsrb xmm11, byte [r14 + r11 + 9], 1 - WORD $0x894d; BYTE $0xdf // mov r15, r11 - LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] - QUAD $0x09165c203a0f4766; BYTE $0x02 // pinsrb xmm11, byte [r14 + r10 + 9], 2 - QUAD $0x09065c203a0f4766; BYTE $0x03 // pinsrb xmm11, byte [r14 + r8 + 9], 3 - WORD $0x894d; BYTE $0xc5 // mov r13, r8 - QUAD $0x090e5c203a0f4566; BYTE $0x04 // pinsrb xmm11, byte [r14 + rcx + 9], 4 - LONG $0x244c8b4c; BYTE $0x38 // mov r9, qword [rsp + 56] - QUAD $0x090e5c203a0f4766; BYTE $0x05 // pinsrb xmm11, byte [r14 + r9 + 9], 5 - QUAD $0x09165c203a0f4566; BYTE $0x06 // pinsrb xmm11, byte [r14 + rdx + 9], 6 - LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] - QUAD $0x091e5c203a0f4766; BYTE $0x07 // pinsrb xmm11, byte [r14 + r11 + 9], 7 - QUAD $0x0000009824b48b48 // mov rsi, qword [rsp + 152] - QUAD $0x09365c203a0f4566; BYTE $0x08 // pinsrb xmm11, byte [r14 + rsi + 9], 8 - LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] - QUAD $0x09065c203a0f4766; BYTE $0x09 // pinsrb xmm11, byte [r14 + r8 + 9], 9 - QUAD $0x09265c203a0f4766; BYTE $0x0a // pinsrb xmm11, byte [r14 + r12 + 9], 10 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] - QUAD $0x09165c203a0f4566; BYTE $0x0b // pinsrb xmm11, byte [r14 + rdx + 9], 11 - LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] - QUAD $0x093e5c203a0f4566; BYTE $0x0c // pinsrb xmm11, byte [r14 + rdi + 9], 12 + WORD $0x894d; BYTE $0xda // mov r10, r11 + QUAD $0x091e5c203a0f4566; BYTE $0x02 // pinsrb xmm11, byte [r14 + rbx + 9], 2 + WORD $0x8948; BYTE $0xf8 // mov rax, rdi + QUAD $0x093e5c203a0f4566; BYTE $0x03 // pinsrb xmm11, byte [r14 + rdi + 9], 3 + QUAD $0x09165c203a0f4566; BYTE $0x04 // pinsrb xmm11, byte [r14 + rdx + 9], 4 + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] + QUAD $0x090e5c203a0f4566; BYTE $0x05 // pinsrb xmm11, byte [r14 + rcx + 9], 5 + QUAD $0x00000090249c8b4c // mov r11, qword [rsp + 144] + QUAD $0x091e5c203a0f4766; BYTE $0x06 // pinsrb xmm11, byte [r14 + r11 + 9], 6 + LONG $0x24748b48; BYTE $0x58 // mov rsi, qword [rsp + 88] + QUAD $0x09365c203a0f4566; BYTE $0x07 // pinsrb xmm11, byte [r14 + rsi + 9], 7 + QUAD $0x092e5c203a0f4766; BYTE $0x08 // pinsrb xmm11, byte [r14 + r13 + 9], 8 + QUAD $0x00000088248c8b48 // mov rcx, qword [rsp + 136] + QUAD $0x090e5c203a0f4566; BYTE $0x09 // pinsrb xmm11, byte [r14 + rcx + 9], 9 + QUAD $0x093e5c203a0f4766; BYTE $0x0a // pinsrb xmm11, byte [r14 + r15 + 9], 10 + WORD $0x894c; BYTE $0xca // mov rdx, r9 + QUAD $0x090e5c203a0f4766; BYTE $0x0b // pinsrb xmm11, byte [r14 + r9 + 9], 11 + WORD $0x894c; BYTE $0xe7 // mov rdi, r12 + QUAD $0x09265c203a0f4766; BYTE $0x0c // pinsrb xmm11, byte [r14 + r12 + 9], 12 + LONG $0x245c8b48; 
BYTE $0x18 // mov rbx, qword [rsp + 24] QUAD $0x091e5c203a0f4566; BYTE $0x0d // pinsrb xmm11, byte [r14 + rbx + 9], 13 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x09065c203a0f4566; BYTE $0x0e // pinsrb xmm11, byte [r14 + rax + 9], 14 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] - QUAD $0x090e5c203a0f4566; BYTE $0x0f // pinsrb xmm11, byte [r14 + rcx + 9], 15 - QUAD $0x0a3e64203a0f4766; BYTE $0x01 // pinsrb xmm12, byte [r14 + r15 + 10], 1 + LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] + QUAD $0x09065c203a0f4766; BYTE $0x0e // pinsrb xmm11, byte [r14 + r8 + 9], 14 + LONG $0x244c8b4c; BYTE $0x50 // mov r9, qword [rsp + 80] + QUAD $0x090e5c203a0f4766; BYTE $0x0f // pinsrb xmm11, byte [r14 + r9 + 9], 15 + QUAD $0x0a1664203a0f4766; BYTE $0x01 // pinsrb xmm12, byte [r14 + r10 + 10], 1 + LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] QUAD $0x0a1664203a0f4766; BYTE $0x02 // pinsrb xmm12, byte [r14 + r10 + 10], 2 - QUAD $0x0a2e64203a0f4766; BYTE $0x03 // pinsrb xmm12, byte [r14 + r13 + 10], 3 - LONG $0x24548b4c; BYTE $0x18 // mov r10, qword [rsp + 24] - QUAD $0x0a1664203a0f4766; BYTE $0x04 // pinsrb xmm12, byte [r14 + r10 + 10], 4 - QUAD $0x0a0e64203a0f4766; BYTE $0x05 // pinsrb xmm12, byte [r14 + r9 + 10], 5 - QUAD $0x00000088248c8b4c // mov r9, qword [rsp + 136] - QUAD $0x0a0e64203a0f4766; BYTE $0x06 // pinsrb xmm12, byte [r14 + r9 + 10], 6 - QUAD $0x0a1e64203a0f4766; BYTE $0x07 // pinsrb xmm12, byte [r14 + r11 + 10], 7 - QUAD $0x0a3664203a0f4566; BYTE $0x08 // pinsrb xmm12, byte [r14 + rsi + 10], 8 - QUAD $0x0a0664203a0f4766; BYTE $0x09 // pinsrb xmm12, byte [r14 + r8 + 10], 9 - QUAD $0x0a2664203a0f4766; BYTE $0x0a // pinsrb xmm12, byte [r14 + r12 + 10], 10 + QUAD $0x0a0664203a0f4566; BYTE $0x03 // pinsrb xmm12, byte [r14 + rax + 10], 3 + LONG $0x24648b4c; BYTE $0x78 // mov r12, qword [rsp + 120] + QUAD $0x0a2664203a0f4766; BYTE $0x04 // pinsrb xmm12, byte [r14 + r12 + 10], 4 + QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] + QUAD $0x0a1e64203a0f4566; BYTE $0x05 // pinsrb xmm12, byte [r14 + rbx + 10], 5 + QUAD $0x0a1e64203a0f4766; BYTE $0x06 // pinsrb xmm12, byte [r14 + r11 + 10], 6 + QUAD $0x0a3664203a0f4566; BYTE $0x07 // pinsrb xmm12, byte [r14 + rsi + 10], 7 + QUAD $0x0a2e64203a0f4766; BYTE $0x08 // pinsrb xmm12, byte [r14 + r13 + 10], 8 + QUAD $0x0a0e64203a0f4566; BYTE $0x09 // pinsrb xmm12, byte [r14 + rcx + 10], 9 + QUAD $0x0a3e64203a0f4766; BYTE $0x0a // pinsrb xmm12, byte [r14 + r15 + 10], 10 QUAD $0x0a1664203a0f4566; BYTE $0x0b // pinsrb xmm12, byte [r14 + rdx + 10], 11 QUAD $0x0a3e64203a0f4566; BYTE $0x0c // pinsrb xmm12, byte [r14 + rdi + 10], 12 + LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] QUAD $0x0a1e64203a0f4566; BYTE $0x0d // pinsrb xmm12, byte [r14 + rbx + 10], 13 - QUAD $0x0a0664203a0f4566; BYTE $0x0e // pinsrb xmm12, byte [r14 + rax + 10], 14 - WORD $0x8948; BYTE $0xc3 // mov rbx, rax - QUAD $0x0a0e64203a0f4566; BYTE $0x0f // pinsrb xmm12, byte [r14 + rcx + 10], 15 - QUAD $0x0b3e6c203a0f4766; BYTE $0x01 // pinsrb xmm13, byte [r14 + r15 + 11], 1 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] - QUAD $0x0b2e6c203a0f4766; BYTE $0x02 // pinsrb xmm13, byte [r14 + r13 + 11], 2 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0a0664203a0f4766; BYTE $0x0e // pinsrb xmm12, byte [r14 + r8 + 10], 14 + QUAD $0x0a0e64203a0f4766; BYTE $0x0f // pinsrb xmm12, byte [r14 + r9 + 10], 15 + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] + QUAD $0x0b1e6c203a0f4566; BYTE 
$0x01 // pinsrb xmm13, byte [r14 + rbx + 11], 1 + QUAD $0x0b166c203a0f4766; BYTE $0x02 // pinsrb xmm13, byte [r14 + r10 + 11], 2 + WORD $0x894d; BYTE $0xd4 // mov r12, r10 QUAD $0x0b066c203a0f4566; BYTE $0x03 // pinsrb xmm13, byte [r14 + rax + 11], 3 + LONG $0x24548b4c; BYTE $0x78 // mov r10, qword [rsp + 120] QUAD $0x0b166c203a0f4766; BYTE $0x04 // pinsrb xmm13, byte [r14 + r10 + 11], 4 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x0b066c203a0f4566; BYTE $0x05 // pinsrb xmm13, byte [r14 + rax + 11], 5 - QUAD $0x0b0e6c203a0f4766; BYTE $0x06 // pinsrb xmm13, byte [r14 + r9 + 11], 6 - QUAD $0x0b1e6c203a0f4766; BYTE $0x07 // pinsrb xmm13, byte [r14 + r11 + 11], 7 - QUAD $0x0b366c203a0f4566; BYTE $0x08 // pinsrb xmm13, byte [r14 + rsi + 11], 8 - QUAD $0x0b066c203a0f4766; BYTE $0x09 // pinsrb xmm13, byte [r14 + r8 + 11], 9 - WORD $0x894c; BYTE $0xc0 // mov rax, r8 - QUAD $0x0b266c203a0f4766; BYTE $0x0a // pinsrb xmm13, byte [r14 + r12 + 11], 10 + QUAD $0x0b1e6c203a0f4766; BYTE $0x06 // pinsrb xmm13, byte [r14 + r11 + 11], 6 + QUAD $0x0b366c203a0f4566; BYTE $0x07 // pinsrb xmm13, byte [r14 + rsi + 11], 7 + QUAD $0x0b2e6c203a0f4766; BYTE $0x08 // pinsrb xmm13, byte [r14 + r13 + 11], 8 + QUAD $0x0b0e6c203a0f4566; BYTE $0x09 // pinsrb xmm13, byte [r14 + rcx + 11], 9 + QUAD $0x0b3e6c203a0f4766; BYTE $0x0a // pinsrb xmm13, byte [r14 + r15 + 11], 10 + WORD $0x894c; BYTE $0xf9 // mov rcx, r15 QUAD $0x0b166c203a0f4566; BYTE $0x0b // pinsrb xmm13, byte [r14 + rdx + 11], 11 QUAD $0x0b3e6c203a0f4566; BYTE $0x0c // pinsrb xmm13, byte [r14 + rdi + 11], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0b366c203a0f4566; BYTE $0x0d // pinsrb xmm13, byte [r14 + rsi + 11], 13 - QUAD $0x0b1e6c203a0f4566; BYTE $0x0e // pinsrb xmm13, byte [r14 + rbx + 11], 14 - QUAD $0x0b0e6c203a0f4566; BYTE $0x0f // pinsrb xmm13, byte [r14 + rcx + 11], 15 + LONG $0x246c8b4c; BYTE $0x18 // mov r13, qword [rsp + 24] + QUAD $0x0b2e6c203a0f4766; BYTE $0x0d // pinsrb xmm13, byte [r14 + r13 + 11], 13 + QUAD $0x0b066c203a0f4766; BYTE $0x0e // pinsrb xmm13, byte [r14 + r8 + 11], 14 + QUAD $0x0b0e6c203a0f4766; BYTE $0x0f // pinsrb xmm13, byte [r14 + r9 + 11], 15 LONG $0x740f4566; BYTE $0xde // pcmpeqb xmm11, xmm14 QUAD $0x0001009ddf0f4466; BYTE $0x00 // pandn xmm11, oword 256[rbp] /* [rip + .LCPI5_16] */ LONG $0xfc0f4466; BYTE $0xd8 // paddb xmm11, xmm0 - QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] - LONG $0x74b60f41; WORD $0x170e // movzx esi, byte [r14 + rcx + 23] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x74b60f41; WORD $0x1706 // movzx esi, byte [r14 + rax + 23] LONG $0x6e0f4466; BYTE $0xc6 // movd xmm8, esi LONG $0x740f4566; BYTE $0xe6 // pcmpeqb xmm12, xmm14 QUAD $0x000110a5df0f4466; BYTE $0x00 // pandn xmm12, oword 272[rbp] /* [rip + .LCPI5_17] */ LONG $0x740f4566; BYTE $0xee // pcmpeqb xmm13, xmm14 QUAD $0x000120addf0f4466; BYTE $0x00 // pandn xmm13, oword 288[rbp] /* [rip + .LCPI5_18] */ LONG $0xeb0f4566; BYTE $0xec // por xmm13, xmm12 - LONG $0x74b60f41; WORD $0x180e // movzx esi, byte [r14 + rcx + 24] + LONG $0x74b60f41; WORD $0x1806 // movzx esi, byte [r14 + rax + 24] LONG $0x6e0f4466; BYTE $0xe6 // movd xmm12, esi - QUAD $0x00e0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 224] - QUAD $0x0c3e4c203a0f4766; BYTE $0x01 // pinsrb xmm9, byte [r14 + r15 + 12], 1 - LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] - QUAD $0x0c2e4c203a0f4766; BYTE $0x02 // pinsrb xmm9, byte [r14 
+ r13 + 12], 2 - LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] + QUAD $0x00d0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 208] + QUAD $0x0c1e4c203a0f4566; BYTE $0x01 // pinsrb xmm9, byte [r14 + rbx + 12], 1 + QUAD $0x0c264c203a0f4766; BYTE $0x02 // pinsrb xmm9, byte [r14 + r12 + 12], 2 + LONG $0x247c8b4c; BYTE $0x28 // mov r15, qword [rsp + 40] QUAD $0x0c3e4c203a0f4766; BYTE $0x03 // pinsrb xmm9, byte [r14 + r15 + 12], 3 - LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] - QUAD $0x0c1e4c203a0f4566; BYTE $0x04 // pinsrb xmm9, byte [r14 + rbx + 12], 4 - LONG $0x24548b4c; BYTE $0x38 // mov r10, qword [rsp + 56] + WORD $0x894c; BYTE $0xd3 // mov rbx, r10 + QUAD $0x0c164c203a0f4766; BYTE $0x04 // pinsrb xmm9, byte [r14 + r10 + 12], 4 + QUAD $0x0000008024948b4c // mov r10, qword [rsp + 128] QUAD $0x0c164c203a0f4766; BYTE $0x05 // pinsrb xmm9, byte [r14 + r10 + 12], 5 - QUAD $0x0c0e4c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r9 + 12], 6 - WORD $0x894d; BYTE $0xd8 // mov r8, r11 + WORD $0x894d; BYTE $0xd9 // mov r9, r11 + QUAD $0x0c1e4c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r11 + 12], 6 + LONG $0x245c8b4c; BYTE $0x58 // mov r11, qword [rsp + 88] QUAD $0x0c1e4c203a0f4766; BYTE $0x07 // pinsrb xmm9, byte [r14 + r11 + 12], 7 - QUAD $0x00000098249c8b4c // mov r11, qword [rsp + 152] - QUAD $0x0c1e4c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r11 + 12], 8 - WORD $0x8949; BYTE $0xc4 // mov r12, rax - QUAD $0x0c064c203a0f4566; BYTE $0x09 // pinsrb xmm9, byte [r14 + rax + 12], 9 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] + LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] + QUAD $0x0c064c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r8 + 12], 8 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] + QUAD $0x0c364c203a0f4566; BYTE $0x09 // pinsrb xmm9, byte [r14 + rsi + 12], 9 QUAD $0x0c0e4c203a0f4566; BYTE $0x0a // pinsrb xmm9, byte [r14 + rcx + 12], 10 QUAD $0x0c164c203a0f4566; BYTE $0x0b // pinsrb xmm9, byte [r14 + rdx + 12], 11 QUAD $0x0c3e4c203a0f4566; BYTE $0x0c // pinsrb xmm9, byte [r14 + rdi + 12], 12 - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] - QUAD $0x0c364c203a0f4566; BYTE $0x0d // pinsrb xmm9, byte [r14 + rsi + 12], 13 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0c2e4c203a0f4766; BYTE $0x0d // pinsrb xmm9, byte [r14 + r13 + 12], 13 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0c064c203a0f4566; BYTE $0x0e // pinsrb xmm9, byte [r14 + rax + 12], 14 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0c064c203a0f4566; BYTE $0x0f // pinsrb xmm9, byte [r14 + rax + 12], 15 LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x0d0674203a0f4166; BYTE $0x01 // pinsrb xmm6, byte [r14 + rax + 13], 1 - QUAD $0x0d2e74203a0f4366; BYTE $0x02 // pinsrb xmm6, byte [r14 + r13 + 13], 2 + QUAD $0x0d2674203a0f4366; BYTE $0x02 // pinsrb xmm6, byte [r14 + r12 + 13], 2 QUAD $0x0d3e74203a0f4366; BYTE $0x03 // pinsrb xmm6, byte [r14 + r15 + 13], 3 QUAD $0x0d1e74203a0f4166; BYTE $0x04 // pinsrb xmm6, byte [r14 + rbx + 13], 4 QUAD $0x0d1674203a0f4366; BYTE $0x05 // pinsrb xmm6, byte [r14 + r10 + 13], 5 QUAD $0x0d0e74203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r9 + 13], 6 - QUAD $0x0d0674203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r8 + 13], 7 - QUAD $0x0d1e74203a0f4366; BYTE $0x08 // pinsrb xmm6, byte [r14 + r11 + 13], 8 - QUAD $0x0d2674203a0f4366; BYTE $0x09 // pinsrb 
xmm6, byte [r14 + r12 + 13], 9 + QUAD $0x0d1e74203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r11 + 13], 7 + QUAD $0x0d0674203a0f4366; BYTE $0x08 // pinsrb xmm6, byte [r14 + r8 + 13], 8 + QUAD $0x0d3674203a0f4166; BYTE $0x09 // pinsrb xmm6, byte [r14 + rsi + 13], 9 QUAD $0x0d0e74203a0f4166; BYTE $0x0a // pinsrb xmm6, byte [r14 + rcx + 13], 10 QUAD $0x0d1674203a0f4166; BYTE $0x0b // pinsrb xmm6, byte [r14 + rdx + 13], 11 QUAD $0x0d3e74203a0f4166; BYTE $0x0c // pinsrb xmm6, byte [r14 + rdi + 13], 12 - QUAD $0x0d3674203a0f4166; BYTE $0x0d // pinsrb xmm6, byte [r14 + rsi + 13], 13 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x0d0674203a0f4166; BYTE $0x0e // pinsrb xmm6, byte [r14 + rax + 13], 14 - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0d2e74203a0f4366; BYTE $0x0d // pinsrb xmm6, byte [r14 + r13 + 13], 13 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x0d2e74203a0f4366; BYTE $0x0e // pinsrb xmm6, byte [r14 + r13 + 13], 14 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0d0674203a0f4166; BYTE $0x0f // pinsrb xmm6, byte [r14 + rax + 13], 15 LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x0e067c203a0f4566; BYTE $0x01 // pinsrb xmm15, byte [r14 + rax + 14], 1 - QUAD $0x0e2e7c203a0f4766; BYTE $0x02 // pinsrb xmm15, byte [r14 + r13 + 14], 2 + QUAD $0x0e267c203a0f4766; BYTE $0x02 // pinsrb xmm15, byte [r14 + r12 + 14], 2 QUAD $0x0e3e7c203a0f4766; BYTE $0x03 // pinsrb xmm15, byte [r14 + r15 + 14], 3 QUAD $0x0e1e7c203a0f4566; BYTE $0x04 // pinsrb xmm15, byte [r14 + rbx + 14], 4 QUAD $0x0e167c203a0f4766; BYTE $0x05 // pinsrb xmm15, byte [r14 + r10 + 14], 5 - WORD $0x894c; BYTE $0xd3 // mov rbx, r10 + WORD $0x894d; BYTE $0xd4 // mov r12, r10 QUAD $0x0e0e7c203a0f4766; BYTE $0x06 // pinsrb xmm15, byte [r14 + r9 + 14], 6 - QUAD $0x0e067c203a0f4766; BYTE $0x07 // pinsrb xmm15, byte [r14 + r8 + 14], 7 - QUAD $0x0e1e7c203a0f4766; BYTE $0x08 // pinsrb xmm15, byte [r14 + r11 + 14], 8 - QUAD $0x0e267c203a0f4766; BYTE $0x09 // pinsrb xmm15, byte [r14 + r12 + 14], 9 + WORD $0x894d; BYTE $0xca // mov r10, r9 + QUAD $0x0e1e7c203a0f4766; BYTE $0x07 // pinsrb xmm15, byte [r14 + r11 + 14], 7 + WORD $0x894d; BYTE $0xd9 // mov r9, r11 + QUAD $0x0e067c203a0f4766; BYTE $0x08 // pinsrb xmm15, byte [r14 + r8 + 14], 8 + QUAD $0x0e367c203a0f4566; BYTE $0x09 // pinsrb xmm15, byte [r14 + rsi + 14], 9 QUAD $0x0e0e7c203a0f4566; BYTE $0x0a // pinsrb xmm15, byte [r14 + rcx + 14], 10 - WORD $0x8949; BYTE $0xcc // mov r12, rcx + WORD $0x8949; BYTE $0xcb // mov r11, rcx QUAD $0x0e167c203a0f4566; BYTE $0x0b // pinsrb xmm15, byte [r14 + rdx + 14], 11 - WORD $0x8949; BYTE $0xd2 // mov r10, rdx QUAD $0x0e3e7c203a0f4566; BYTE $0x0c // pinsrb xmm15, byte [r14 + rdi + 14], 12 - QUAD $0x0e367c203a0f4566; BYTE $0x0d // pinsrb xmm15, byte [r14 + rsi + 14], 13 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x0e067c203a0f4566; BYTE $0x0e // pinsrb xmm15, byte [r14 + rax + 14], 14 + WORD $0x8948; BYTE $0xfa // mov rdx, rdi + LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] + QUAD $0x0e1e7c203a0f4566; BYTE $0x0d // pinsrb xmm15, byte [r14 + rbx + 14], 13 + QUAD $0x0e2e7c203a0f4766; BYTE $0x0e // pinsrb xmm15, byte [r14 + r13 + 14], 14 LONG $0x740f4566; BYTE $0xce // pcmpeqb xmm9, xmm14 QUAD $0x0001308ddf0f4466; BYTE $0x00 // pandn xmm9, oword 304[rbp] /* [rip + .LCPI5_19] */ LONG $0xeb0f4566; BYTE $0xcd // por xmm9, xmm13 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - LONG $0x74b60f41; WORD 
$0x1906 // movzx esi, byte [r14 + rax + 25] + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x74b60f41; WORD $0x190e // movzx esi, byte [r14 + rcx + 25] LONG $0x6e0f4466; BYTE $0xee // movd xmm13, esi QUAD $0x0001609df80f4466; BYTE $0x00 // psubb xmm11, oword 352[rbp] /* [rip + .LCPI5_22] */ LONG $0xeb0f4566; BYTE $0xcb // por xmm9, xmm11 - LONG $0x74b60f41; WORD $0x1a06 // movzx esi, byte [r14 + rax + 26] + LONG $0x74b60f41; WORD $0x1a0e // movzx esi, byte [r14 + rcx + 26] LONG $0xc66e0f66 // movd xmm0, esi - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] - QUAD $0x0e0e7c203a0f4566; BYTE $0x0f // pinsrb xmm15, byte [r14 + rcx + 14], 15 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0e067c203a0f4566; BYTE $0x0f // pinsrb xmm15, byte [r14 + rax + 14], 15 LONG $0x740f4166; BYTE $0xf6 // pcmpeqb xmm6, xmm14 QUAD $0x00000140b5df0f66 // pandn xmm6, oword 320[rbp] /* [rip + .LCPI5_20] */ LONG $0x740f4566; BYTE $0xfe // pcmpeqb xmm15, xmm14 QUAD $0x000150bddf0f4466; BYTE $0x00 // pandn xmm15, oword 336[rbp] /* [rip + .LCPI5_21] */ LONG $0xeb0f4466; BYTE $0xfe // por xmm15, xmm6 - LONG $0x74b60f41; WORD $0x1b06 // movzx esi, byte [r14 + rax + 27] + LONG $0x74b60f41; WORD $0x1b0e // movzx esi, byte [r14 + rcx + 27] LONG $0x6e0f4466; BYTE $0xde // movd xmm11, esi QUAD $0x0000c024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 192] - LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] - QUAD $0x0f3e74203a0f4166; BYTE $0x01 // pinsrb xmm6, byte [r14 + rdi + 15], 1 - WORD $0x894c; BYTE $0xea // mov rdx, r13 - QUAD $0x0f2e74203a0f4366; BYTE $0x02 // pinsrb xmm6, byte [r14 + r13 + 15], 2 - QUAD $0x0f3e74203a0f4366; BYTE $0x03 // pinsrb xmm6, byte [r14 + r15 + 15], 3 - LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] - QUAD $0x0f0e74203a0f4366; BYTE $0x04 // pinsrb xmm6, byte [r14 + r9 + 15], 4 - QUAD $0x0f1e74203a0f4166; BYTE $0x05 // pinsrb xmm6, byte [r14 + rbx + 15], 5 - QUAD $0x00000088249c8b48 // mov rbx, qword [rsp + 136] - QUAD $0x0f1e74203a0f4166; BYTE $0x06 // pinsrb xmm6, byte [r14 + rbx + 15], 6 - QUAD $0x0f0674203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r8 + 15], 7 - QUAD $0x0f1e74203a0f4366; BYTE $0x08 // pinsrb xmm6, byte [r14 + r11 + 15], 8 - LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] - QUAD $0x0f0674203a0f4366; BYTE $0x09 // pinsrb xmm6, byte [r14 + r8 + 15], 9 - QUAD $0x0f2674203a0f4366; BYTE $0x0a // pinsrb xmm6, byte [r14 + r12 + 15], 10 + LONG $0x247c8b4c; BYTE $0x10 // mov r15, qword [rsp + 16] + QUAD $0x0f3e74203a0f4366; BYTE $0x01 // pinsrb xmm6, byte [r14 + r15 + 15], 1 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x0f0e74203a0f4166; BYTE $0x02 // pinsrb xmm6, byte [r14 + rcx + 15], 2 + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + QUAD $0x0f3e74203a0f4166; BYTE $0x03 // pinsrb xmm6, byte [r14 + rdi + 15], 3 + LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] + QUAD $0x0f0674203a0f4366; BYTE $0x04 // pinsrb xmm6, byte [r14 + r8 + 15], 4 + QUAD $0x0f2674203a0f4366; BYTE $0x05 // pinsrb xmm6, byte [r14 + r12 + 15], 5 + WORD $0x894c; BYTE $0xd1 // mov rcx, r10 + QUAD $0x0f1674203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r10 + 15], 6 + QUAD $0x0f0e74203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r9 + 15], 7 + LONG $0x24748b48; BYTE $0x48 // mov rsi, qword [rsp + 72] + QUAD $0x0f3674203a0f4166; BYTE $0x08 // pinsrb xmm6, byte [r14 + rsi + 15], 8 + QUAD $0x00000088248c8b4c // mov r9, qword [rsp + 136] + QUAD $0x0f0e74203a0f4366; BYTE $0x09 // 
pinsrb xmm6, byte [r14 + r9 + 15], 9 + QUAD $0x0f1e74203a0f4366; BYTE $0x0a // pinsrb xmm6, byte [r14 + r11 + 15], 10 + QUAD $0x0000009824948b4c // mov r10, qword [rsp + 152] QUAD $0x0f1674203a0f4366; BYTE $0x0b // pinsrb xmm6, byte [r14 + r10 + 15], 11 - WORD $0x894d; BYTE $0xd7 // mov r15, r10 - LONG $0x24548b4c; BYTE $0x78 // mov r10, qword [rsp + 120] - QUAD $0x0f1674203a0f4366; BYTE $0x0c // pinsrb xmm6, byte [r14 + r10 + 15], 12 - LONG $0x246c8b4c; BYTE $0x30 // mov r13, qword [rsp + 48] - QUAD $0x0f2e74203a0f4366; BYTE $0x0d // pinsrb xmm6, byte [r14 + r13 + 15], 13 - LONG $0x24748b48; BYTE $0x40 // mov rsi, qword [rsp + 64] - QUAD $0x0f3674203a0f4166; BYTE $0x0e // pinsrb xmm6, byte [r14 + rsi + 15], 14 - QUAD $0x0f0e74203a0f4166; BYTE $0x0f // pinsrb xmm6, byte [r14 + rcx + 15], 15 + QUAD $0x0f1674203a0f4166; BYTE $0x0c // pinsrb xmm6, byte [r14 + rdx + 15], 12 + QUAD $0x0f1e74203a0f4166; BYTE $0x0d // pinsrb xmm6, byte [r14 + rbx + 15], 13 + QUAD $0x0f2e74203a0f4366; BYTE $0x0e // pinsrb xmm6, byte [r14 + r13 + 15], 14 + QUAD $0x0f0674203a0f4166; BYTE $0x0f // pinsrb xmm6, byte [r14 + rax + 15], 15 LONG $0x740f4166; BYTE $0xf6 // pcmpeqb xmm6, xmm14 LONG $0x75df0f66; BYTE $0x60 // pandn xmm6, oword 96[rbp] /* [rip + .LCPI5_6] */ LONG $0xeb0f4166; BYTE $0xf7 // por xmm6, xmm15 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] LONG $0x74b60f41; WORD $0x1c06 // movzx esi, byte [r14 + rax + 28] LONG $0x6e0f4466; BYTE $0xfe // movd xmm15, esi LONG $0xeb0f4166; BYTE $0xf1 // por xmm6, xmm9 QUAD $0x0000c024b47f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm6 LONG $0x74b60f41; WORD $0x1d06 // movzx esi, byte [r14 + rax + 29] LONG $0x6e0f4466; BYTE $0xce // movd xmm9, esi - WORD $0x8948; BYTE $0xfe // mov rsi, rdi - QUAD $0x103e54203a0f4566; BYTE $0x01 // pinsrb xmm10, byte [r14 + rdi + 16], 1 - WORD $0x8948; BYTE $0xd0 // mov rax, rdx - QUAD $0x101654203a0f4566; BYTE $0x02 // pinsrb xmm10, byte [r14 + rdx + 16], 2 - LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] - QUAD $0x101654203a0f4566; BYTE $0x03 // pinsrb xmm10, byte [r14 + rdx + 16], 3 - QUAD $0x100e54203a0f4766; BYTE $0x04 // pinsrb xmm10, byte [r14 + r9 + 16], 4 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] - QUAD $0x103e54203a0f4566; BYTE $0x05 // pinsrb xmm10, byte [r14 + rdi + 16], 5 - QUAD $0x101e54203a0f4566; BYTE $0x06 // pinsrb xmm10, byte [r14 + rbx + 16], 6 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] - QUAD $0x101e54203a0f4566; BYTE $0x07 // pinsrb xmm10, byte [r14 + rbx + 16], 7 - QUAD $0x101e54203a0f4766; BYTE $0x08 // pinsrb xmm10, byte [r14 + r11 + 16], 8 - QUAD $0x100654203a0f4766; BYTE $0x09 // pinsrb xmm10, byte [r14 + r8 + 16], 9 - QUAD $0x102654203a0f4766; BYTE $0x0a // pinsrb xmm10, byte [r14 + r12 + 16], 10 - QUAD $0x103e54203a0f4766; BYTE $0x0b // pinsrb xmm10, byte [r14 + r15 + 16], 11 - QUAD $0x101654203a0f4766; BYTE $0x0c // pinsrb xmm10, byte [r14 + r10 + 16], 12 - QUAD $0x102e54203a0f4766; BYTE $0x0d // pinsrb xmm10, byte [r14 + r13 + 16], 13 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] - QUAD $0x100e54203a0f4566; BYTE $0x0e // pinsrb xmm10, byte [r14 + rcx + 16], 14 - LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] - QUAD $0x102e54203a0f4766; BYTE $0x0f // pinsrb xmm10, byte [r14 + r13 + 16], 15 + WORD $0x894c; BYTE $0xfe // mov rsi, r15 + QUAD $0x103e54203a0f4766; BYTE $0x01 // pinsrb xmm10, byte [r14 + r15 + 16], 1 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x100654203a0f4566; BYTE $0x02 // pinsrb 
xmm10, byte [r14 + rax + 16], 2 + QUAD $0x103e54203a0f4566; BYTE $0x03 // pinsrb xmm10, byte [r14 + rdi + 16], 3 + WORD $0x8949; BYTE $0xfb // mov r11, rdi + QUAD $0x100654203a0f4766; BYTE $0x04 // pinsrb xmm10, byte [r14 + r8 + 16], 4 + QUAD $0x102654203a0f4766; BYTE $0x05 // pinsrb xmm10, byte [r14 + r12 + 16], 5 + QUAD $0x100e54203a0f4566; BYTE $0x06 // pinsrb xmm10, byte [r14 + rcx + 16], 6 + LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] + QUAD $0x101654203a0f4566; BYTE $0x07 // pinsrb xmm10, byte [r14 + rdx + 16], 7 + LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] + QUAD $0x100e54203a0f4566; BYTE $0x08 // pinsrb xmm10, byte [r14 + rcx + 16], 8 + QUAD $0x100e54203a0f4766; BYTE $0x09 // pinsrb xmm10, byte [r14 + r9 + 16], 9 + LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] + QUAD $0x103e54203a0f4766; BYTE $0x0a // pinsrb xmm10, byte [r14 + r15 + 16], 10 + QUAD $0x101654203a0f4766; BYTE $0x0b // pinsrb xmm10, byte [r14 + r10 + 16], 11 + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + QUAD $0x103e54203a0f4566; BYTE $0x0c // pinsrb xmm10, byte [r14 + rdi + 16], 12 + QUAD $0x101e54203a0f4566; BYTE $0x0d // pinsrb xmm10, byte [r14 + rbx + 16], 13 + QUAD $0x102e54203a0f4766; BYTE $0x0e // pinsrb xmm10, byte [r14 + r13 + 16], 14 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x100654203a0f4566; BYTE $0x0f // pinsrb xmm10, byte [r14 + rax + 16], 15 QUAD $0x113664203a0f4166; BYTE $0x01 // pinsrb xmm4, byte [r14 + rsi + 17], 1 - QUAD $0x110664203a0f4166; BYTE $0x02 // pinsrb xmm4, byte [r14 + rax + 17], 2 - QUAD $0x111664203a0f4166; BYTE $0x03 // pinsrb xmm4, byte [r14 + rdx + 17], 3 - QUAD $0x110e64203a0f4366; BYTE $0x04 // pinsrb xmm4, byte [r14 + r9 + 17], 4 - QUAD $0x113e64203a0f4166; BYTE $0x05 // pinsrb xmm4, byte [r14 + rdi + 17], 5 - QUAD $0x0000008824948b4c // mov r10, qword [rsp + 136] + LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + QUAD $0x113664203a0f4166; BYTE $0x02 // pinsrb xmm4, byte [r14 + rsi + 17], 2 + QUAD $0x111e64203a0f4366; BYTE $0x03 // pinsrb xmm4, byte [r14 + r11 + 17], 3 + QUAD $0x110664203a0f4366; BYTE $0x04 // pinsrb xmm4, byte [r14 + r8 + 17], 4 + QUAD $0x112664203a0f4366; BYTE $0x05 // pinsrb xmm4, byte [r14 + r12 + 17], 5 + QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] QUAD $0x111664203a0f4366; BYTE $0x06 // pinsrb xmm4, byte [r14 + r10 + 17], 6 - WORD $0x8949; BYTE $0xd9 // mov r9, rbx - QUAD $0x111e64203a0f4166; BYTE $0x07 // pinsrb xmm4, byte [r14 + rbx + 17], 7 - QUAD $0x111e64203a0f4366; BYTE $0x08 // pinsrb xmm4, byte [r14 + r11 + 17], 8 - QUAD $0x110664203a0f4366; BYTE $0x09 // pinsrb xmm4, byte [r14 + r8 + 17], 9 - QUAD $0x112664203a0f4366; BYTE $0x0a // pinsrb xmm4, byte [r14 + r12 + 17], 10 + QUAD $0x111664203a0f4166; BYTE $0x07 // pinsrb xmm4, byte [r14 + rdx + 17], 7 + QUAD $0x110e64203a0f4166; BYTE $0x08 // pinsrb xmm4, byte [r14 + rcx + 17], 8 + QUAD $0x110e64203a0f4366; BYTE $0x09 // pinsrb xmm4, byte [r14 + r9 + 17], 9 WORD $0x894d; BYTE $0xf8 // mov r8, r15 - QUAD $0x113e64203a0f4366; BYTE $0x0b // pinsrb xmm4, byte [r14 + r15 + 17], 11 - LONG $0x247c8b48; BYTE $0x78 // mov rdi, qword [rsp + 120] + QUAD $0x113e64203a0f4366; BYTE $0x0a // pinsrb xmm4, byte [r14 + r15 + 17], 10 + QUAD $0x0000009824948b48 // mov rdx, qword [rsp + 152] + QUAD $0x111664203a0f4166; BYTE $0x0b // pinsrb xmm4, byte [r14 + rdx + 17], 11 QUAD $0x113e64203a0f4166; BYTE $0x0c // pinsrb xmm4, byte [r14 + rdi + 17], 12 - LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] - QUAD 
$0x111664203a0f4166; BYTE $0x0d // pinsrb xmm4, byte [r14 + rdx + 17], 13 - QUAD $0x110e64203a0f4166; BYTE $0x0e // pinsrb xmm4, byte [r14 + rcx + 17], 14 - QUAD $0x112e64203a0f4366; BYTE $0x0f // pinsrb xmm4, byte [r14 + r13 + 17], 15 + WORD $0x8949; BYTE $0xfb // mov r11, rdi + QUAD $0x111e64203a0f4166; BYTE $0x0d // pinsrb xmm4, byte [r14 + rbx + 17], 13 + QUAD $0x112e64203a0f4366; BYTE $0x0e // pinsrb xmm4, byte [r14 + r13 + 17], 14 + QUAD $0x110664203a0f4166; BYTE $0x0f // pinsrb xmm4, byte [r14 + rax + 17], 15 + WORD $0x8948; BYTE $0xc1 // mov rcx, rax LONG $0x740f4566; BYTE $0xd6 // pcmpeqb xmm10, xmm14 LONG $0x740f4166; BYTE $0xe6 // pcmpeqb xmm4, xmm14 QUAD $0x00000100b56f0f66 // movdqa xmm6, oword 256[rbp] /* [rip + .LCPI5_16] */ LONG $0xe6df0f66 // pandn xmm4, xmm6 LONG $0xfc0f4166; BYTE $0xe2 // paddb xmm4, xmm10 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] LONG $0x74b60f41; WORD $0x1e06 // movzx esi, byte [r14 + rax + 30] LONG $0x6e0f4466; BYTE $0xd6 // movd xmm10, esi LONG $0x24748b48; BYTE $0x10 // mov rsi, qword [rsp + 16] @@ -28290,7 +29548,7 @@ LBB5_67: LONG $0x44b60f41; WORD $0x1f06 // movzx eax, byte [r14 + rax + 31] LONG $0xf06e0f66 // movd xmm6, eax QUAD $0x1f3674203a0f4166; BYTE $0x01 // pinsrb xmm6, byte [r14 + rsi + 31], 1 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] QUAD $0x12067c203a0f4166; BYTE $0x02 // pinsrb xmm7, byte [r14 + rax + 18], 2 QUAD $0x13066c203a0f4166; BYTE $0x02 // pinsrb xmm5, byte [r14 + rax + 19], 2 QUAD $0x14065c203a0f4166; BYTE $0x02 // pinsrb xmm3, byte [r14 + rax + 20], 2 @@ -28305,48 +29563,49 @@ LBB5_67: QUAD $0x1d064c203a0f4566; BYTE $0x02 // pinsrb xmm9, byte [r14 + rax + 29], 2 QUAD $0x1e0654203a0f4566; BYTE $0x02 // pinsrb xmm10, byte [r14 + rax + 30], 2 QUAD $0x1f0674203a0f4166; BYTE $0x02 // pinsrb xmm6, byte [r14 + rax + 31], 2 - LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] + LONG $0x247c8b4c; BYTE $0x28 // mov r15, qword [rsp + 40] QUAD $0x123e7c203a0f4366; BYTE $0x03 // pinsrb xmm7, byte [r14 + r15 + 18], 3 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x78 // mov rax, qword [rsp + 120] QUAD $0x12067c203a0f4166; BYTE $0x04 // pinsrb xmm7, byte [r14 + rax + 18], 4 - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] - QUAD $0x121e7c203a0f4166; BYTE $0x05 // pinsrb xmm7, byte [r14 + rbx + 18], 5 + QUAD $0x12267c203a0f4366; BYTE $0x05 // pinsrb xmm7, byte [r14 + r12 + 18], 5 QUAD $0x12167c203a0f4366; BYTE $0x06 // pinsrb xmm7, byte [r14 + r10 + 18], 6 - QUAD $0x120e7c203a0f4366; BYTE $0x07 // pinsrb xmm7, byte [r14 + r9 + 18], 7 - QUAD $0x121e7c203a0f4366; BYTE $0x08 // pinsrb xmm7, byte [r14 + r11 + 18], 8 - LONG $0x24748b48; BYTE $0x60 // mov rsi, qword [rsp + 96] + LONG $0x247c8b48; BYTE $0x58 // mov rdi, qword [rsp + 88] + QUAD $0x123e7c203a0f4166; BYTE $0x07 // pinsrb xmm7, byte [r14 + rdi + 18], 7 + LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] + QUAD $0x120e7c203a0f4366; BYTE $0x08 // pinsrb xmm7, byte [r14 + r9 + 18], 8 + QUAD $0x0000008824b48b48 // mov rsi, qword [rsp + 136] QUAD $0x12367c203a0f4166; BYTE $0x09 // pinsrb xmm7, byte [r14 + rsi + 18], 9 - QUAD $0x12267c203a0f4366; BYTE $0x0a // pinsrb xmm7, byte [r14 + r12 + 18], 10 - QUAD $0x12067c203a0f4366; BYTE $0x0b // pinsrb xmm7, byte [r14 + r8 + 18], 11 - QUAD $0x123e7c203a0f4166; BYTE $0x0c // pinsrb xmm7, byte [r14 + rdi + 18], 12 - QUAD 
$0x12167c203a0f4166; BYTE $0x0d // pinsrb xmm7, byte [r14 + rdx + 18], 13 - QUAD $0x120e7c203a0f4166; BYTE $0x0e // pinsrb xmm7, byte [r14 + rcx + 18], 14 - QUAD $0x122e7c203a0f4366; BYTE $0x0f // pinsrb xmm7, byte [r14 + r13 + 18], 15 + QUAD $0x12067c203a0f4366; BYTE $0x0a // pinsrb xmm7, byte [r14 + r8 + 18], 10 + QUAD $0x12167c203a0f4166; BYTE $0x0b // pinsrb xmm7, byte [r14 + rdx + 18], 11 + QUAD $0x121e7c203a0f4366; BYTE $0x0c // pinsrb xmm7, byte [r14 + r11 + 18], 12 + QUAD $0x121e7c203a0f4166; BYTE $0x0d // pinsrb xmm7, byte [r14 + rbx + 18], 13 + QUAD $0x122e7c203a0f4366; BYTE $0x0e // pinsrb xmm7, byte [r14 + r13 + 18], 14 + QUAD $0x120e7c203a0f4166; BYTE $0x0f // pinsrb xmm7, byte [r14 + rcx + 18], 15 QUAD $0x133e6c203a0f4366; BYTE $0x03 // pinsrb xmm5, byte [r14 + r15 + 19], 3 QUAD $0x13066c203a0f4166; BYTE $0x04 // pinsrb xmm5, byte [r14 + rax + 19], 4 - QUAD $0x131e6c203a0f4166; BYTE $0x05 // pinsrb xmm5, byte [r14 + rbx + 19], 5 + QUAD $0x13266c203a0f4366; BYTE $0x05 // pinsrb xmm5, byte [r14 + r12 + 19], 5 QUAD $0x13166c203a0f4366; BYTE $0x06 // pinsrb xmm5, byte [r14 + r10 + 19], 6 - QUAD $0x130e6c203a0f4366; BYTE $0x07 // pinsrb xmm5, byte [r14 + r9 + 19], 7 - QUAD $0x131e6c203a0f4366; BYTE $0x08 // pinsrb xmm5, byte [r14 + r11 + 19], 8 + QUAD $0x133e6c203a0f4166; BYTE $0x07 // pinsrb xmm5, byte [r14 + rdi + 19], 7 + QUAD $0x130e6c203a0f4366; BYTE $0x08 // pinsrb xmm5, byte [r14 + r9 + 19], 8 QUAD $0x13366c203a0f4166; BYTE $0x09 // pinsrb xmm5, byte [r14 + rsi + 19], 9 - QUAD $0x13266c203a0f4366; BYTE $0x0a // pinsrb xmm5, byte [r14 + r12 + 19], 10 - QUAD $0x13066c203a0f4366; BYTE $0x0b // pinsrb xmm5, byte [r14 + r8 + 19], 11 - QUAD $0x133e6c203a0f4166; BYTE $0x0c // pinsrb xmm5, byte [r14 + rdi + 19], 12 - QUAD $0x13166c203a0f4166; BYTE $0x0d // pinsrb xmm5, byte [r14 + rdx + 19], 13 - QUAD $0x130e6c203a0f4166; BYTE $0x0e // pinsrb xmm5, byte [r14 + rcx + 19], 14 - QUAD $0x132e6c203a0f4366; BYTE $0x0f // pinsrb xmm5, byte [r14 + r13 + 19], 15 + QUAD $0x13066c203a0f4366; BYTE $0x0a // pinsrb xmm5, byte [r14 + r8 + 19], 10 + QUAD $0x13166c203a0f4166; BYTE $0x0b // pinsrb xmm5, byte [r14 + rdx + 19], 11 + QUAD $0x131e6c203a0f4366; BYTE $0x0c // pinsrb xmm5, byte [r14 + r11 + 19], 12 + QUAD $0x131e6c203a0f4166; BYTE $0x0d // pinsrb xmm5, byte [r14 + rbx + 19], 13 + QUAD $0x132e6c203a0f4366; BYTE $0x0e // pinsrb xmm5, byte [r14 + r13 + 19], 14 + QUAD $0x130e6c203a0f4166; BYTE $0x0f // pinsrb xmm5, byte [r14 + rcx + 19], 15 QUAD $0x143e5c203a0f4366; BYTE $0x03 // pinsrb xmm3, byte [r14 + r15 + 20], 3 QUAD $0x14065c203a0f4166; BYTE $0x04 // pinsrb xmm3, byte [r14 + rax + 20], 4 - QUAD $0x141e5c203a0f4166; BYTE $0x05 // pinsrb xmm3, byte [r14 + rbx + 20], 5 + QUAD $0x14265c203a0f4366; BYTE $0x05 // pinsrb xmm3, byte [r14 + r12 + 20], 5 QUAD $0x14165c203a0f4366; BYTE $0x06 // pinsrb xmm3, byte [r14 + r10 + 20], 6 - QUAD $0x140e5c203a0f4366; BYTE $0x07 // pinsrb xmm3, byte [r14 + r9 + 20], 7 - QUAD $0x141e5c203a0f4366; BYTE $0x08 // pinsrb xmm3, byte [r14 + r11 + 20], 8 + QUAD $0x143e5c203a0f4166; BYTE $0x07 // pinsrb xmm3, byte [r14 + rdi + 20], 7 + QUAD $0x140e5c203a0f4366; BYTE $0x08 // pinsrb xmm3, byte [r14 + r9 + 20], 8 QUAD $0x14365c203a0f4166; BYTE $0x09 // pinsrb xmm3, byte [r14 + rsi + 20], 9 - QUAD $0x14265c203a0f4366; BYTE $0x0a // pinsrb xmm3, byte [r14 + r12 + 20], 10 - QUAD $0x14065c203a0f4366; BYTE $0x0b // pinsrb xmm3, byte [r14 + r8 + 20], 11 - QUAD $0x143e5c203a0f4166; BYTE $0x0c // pinsrb xmm3, byte [r14 + rdi + 20], 12 - QUAD $0x14165c203a0f4166; BYTE $0x0d // 
pinsrb xmm3, byte [r14 + rdx + 20], 13 - QUAD $0x140e5c203a0f4166; BYTE $0x0e // pinsrb xmm3, byte [r14 + rcx + 20], 14 + QUAD $0x14065c203a0f4366; BYTE $0x0a // pinsrb xmm3, byte [r14 + r8 + 20], 10 + QUAD $0x14165c203a0f4166; BYTE $0x0b // pinsrb xmm3, byte [r14 + rdx + 20], 11 + QUAD $0x141e5c203a0f4366; BYTE $0x0c // pinsrb xmm3, byte [r14 + r11 + 20], 12 + QUAD $0x141e5c203a0f4166; BYTE $0x0d // pinsrb xmm3, byte [r14 + rbx + 20], 13 + QUAD $0x142e5c203a0f4366; BYTE $0x0e // pinsrb xmm3, byte [r14 + r13 + 20], 14 LONG $0x740f4166; BYTE $0xfe // pcmpeqb xmm7, xmm14 QUAD $0x000110b56f0f4466; BYTE $0x00 // movdqa xmm14, oword 272[rbp] /* [rip + .LCPI5_17] */ LONG $0xdf0f4166; BYTE $0xfe // pandn xmm7, xmm14 @@ -28354,7 +29613,7 @@ LBB5_67: QUAD $0x000120b56f0f4466; BYTE $0x00 // movdqa xmm14, oword 288[rbp] /* [rip + .LCPI5_18] */ LONG $0xdf0f4166; BYTE $0xee // pandn xmm5, xmm14 LONG $0xefeb0f66 // por xmm5, xmm7 - QUAD $0x142e5c203a0f4366; BYTE $0x0f // pinsrb xmm3, byte [r14 + r13 + 20], 15 + QUAD $0x140e5c203a0f4166; BYTE $0x0f // pinsrb xmm3, byte [r14 + rcx + 20], 15 QUAD $0x00b024b46f0f4466; WORD $0x0000 // movdqa xmm14, oword [rsp + 176] LONG $0x740f4166; BYTE $0xde // pcmpeqb xmm3, xmm14 QUAD $0x00000130bd6f0f66 // movdqa xmm7, oword 304[rbp] /* [rip + .LCPI5_19] */ @@ -28365,42 +29624,42 @@ LBB5_67: LONG $0xdceb0f66 // por xmm3, xmm4 QUAD $0x153e54203a0f4366; BYTE $0x03 // pinsrb xmm2, byte [r14 + r15 + 21], 3 QUAD $0x150654203a0f4166; BYTE $0x04 // pinsrb xmm2, byte [r14 + rax + 21], 4 - QUAD $0x151e54203a0f4166; BYTE $0x05 // pinsrb xmm2, byte [r14 + rbx + 21], 5 + QUAD $0x152654203a0f4366; BYTE $0x05 // pinsrb xmm2, byte [r14 + r12 + 21], 5 QUAD $0x151654203a0f4366; BYTE $0x06 // pinsrb xmm2, byte [r14 + r10 + 21], 6 - QUAD $0x150e54203a0f4366; BYTE $0x07 // pinsrb xmm2, byte [r14 + r9 + 21], 7 - QUAD $0x151e54203a0f4366; BYTE $0x08 // pinsrb xmm2, byte [r14 + r11 + 21], 8 + QUAD $0x153e54203a0f4166; BYTE $0x07 // pinsrb xmm2, byte [r14 + rdi + 21], 7 + QUAD $0x150e54203a0f4366; BYTE $0x08 // pinsrb xmm2, byte [r14 + r9 + 21], 8 QUAD $0x153654203a0f4166; BYTE $0x09 // pinsrb xmm2, byte [r14 + rsi + 21], 9 - QUAD $0x152654203a0f4366; BYTE $0x0a // pinsrb xmm2, byte [r14 + r12 + 21], 10 - QUAD $0x150654203a0f4366; BYTE $0x0b // pinsrb xmm2, byte [r14 + r8 + 21], 11 - QUAD $0x153e54203a0f4166; BYTE $0x0c // pinsrb xmm2, byte [r14 + rdi + 21], 12 - QUAD $0x151654203a0f4166; BYTE $0x0d // pinsrb xmm2, byte [r14 + rdx + 21], 13 - QUAD $0x150e54203a0f4166; BYTE $0x0e // pinsrb xmm2, byte [r14 + rcx + 21], 14 - QUAD $0x152e54203a0f4366; BYTE $0x0f // pinsrb xmm2, byte [r14 + r13 + 21], 15 + QUAD $0x150654203a0f4366; BYTE $0x0a // pinsrb xmm2, byte [r14 + r8 + 21], 10 + QUAD $0x151654203a0f4166; BYTE $0x0b // pinsrb xmm2, byte [r14 + rdx + 21], 11 + QUAD $0x151e54203a0f4366; BYTE $0x0c // pinsrb xmm2, byte [r14 + r11 + 21], 12 + QUAD $0x151e54203a0f4166; BYTE $0x0d // pinsrb xmm2, byte [r14 + rbx + 21], 13 + QUAD $0x152e54203a0f4366; BYTE $0x0e // pinsrb xmm2, byte [r14 + r13 + 21], 14 + QUAD $0x150e54203a0f4166; BYTE $0x0f // pinsrb xmm2, byte [r14 + rcx + 21], 15 QUAD $0x163e4c203a0f4366; BYTE $0x03 // pinsrb xmm1, byte [r14 + r15 + 22], 3 QUAD $0x16064c203a0f4166; BYTE $0x04 // pinsrb xmm1, byte [r14 + rax + 22], 4 - QUAD $0x161e4c203a0f4166; BYTE $0x05 // pinsrb xmm1, byte [r14 + rbx + 22], 5 + QUAD $0x16264c203a0f4366; BYTE $0x05 // pinsrb xmm1, byte [r14 + r12 + 22], 5 QUAD $0x16164c203a0f4366; BYTE $0x06 // pinsrb xmm1, byte [r14 + r10 + 22], 6 - QUAD $0x160e4c203a0f4366; BYTE 
$0x07 // pinsrb xmm1, byte [r14 + r9 + 22], 7 - QUAD $0x161e4c203a0f4366; BYTE $0x08 // pinsrb xmm1, byte [r14 + r11 + 22], 8 + QUAD $0x163e4c203a0f4166; BYTE $0x07 // pinsrb xmm1, byte [r14 + rdi + 22], 7 + QUAD $0x160e4c203a0f4366; BYTE $0x08 // pinsrb xmm1, byte [r14 + r9 + 22], 8 QUAD $0x16364c203a0f4166; BYTE $0x09 // pinsrb xmm1, byte [r14 + rsi + 22], 9 - QUAD $0x16264c203a0f4366; BYTE $0x0a // pinsrb xmm1, byte [r14 + r12 + 22], 10 - QUAD $0x16064c203a0f4366; BYTE $0x0b // pinsrb xmm1, byte [r14 + r8 + 22], 11 - QUAD $0x163e4c203a0f4166; BYTE $0x0c // pinsrb xmm1, byte [r14 + rdi + 22], 12 - QUAD $0x16164c203a0f4166; BYTE $0x0d // pinsrb xmm1, byte [r14 + rdx + 22], 13 - QUAD $0x160e4c203a0f4166; BYTE $0x0e // pinsrb xmm1, byte [r14 + rcx + 22], 14 - QUAD $0x162e4c203a0f4366; BYTE $0x0f // pinsrb xmm1, byte [r14 + r13 + 22], 15 + QUAD $0x16064c203a0f4366; BYTE $0x0a // pinsrb xmm1, byte [r14 + r8 + 22], 10 + QUAD $0x16164c203a0f4166; BYTE $0x0b // pinsrb xmm1, byte [r14 + rdx + 22], 11 + QUAD $0x161e4c203a0f4366; BYTE $0x0c // pinsrb xmm1, byte [r14 + r11 + 22], 12 + QUAD $0x161e4c203a0f4166; BYTE $0x0d // pinsrb xmm1, byte [r14 + rbx + 22], 13 + QUAD $0x162e4c203a0f4366; BYTE $0x0e // pinsrb xmm1, byte [r14 + r13 + 22], 14 + QUAD $0x160e4c203a0f4166; BYTE $0x0f // pinsrb xmm1, byte [r14 + rcx + 22], 15 QUAD $0x173e44203a0f4766; BYTE $0x03 // pinsrb xmm8, byte [r14 + r15 + 23], 3 QUAD $0x170644203a0f4566; BYTE $0x04 // pinsrb xmm8, byte [r14 + rax + 23], 4 - QUAD $0x171e44203a0f4566; BYTE $0x05 // pinsrb xmm8, byte [r14 + rbx + 23], 5 + QUAD $0x172644203a0f4766; BYTE $0x05 // pinsrb xmm8, byte [r14 + r12 + 23], 5 QUAD $0x171644203a0f4766; BYTE $0x06 // pinsrb xmm8, byte [r14 + r10 + 23], 6 - QUAD $0x170e44203a0f4766; BYTE $0x07 // pinsrb xmm8, byte [r14 + r9 + 23], 7 - QUAD $0x171e44203a0f4766; BYTE $0x08 // pinsrb xmm8, byte [r14 + r11 + 23], 8 + QUAD $0x173e44203a0f4566; BYTE $0x07 // pinsrb xmm8, byte [r14 + rdi + 23], 7 + QUAD $0x170e44203a0f4766; BYTE $0x08 // pinsrb xmm8, byte [r14 + r9 + 23], 8 QUAD $0x173644203a0f4566; BYTE $0x09 // pinsrb xmm8, byte [r14 + rsi + 23], 9 - QUAD $0x172644203a0f4766; BYTE $0x0a // pinsrb xmm8, byte [r14 + r12 + 23], 10 - QUAD $0x170644203a0f4766; BYTE $0x0b // pinsrb xmm8, byte [r14 + r8 + 23], 11 - QUAD $0x173e44203a0f4566; BYTE $0x0c // pinsrb xmm8, byte [r14 + rdi + 23], 12 - QUAD $0x171644203a0f4566; BYTE $0x0d // pinsrb xmm8, byte [r14 + rdx + 23], 13 - QUAD $0x170e44203a0f4566; BYTE $0x0e // pinsrb xmm8, byte [r14 + rcx + 23], 14 + QUAD $0x170644203a0f4766; BYTE $0x0a // pinsrb xmm8, byte [r14 + r8 + 23], 10 + QUAD $0x171644203a0f4566; BYTE $0x0b // pinsrb xmm8, byte [r14 + rdx + 23], 11 + QUAD $0x171e44203a0f4766; BYTE $0x0c // pinsrb xmm8, byte [r14 + r11 + 23], 12 + QUAD $0x171e44203a0f4566; BYTE $0x0d // pinsrb xmm8, byte [r14 + rbx + 23], 13 + QUAD $0x172e44203a0f4766; BYTE $0x0e // pinsrb xmm8, byte [r14 + r13 + 23], 14 LONG $0x740f4166; BYTE $0xd6 // pcmpeqb xmm2, xmm14 QUAD $0x00000140ad6f0f66 // movdqa xmm5, oword 320[rbp] /* [rip + .LCPI5_20] */ LONG $0xd5df0f66 // pandn xmm2, xmm5 @@ -28408,68 +29667,68 @@ LBB5_67: QUAD $0x00000150bd6f0f66 // movdqa xmm7, oword 336[rbp] /* [rip + .LCPI5_21] */ LONG $0xcfdf0f66 // pandn xmm1, xmm7 LONG $0xcaeb0f66 // por xmm1, xmm2 - QUAD $0x172e44203a0f4766; BYTE $0x0f // pinsrb xmm8, byte [r14 + r13 + 23], 15 + QUAD $0x170e44203a0f4566; BYTE $0x0f // pinsrb xmm8, byte [r14 + rcx + 23], 15 LONG $0x740f4566; BYTE $0xc6 // pcmpeqb xmm8, xmm14 LONG $0x656f0f66; BYTE $0x60 // movdqa xmm4, oword 
96[rbp] /* [rip + .LCPI5_6] */ LONG $0xdf0f4466; BYTE $0xc4 // pandn xmm8, xmm4 LONG $0xeb0f4466; BYTE $0xc1 // por xmm8, xmm1 QUAD $0x183e64203a0f4766; BYTE $0x03 // pinsrb xmm12, byte [r14 + r15 + 24], 3 QUAD $0x180664203a0f4566; BYTE $0x04 // pinsrb xmm12, byte [r14 + rax + 24], 4 - QUAD $0x181e64203a0f4566; BYTE $0x05 // pinsrb xmm12, byte [r14 + rbx + 24], 5 + QUAD $0x182664203a0f4766; BYTE $0x05 // pinsrb xmm12, byte [r14 + r12 + 24], 5 QUAD $0x181664203a0f4766; BYTE $0x06 // pinsrb xmm12, byte [r14 + r10 + 24], 6 - QUAD $0x180e64203a0f4766; BYTE $0x07 // pinsrb xmm12, byte [r14 + r9 + 24], 7 - QUAD $0x181e64203a0f4766; BYTE $0x08 // pinsrb xmm12, byte [r14 + r11 + 24], 8 + QUAD $0x183e64203a0f4566; BYTE $0x07 // pinsrb xmm12, byte [r14 + rdi + 24], 7 + QUAD $0x180e64203a0f4766; BYTE $0x08 // pinsrb xmm12, byte [r14 + r9 + 24], 8 QUAD $0x183664203a0f4566; BYTE $0x09 // pinsrb xmm12, byte [r14 + rsi + 24], 9 - QUAD $0x182664203a0f4766; BYTE $0x0a // pinsrb xmm12, byte [r14 + r12 + 24], 10 - QUAD $0x180664203a0f4766; BYTE $0x0b // pinsrb xmm12, byte [r14 + r8 + 24], 11 - QUAD $0x183e64203a0f4566; BYTE $0x0c // pinsrb xmm12, byte [r14 + rdi + 24], 12 - QUAD $0x181664203a0f4566; BYTE $0x0d // pinsrb xmm12, byte [r14 + rdx + 24], 13 - QUAD $0x180e64203a0f4566; BYTE $0x0e // pinsrb xmm12, byte [r14 + rcx + 24], 14 - QUAD $0x182e64203a0f4766; BYTE $0x0f // pinsrb xmm12, byte [r14 + r13 + 24], 15 + QUAD $0x180664203a0f4766; BYTE $0x0a // pinsrb xmm12, byte [r14 + r8 + 24], 10 + QUAD $0x181664203a0f4566; BYTE $0x0b // pinsrb xmm12, byte [r14 + rdx + 24], 11 + QUAD $0x181e64203a0f4766; BYTE $0x0c // pinsrb xmm12, byte [r14 + r11 + 24], 12 + QUAD $0x181e64203a0f4566; BYTE $0x0d // pinsrb xmm12, byte [r14 + rbx + 24], 13 + QUAD $0x182e64203a0f4766; BYTE $0x0e // pinsrb xmm12, byte [r14 + r13 + 24], 14 + QUAD $0x180e64203a0f4566; BYTE $0x0f // pinsrb xmm12, byte [r14 + rcx + 24], 15 LONG $0xeb0f4466; BYTE $0xc3 // por xmm8, xmm3 LONG $0x740f4566; BYTE $0xe6 // pcmpeqb xmm12, xmm14 QUAD $0x193e6c203a0f4766; BYTE $0x03 // pinsrb xmm13, byte [r14 + r15 + 25], 3 QUAD $0x19066c203a0f4566; BYTE $0x04 // pinsrb xmm13, byte [r14 + rax + 25], 4 - QUAD $0x191e6c203a0f4566; BYTE $0x05 // pinsrb xmm13, byte [r14 + rbx + 25], 5 + QUAD $0x19266c203a0f4766; BYTE $0x05 // pinsrb xmm13, byte [r14 + r12 + 25], 5 QUAD $0x19166c203a0f4766; BYTE $0x06 // pinsrb xmm13, byte [r14 + r10 + 25], 6 - QUAD $0x190e6c203a0f4766; BYTE $0x07 // pinsrb xmm13, byte [r14 + r9 + 25], 7 - QUAD $0x191e6c203a0f4766; BYTE $0x08 // pinsrb xmm13, byte [r14 + r11 + 25], 8 + QUAD $0x193e6c203a0f4566; BYTE $0x07 // pinsrb xmm13, byte [r14 + rdi + 25], 7 + QUAD $0x190e6c203a0f4766; BYTE $0x08 // pinsrb xmm13, byte [r14 + r9 + 25], 8 QUAD $0x19366c203a0f4566; BYTE $0x09 // pinsrb xmm13, byte [r14 + rsi + 25], 9 - QUAD $0x19266c203a0f4766; BYTE $0x0a // pinsrb xmm13, byte [r14 + r12 + 25], 10 - QUAD $0x19066c203a0f4766; BYTE $0x0b // pinsrb xmm13, byte [r14 + r8 + 25], 11 - QUAD $0x193e6c203a0f4566; BYTE $0x0c // pinsrb xmm13, byte [r14 + rdi + 25], 12 - QUAD $0x19166c203a0f4566; BYTE $0x0d // pinsrb xmm13, byte [r14 + rdx + 25], 13 - QUAD $0x190e6c203a0f4566; BYTE $0x0e // pinsrb xmm13, byte [r14 + rcx + 25], 14 - QUAD $0x192e6c203a0f4766; BYTE $0x0f // pinsrb xmm13, byte [r14 + r13 + 25], 15 + QUAD $0x19066c203a0f4766; BYTE $0x0a // pinsrb xmm13, byte [r14 + r8 + 25], 10 + QUAD $0x19166c203a0f4566; BYTE $0x0b // pinsrb xmm13, byte [r14 + rdx + 25], 11 + QUAD $0x191e6c203a0f4766; BYTE $0x0c // pinsrb xmm13, byte [r14 + r11 + 25], 12 + QUAD 
$0x191e6c203a0f4566; BYTE $0x0d // pinsrb xmm13, byte [r14 + rbx + 25], 13 + QUAD $0x192e6c203a0f4766; BYTE $0x0e // pinsrb xmm13, byte [r14 + r13 + 25], 14 + QUAD $0x190e6c203a0f4566; BYTE $0x0f // pinsrb xmm13, byte [r14 + rcx + 25], 15 QUAD $0x1a3e44203a0f4366; BYTE $0x03 // pinsrb xmm0, byte [r14 + r15 + 26], 3 QUAD $0x1a0644203a0f4166; BYTE $0x04 // pinsrb xmm0, byte [r14 + rax + 26], 4 - QUAD $0x1a1e44203a0f4166; BYTE $0x05 // pinsrb xmm0, byte [r14 + rbx + 26], 5 + QUAD $0x1a2644203a0f4366; BYTE $0x05 // pinsrb xmm0, byte [r14 + r12 + 26], 5 QUAD $0x1a1644203a0f4366; BYTE $0x06 // pinsrb xmm0, byte [r14 + r10 + 26], 6 - QUAD $0x1a0e44203a0f4366; BYTE $0x07 // pinsrb xmm0, byte [r14 + r9 + 26], 7 - QUAD $0x1a1e44203a0f4366; BYTE $0x08 // pinsrb xmm0, byte [r14 + r11 + 26], 8 + QUAD $0x1a3e44203a0f4166; BYTE $0x07 // pinsrb xmm0, byte [r14 + rdi + 26], 7 + QUAD $0x1a0e44203a0f4366; BYTE $0x08 // pinsrb xmm0, byte [r14 + r9 + 26], 8 QUAD $0x1a3644203a0f4166; BYTE $0x09 // pinsrb xmm0, byte [r14 + rsi + 26], 9 - QUAD $0x1a2644203a0f4366; BYTE $0x0a // pinsrb xmm0, byte [r14 + r12 + 26], 10 - QUAD $0x1a0644203a0f4366; BYTE $0x0b // pinsrb xmm0, byte [r14 + r8 + 26], 11 - QUAD $0x1a3e44203a0f4166; BYTE $0x0c // pinsrb xmm0, byte [r14 + rdi + 26], 12 - QUAD $0x1a1644203a0f4166; BYTE $0x0d // pinsrb xmm0, byte [r14 + rdx + 26], 13 - QUAD $0x1a0e44203a0f4166; BYTE $0x0e // pinsrb xmm0, byte [r14 + rcx + 26], 14 - QUAD $0x1a2e44203a0f4366; BYTE $0x0f // pinsrb xmm0, byte [r14 + r13 + 26], 15 + QUAD $0x1a0644203a0f4366; BYTE $0x0a // pinsrb xmm0, byte [r14 + r8 + 26], 10 + QUAD $0x1a1644203a0f4166; BYTE $0x0b // pinsrb xmm0, byte [r14 + rdx + 26], 11 + QUAD $0x1a1e44203a0f4366; BYTE $0x0c // pinsrb xmm0, byte [r14 + r11 + 26], 12 + QUAD $0x1a1e44203a0f4166; BYTE $0x0d // pinsrb xmm0, byte [r14 + rbx + 26], 13 + QUAD $0x1a2e44203a0f4366; BYTE $0x0e // pinsrb xmm0, byte [r14 + r13 + 26], 14 + QUAD $0x1a0e44203a0f4166; BYTE $0x0f // pinsrb xmm0, byte [r14 + rcx + 26], 15 QUAD $0x1b3e5c203a0f4766; BYTE $0x03 // pinsrb xmm11, byte [r14 + r15 + 27], 3 QUAD $0x1b065c203a0f4566; BYTE $0x04 // pinsrb xmm11, byte [r14 + rax + 27], 4 - QUAD $0x1b1e5c203a0f4566; BYTE $0x05 // pinsrb xmm11, byte [r14 + rbx + 27], 5 + QUAD $0x1b265c203a0f4766; BYTE $0x05 // pinsrb xmm11, byte [r14 + r12 + 27], 5 QUAD $0x1b165c203a0f4766; BYTE $0x06 // pinsrb xmm11, byte [r14 + r10 + 27], 6 - QUAD $0x1b0e5c203a0f4766; BYTE $0x07 // pinsrb xmm11, byte [r14 + r9 + 27], 7 - QUAD $0x1b1e5c203a0f4766; BYTE $0x08 // pinsrb xmm11, byte [r14 + r11 + 27], 8 + QUAD $0x1b3e5c203a0f4566; BYTE $0x07 // pinsrb xmm11, byte [r14 + rdi + 27], 7 + QUAD $0x1b0e5c203a0f4766; BYTE $0x08 // pinsrb xmm11, byte [r14 + r9 + 27], 8 QUAD $0x1b365c203a0f4566; BYTE $0x09 // pinsrb xmm11, byte [r14 + rsi + 27], 9 - QUAD $0x1b265c203a0f4766; BYTE $0x0a // pinsrb xmm11, byte [r14 + r12 + 27], 10 - QUAD $0x1b065c203a0f4766; BYTE $0x0b // pinsrb xmm11, byte [r14 + r8 + 27], 11 - QUAD $0x1b3e5c203a0f4566; BYTE $0x0c // pinsrb xmm11, byte [r14 + rdi + 27], 12 - QUAD $0x1b165c203a0f4566; BYTE $0x0d // pinsrb xmm11, byte [r14 + rdx + 27], 13 - QUAD $0x1b0e5c203a0f4566; BYTE $0x0e // pinsrb xmm11, byte [r14 + rcx + 27], 14 + QUAD $0x1b065c203a0f4766; BYTE $0x0a // pinsrb xmm11, byte [r14 + r8 + 27], 10 + QUAD $0x1b165c203a0f4566; BYTE $0x0b // pinsrb xmm11, byte [r14 + rdx + 27], 11 + QUAD $0x1b1e5c203a0f4766; BYTE $0x0c // pinsrb xmm11, byte [r14 + r11 + 27], 12 + QUAD $0x1b1e5c203a0f4566; BYTE $0x0d // pinsrb xmm11, byte [r14 + rbx + 27], 13 + QUAD 
$0x1b2e5c203a0f4766; BYTE $0x0e // pinsrb xmm11, byte [r14 + r13 + 27], 14 LONG $0x740f4566; BYTE $0xee // pcmpeqb xmm13, xmm14 QUAD $0x000100addf0f4466; BYTE $0x00 // pandn xmm13, oword 256[rbp] /* [rip + .LCPI5_16] */ LONG $0xfc0f4566; BYTE $0xec // paddb xmm13, xmm12 - QUAD $0x1b2e5c203a0f4766; BYTE $0x0f // pinsrb xmm11, byte [r14 + r13 + 27], 15 + QUAD $0x1b0e5c203a0f4566; BYTE $0x0f // pinsrb xmm11, byte [r14 + rcx + 27], 15 LONG $0x740f4166; BYTE $0xc6 // pcmpeqb xmm0, xmm14 QUAD $0x0000011085df0f66 // pandn xmm0, oword 272[rbp] /* [rip + .LCPI5_17] */ LONG $0x740f4566; BYTE $0xde // pcmpeqb xmm11, xmm14 @@ -28483,61 +29742,61 @@ LBB5_67: QUAD $0x1d064c203a0f4566; BYTE $0x04 // pinsrb xmm9, byte [r14 + rax + 29], 4 QUAD $0x1e0654203a0f4566; BYTE $0x04 // pinsrb xmm10, byte [r14 + rax + 30], 4 QUAD $0x1f0674203a0f4166; BYTE $0x04 // pinsrb xmm6, byte [r14 + rax + 31], 4 - WORD $0x8948; BYTE $0xd8 // mov rax, rbx - QUAD $0x1c1e7c203a0f4566; BYTE $0x05 // pinsrb xmm15, byte [r14 + rbx + 28], 5 - QUAD $0x1d1e4c203a0f4566; BYTE $0x05 // pinsrb xmm9, byte [r14 + rbx + 29], 5 - QUAD $0x1e1e54203a0f4566; BYTE $0x05 // pinsrb xmm10, byte [r14 + rbx + 30], 5 - QUAD $0x1f1e74203a0f4166; BYTE $0x05 // pinsrb xmm6, byte [r14 + rbx + 31], 5 + QUAD $0x1c267c203a0f4766; BYTE $0x05 // pinsrb xmm15, byte [r14 + r12 + 28], 5 + QUAD $0x1d264c203a0f4766; BYTE $0x05 // pinsrb xmm9, byte [r14 + r12 + 29], 5 + QUAD $0x1e2654203a0f4766; BYTE $0x05 // pinsrb xmm10, byte [r14 + r12 + 30], 5 + QUAD $0x1f2674203a0f4366; BYTE $0x05 // pinsrb xmm6, byte [r14 + r12 + 31], 5 WORD $0x894c; BYTE $0xd0 // mov rax, r10 QUAD $0x1c167c203a0f4766; BYTE $0x06 // pinsrb xmm15, byte [r14 + r10 + 28], 6 QUAD $0x1d164c203a0f4766; BYTE $0x06 // pinsrb xmm9, byte [r14 + r10 + 29], 6 QUAD $0x1e1654203a0f4766; BYTE $0x06 // pinsrb xmm10, byte [r14 + r10 + 30], 6 QUAD $0x1f1674203a0f4366; BYTE $0x06 // pinsrb xmm6, byte [r14 + r10 + 31], 6 + WORD $0x8948; BYTE $0xf8 // mov rax, rdi + QUAD $0x1c3e7c203a0f4566; BYTE $0x07 // pinsrb xmm15, byte [r14 + rdi + 28], 7 + QUAD $0x1d3e4c203a0f4566; BYTE $0x07 // pinsrb xmm9, byte [r14 + rdi + 29], 7 + QUAD $0x1e3e54203a0f4566; BYTE $0x07 // pinsrb xmm10, byte [r14 + rdi + 30], 7 + QUAD $0x1f3e74203a0f4166; BYTE $0x07 // pinsrb xmm6, byte [r14 + rdi + 31], 7 WORD $0x894c; BYTE $0xc8 // mov rax, r9 - QUAD $0x1c0e7c203a0f4766; BYTE $0x07 // pinsrb xmm15, byte [r14 + r9 + 28], 7 - QUAD $0x1d0e4c203a0f4766; BYTE $0x07 // pinsrb xmm9, byte [r14 + r9 + 29], 7 - QUAD $0x1e0e54203a0f4766; BYTE $0x07 // pinsrb xmm10, byte [r14 + r9 + 30], 7 - QUAD $0x1f0e74203a0f4366; BYTE $0x07 // pinsrb xmm6, byte [r14 + r9 + 31], 7 - QUAD $0x1c1e7c203a0f4766; BYTE $0x08 // pinsrb xmm15, byte [r14 + r11 + 28], 8 - QUAD $0x1d1e4c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r11 + 29], 8 - QUAD $0x1e1e54203a0f4766; BYTE $0x08 // pinsrb xmm10, byte [r14 + r11 + 30], 8 - QUAD $0x1f1e74203a0f4366; BYTE $0x08 // pinsrb xmm6, byte [r14 + r11 + 31], 8 + QUAD $0x1c0e7c203a0f4766; BYTE $0x08 // pinsrb xmm15, byte [r14 + r9 + 28], 8 + QUAD $0x1d0e4c203a0f4766; BYTE $0x08 // pinsrb xmm9, byte [r14 + r9 + 29], 8 + QUAD $0x1e0e54203a0f4766; BYTE $0x08 // pinsrb xmm10, byte [r14 + r9 + 30], 8 + QUAD $0x1f0e74203a0f4366; BYTE $0x08 // pinsrb xmm6, byte [r14 + r9 + 31], 8 WORD $0x8948; BYTE $0xf0 // mov rax, rsi QUAD $0x1c367c203a0f4566; BYTE $0x09 // pinsrb xmm15, byte [r14 + rsi + 28], 9 QUAD $0x1d364c203a0f4566; BYTE $0x09 // pinsrb xmm9, byte [r14 + rsi + 29], 9 QUAD $0x1e3654203a0f4566; BYTE $0x09 // pinsrb xmm10, byte 
[r14 + rsi + 30], 9 QUAD $0x1f3674203a0f4166; BYTE $0x09 // pinsrb xmm6, byte [r14 + rsi + 31], 9 - QUAD $0x1c267c203a0f4766; BYTE $0x0a // pinsrb xmm15, byte [r14 + r12 + 28], 10 - QUAD $0x1d264c203a0f4766; BYTE $0x0a // pinsrb xmm9, byte [r14 + r12 + 29], 10 - QUAD $0x1e2654203a0f4766; BYTE $0x0a // pinsrb xmm10, byte [r14 + r12 + 30], 10 - QUAD $0x1f2674203a0f4366; BYTE $0x0a // pinsrb xmm6, byte [r14 + r12 + 31], 10 WORD $0x894c; BYTE $0xc0 // mov rax, r8 - QUAD $0x1c067c203a0f4766; BYTE $0x0b // pinsrb xmm15, byte [r14 + r8 + 28], 11 - QUAD $0x1d064c203a0f4766; BYTE $0x0b // pinsrb xmm9, byte [r14 + r8 + 29], 11 - QUAD $0x1e0654203a0f4766; BYTE $0x0b // pinsrb xmm10, byte [r14 + r8 + 30], 11 - QUAD $0x1f0674203a0f4366; BYTE $0x0b // pinsrb xmm6, byte [r14 + r8 + 31], 11 - WORD $0x8948; BYTE $0xf8 // mov rax, rdi - QUAD $0x1c3e7c203a0f4566; BYTE $0x0c // pinsrb xmm15, byte [r14 + rdi + 28], 12 - QUAD $0x1d3e4c203a0f4566; BYTE $0x0c // pinsrb xmm9, byte [r14 + rdi + 29], 12 - QUAD $0x1e3e54203a0f4566; BYTE $0x0c // pinsrb xmm10, byte [r14 + rdi + 30], 12 - QUAD $0x1f3e74203a0f4166; BYTE $0x0c // pinsrb xmm6, byte [r14 + rdi + 31], 12 + QUAD $0x1c067c203a0f4766; BYTE $0x0a // pinsrb xmm15, byte [r14 + r8 + 28], 10 + QUAD $0x1d064c203a0f4766; BYTE $0x0a // pinsrb xmm9, byte [r14 + r8 + 29], 10 + QUAD $0x1e0654203a0f4766; BYTE $0x0a // pinsrb xmm10, byte [r14 + r8 + 30], 10 + QUAD $0x1f0674203a0f4366; BYTE $0x0a // pinsrb xmm6, byte [r14 + r8 + 31], 10 WORD $0x8948; BYTE $0xd0 // mov rax, rdx - QUAD $0x1c167c203a0f4566; BYTE $0x0d // pinsrb xmm15, byte [r14 + rdx + 28], 13 - QUAD $0x1d164c203a0f4566; BYTE $0x0d // pinsrb xmm9, byte [r14 + rdx + 29], 13 - QUAD $0x1e1654203a0f4566; BYTE $0x0d // pinsrb xmm10, byte [r14 + rdx + 30], 13 - QUAD $0x1f1674203a0f4166; BYTE $0x0d // pinsrb xmm6, byte [r14 + rdx + 31], 13 - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x1c0e7c203a0f4566; BYTE $0x0e // pinsrb xmm15, byte [r14 + rcx + 28], 14 - QUAD $0x1d0e4c203a0f4566; BYTE $0x0e // pinsrb xmm9, byte [r14 + rcx + 29], 14 - QUAD $0x1e0e54203a0f4566; BYTE $0x0e // pinsrb xmm10, byte [r14 + rcx + 30], 14 - QUAD $0x1f0e74203a0f4166; BYTE $0x0e // pinsrb xmm6, byte [r14 + rcx + 31], 14 - QUAD $0x1c2e7c203a0f4766; BYTE $0x0f // pinsrb xmm15, byte [r14 + r13 + 28], 15 - QUAD $0x1d2e4c203a0f4766; BYTE $0x0f // pinsrb xmm9, byte [r14 + r13 + 29], 15 - QUAD $0x1e2e54203a0f4766; BYTE $0x0f // pinsrb xmm10, byte [r14 + r13 + 30], 15 + QUAD $0x1c167c203a0f4566; BYTE $0x0b // pinsrb xmm15, byte [r14 + rdx + 28], 11 + QUAD $0x1d164c203a0f4566; BYTE $0x0b // pinsrb xmm9, byte [r14 + rdx + 29], 11 + QUAD $0x1e1654203a0f4566; BYTE $0x0b // pinsrb xmm10, byte [r14 + rdx + 30], 11 + QUAD $0x1f1674203a0f4166; BYTE $0x0b // pinsrb xmm6, byte [r14 + rdx + 31], 11 + WORD $0x894c; BYTE $0xd8 // mov rax, r11 + QUAD $0x1c1e7c203a0f4766; BYTE $0x0c // pinsrb xmm15, byte [r14 + r11 + 28], 12 + QUAD $0x1d1e4c203a0f4766; BYTE $0x0c // pinsrb xmm9, byte [r14 + r11 + 29], 12 + QUAD $0x1e1e54203a0f4766; BYTE $0x0c // pinsrb xmm10, byte [r14 + r11 + 30], 12 + QUAD $0x1f1e74203a0f4366; BYTE $0x0c // pinsrb xmm6, byte [r14 + r11 + 31], 12 + QUAD $0x1c1e7c203a0f4566; BYTE $0x0d // pinsrb xmm15, byte [r14 + rbx + 28], 13 + QUAD $0x1d1e4c203a0f4566; BYTE $0x0d // pinsrb xmm9, byte [r14 + rbx + 29], 13 + QUAD $0x1e1e54203a0f4566; BYTE $0x0d // pinsrb xmm10, byte [r14 + rbx + 30], 13 + QUAD $0x1f1e74203a0f4166; BYTE $0x0d // pinsrb xmm6, byte [r14 + rbx + 31], 13 + WORD $0x894c; BYTE $0xe8 // mov rax, r13 + QUAD $0x1c2e7c203a0f4766; BYTE 
$0x0e // pinsrb xmm15, byte [r14 + r13 + 28], 14 + QUAD $0x1d2e4c203a0f4766; BYTE $0x0e // pinsrb xmm9, byte [r14 + r13 + 29], 14 + QUAD $0x1e2e54203a0f4766; BYTE $0x0e // pinsrb xmm10, byte [r14 + r13 + 30], 14 + QUAD $0x1f2e74203a0f4366; BYTE $0x0e // pinsrb xmm6, byte [r14 + r13 + 31], 14 + QUAD $0x1c0e7c203a0f4566; BYTE $0x0f // pinsrb xmm15, byte [r14 + rcx + 28], 15 + QUAD $0x1d0e4c203a0f4566; BYTE $0x0f // pinsrb xmm9, byte [r14 + rcx + 29], 15 + QUAD $0x1e0e54203a0f4566; BYTE $0x0f // pinsrb xmm10, byte [r14 + rcx + 30], 15 LONG $0x740f4566; BYTE $0xfe // pcmpeqb xmm15, xmm14 QUAD $0x000130bddf0f4466; BYTE $0x00 // pandn xmm15, oword 304[rbp] /* [rip + .LCPI5_19] */ LONG $0xeb0f4566; BYTE $0xfb // por xmm15, xmm11 - QUAD $0x1f2e74203a0f4366; BYTE $0x0f // pinsrb xmm6, byte [r14 + r13 + 31], 15 + QUAD $0x1f0e74203a0f4166; BYTE $0x0f // pinsrb xmm6, byte [r14 + rcx + 31], 15 QUAD $0x000160adf80f4466; BYTE $0x00 // psubb xmm13, oword 352[rbp] /* [rip + .LCPI5_22] */ LONG $0xeb0f4566; BYTE $0xfd // por xmm15, xmm13 LONG $0x740f4566; BYTE $0xce // pcmpeqb xmm9, xmm14 @@ -28551,7 +29810,7 @@ LBB5_67: LONG $0xeb0f4166; BYTE $0xf7 // por xmm6, xmm15 LONG $0x6f0f4166; BYTE $0xc0 // movdqa xmm0, xmm8 LONG $0xc6600f66 // punpcklbw xmm0, xmm6 - QUAD $0x000100249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 256] + QUAD $0x0000f0249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 240] LONG $0xcb6f0f66 // movdqa xmm1, xmm3 QUAD $0x0000c024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 192] LONG $0xcc600f66 // punpcklbw xmm1, xmm4 @@ -28571,23 +29830,23 @@ LBB5_67: LONG $0x147f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm2 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x000000d8248c3b48 // cmp rcx, qword [rsp + 216] - JNE LBB5_67 - QUAD $0x000000f824948b4c // mov r10, qword [rsp + 248] - QUAD $0x000000d824943b4c // cmp r10, qword [rsp + 216] + QUAD $0x000000e8248c3b48 // cmp rcx, qword [rsp + 232] + JNE LBB5_68 + QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + QUAD $0x000000e824943b4c // cmp r10, qword [rsp + 232] QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] - JNE LBB5_69 - JMP LBB5_72 + JNE LBB5_70 + JMP LBB5_73 -LBB5_109: +LBB5_110: LONG $0xf8e28349 // and r10, -8 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x06e0c148 // shl rax, 6 WORD $0x014c; BYTE $0xf0 // add rax, r14 LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax + LONG $0x2454894c; BYTE $0x28 // mov qword [rsp + 40], r10 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - LONG $0x2454894c; BYTE $0x18 // mov qword [rsp + 24], r10 LONG $0x90048d4a // lea rax, [rax + 4*r10] LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax LONG $0x6e0f4166; BYTE $0xc3 // movd xmm0, r11d @@ -28596,13 +29855,13 @@ LBB5_109: WORD $0xff31 // xor edi, edi LONG $0xef0f4566; BYTE $0xc9 // pxor xmm9, xmm9 -LBB5_110: +LBB5_111: LONG $0x247c8948; BYTE $0x40 // mov qword [rsp + 64], rdi LONG $0x06e7c148 // shl rdi, 6 WORD $0x8949; BYTE $0xff // mov r15, rdi WORD $0x8948; BYTE $0xfe // mov rsi, rdi + WORD $0x8949; BYTE $0xfc // mov r12, rdi WORD $0x8948; BYTE $0xfa // mov rdx, rdi - WORD $0x8949; BYTE $0xfd // mov r13, rdi WORD $0x8948; BYTE $0xfb // mov rbx, rdi WORD $0x8949; BYTE $0xf9 // mov r9, rdi LONG $0x04b70f41; BYTE $0x3e // movzx eax, word [r14 + rdi] @@ -28620,39 +29879,39 @@ LBB5_110: LONG $0x44b70f41; WORD $0x0c3e // movzx eax, word [r14 + rdi + 12] LONG $0x44b70f45; WORD $0x0e3e // movzx r8d, word [r14 + rdi + 
14] LONG $0x54b70f45; WORD $0x103e // movzx r10d, word [r14 + rdi + 16] - LONG $0x64b70f45; WORD $0x123e // movzx r12d, word [r14 + rdi + 18] + LONG $0x6cb70f45; WORD $0x123e // movzx r13d, word [r14 + rdi + 18] LONG $0x4cb70f41; WORD $0x143e // movzx ecx, word [r14 + rdi + 20] - LONG $0x28244c89 // mov dword [rsp + 40], ecx + LONG $0x20244c89 // mov dword [rsp + 32], ecx WORD $0x8948; BYTE $0xf9 // mov rcx, rdi LONG $0x40c98348 // or rcx, 64 LONG $0x80cf8149; WORD $0x0000; BYTE $0x00 // or r15, 128 LONG $0xc0ce8148; WORD $0x0000; BYTE $0x00 // or rsi, 192 - LONG $0x00ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 256 - LONG $0x40cd8149; WORD $0x0001; BYTE $0x00 // or r13, 320 + LONG $0x00cc8149; WORD $0x0001; BYTE $0x00 // or r12, 256 + LONG $0x40ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 320 LONG $0x80cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 384 LONG $0xc40f4166; WORD $0x0e2c; BYTE $0x01 // pinsrw xmm5, word [r14 + rcx], 1 LONG $0xc40f4366; WORD $0x3e2c; BYTE $0x02 // pinsrw xmm5, word [r14 + r15], 2 LONG $0xc40f4166; WORD $0x362c; BYTE $0x03 // pinsrw xmm5, word [r14 + rsi], 3 - LONG $0xc40f4166; WORD $0x162c; BYTE $0x04 // pinsrw xmm5, word [r14 + rdx], 4 - LONG $0xc40f4366; WORD $0x2e2c; BYTE $0x05 // pinsrw xmm5, word [r14 + r13], 5 + LONG $0xc40f4366; WORD $0x262c; BYTE $0x04 // pinsrw xmm5, word [r14 + r12], 4 + LONG $0xc40f4166; WORD $0x162c; BYTE $0x05 // pinsrw xmm5, word [r14 + rdx], 5 LONG $0xc40f4166; WORD $0x1e2c; BYTE $0x06 // pinsrw xmm5, word [r14 + rbx], 6 QUAD $0x01020e44c40f4166 // pinsrw xmm0, word [r14 + rcx + 2], 1 QUAD $0x02023e44c40f4366 // pinsrw xmm0, word [r14 + r15 + 2], 2 QUAD $0x03023644c40f4166 // pinsrw xmm0, word [r14 + rsi + 2], 3 - QUAD $0x04021644c40f4166 // pinsrw xmm0, word [r14 + rdx + 2], 4 - QUAD $0x05022e44c40f4366 // pinsrw xmm0, word [r14 + r13 + 2], 5 + QUAD $0x04022644c40f4366 // pinsrw xmm0, word [r14 + r12 + 2], 4 + QUAD $0x05021644c40f4166 // pinsrw xmm0, word [r14 + rdx + 2], 5 QUAD $0x06021e44c40f4166 // pinsrw xmm0, word [r14 + rbx + 2], 6 LONG $0xc0c98149; WORD $0x0001; BYTE $0x00 // or r9, 448 QUAD $0x07020e44c40f4366 // pinsrw xmm0, word [r14 + r9 + 2], 7 LONG $0xd06e0f66 // movd xmm2, eax LONG $0x44b70f41; WORD $0x163e // movzx eax, word [r14 + rdi + 22] - LONG $0x20244489 // mov dword [rsp + 32], eax + LONG $0x18244489 // mov dword [rsp + 24], eax LONG $0x750f4166; BYTE $0xc3 // pcmpeqw xmm0, xmm11 QUAD $0x01040e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 4], 1 QUAD $0x02043e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 4], 2 QUAD $0x0304364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 4], 3 - QUAD $0x0404164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 4], 4 - QUAD $0x05042e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 4], 5 + QUAD $0x0404264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 4], 4 + QUAD $0x0504164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 4], 5 QUAD $0x06041e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 4], 6 QUAD $0x07040e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 4], 7 LONG $0xc0630f66 // packsswb xmm0, xmm0 @@ -28675,8 +29934,8 @@ LBB5_110: QUAD $0x01060e7cc40f4166 // pinsrw xmm7, word [r14 + rcx + 6], 1 QUAD $0x02063e7cc40f4366 // pinsrw xmm7, word [r14 + r15 + 6], 2 QUAD $0x0306367cc40f4166 // pinsrw xmm7, word [r14 + rsi + 6], 3 - QUAD $0x0406167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 6], 4 - QUAD $0x05062e7cc40f4366 // pinsrw xmm7, word [r14 + r13 + 6], 5 + QUAD $0x0406267cc40f4366 // pinsrw xmm7, word [r14 + r12 + 6], 4 + QUAD $0x0506167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 6], 5 QUAD $0x06061e7cc40f4166 
// pinsrw xmm7, word [r14 + rbx + 6], 6 QUAD $0x07060e7cc40f4366 // pinsrw xmm7, word [r14 + r9 + 6], 7 LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 @@ -28684,8 +29943,8 @@ LBB5_110: QUAD $0x01080e44c40f4566 // pinsrw xmm8, word [r14 + rcx + 8], 1 QUAD $0x02083e44c40f4766 // pinsrw xmm8, word [r14 + r15 + 8], 2 QUAD $0x03083644c40f4566 // pinsrw xmm8, word [r14 + rsi + 8], 3 - QUAD $0x04081644c40f4566 // pinsrw xmm8, word [r14 + rdx + 8], 4 - QUAD $0x05082e44c40f4766 // pinsrw xmm8, word [r14 + r13 + 8], 5 + QUAD $0x04082644c40f4766 // pinsrw xmm8, word [r14 + r12 + 8], 4 + QUAD $0x05081644c40f4566 // pinsrw xmm8, word [r14 + rdx + 8], 5 QUAD $0x06081e44c40f4566 // pinsrw xmm8, word [r14 + rbx + 8], 6 QUAD $0x07080e44c40f4766 // pinsrw xmm8, word [r14 + r9 + 8], 7 LONG $0xddf80f66 // psubb xmm3, xmm5 @@ -28700,13 +29959,13 @@ LBB5_110: QUAD $0x0000b0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 176[rbp] /* [rip + .LCPI5_11] */ LONG $0x6f0f4166; BYTE $0xc0 // movdqa xmm0, xmm8 LONG $0x380f4566; WORD $0xe910 // pblendvb xmm13, xmm9, xmm0 - LONG $0x6e0f4166; BYTE $0xf4 // movd xmm6, r12d - LONG $0x64b70f45; WORD $0x1c3e // movzx r12d, word [r14 + rdi + 28] + LONG $0x6e0f4166; BYTE $0xf5 // movd xmm6, r13d + LONG $0x6cb70f45; WORD $0x1c3e // movzx r13d, word [r14 + rdi + 28] QUAD $0x010a0e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 10], 1 QUAD $0x020a3e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 10], 2 QUAD $0x030a3664c40f4166 // pinsrw xmm4, word [r14 + rsi + 10], 3 - QUAD $0x040a1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 10], 4 - QUAD $0x050a2e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 10], 5 + QUAD $0x040a2664c40f4366 // pinsrw xmm4, word [r14 + r12 + 10], 4 + QUAD $0x050a1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 10], 5 QUAD $0x060a1e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 10], 6 QUAD $0x070a0e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 10], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 @@ -28714,14 +29973,14 @@ LBB5_110: QUAD $0x010c0e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 12], 1 QUAD $0x020c3e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 12], 2 QUAD $0x030c3654c40f4166 // pinsrw xmm2, word [r14 + rsi + 12], 3 - QUAD $0x040c1654c40f4166 // pinsrw xmm2, word [r14 + rdx + 12], 4 - QUAD $0x050c2e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 12], 5 + QUAD $0x040c2654c40f4366 // pinsrw xmm2, word [r14 + r12 + 12], 4 + QUAD $0x050c1654c40f4166 // pinsrw xmm2, word [r14 + rdx + 12], 5 QUAD $0x060c1e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 12], 6 LONG $0xeb0f4466; BYTE $0xe3 // por xmm12, xmm3 QUAD $0x000000c0ad6f0f66 // movdqa xmm5, oword 192[rbp] /* [rip + .LCPI5_12] */ LONG $0xc46f0f66 // movdqa xmm0, xmm4 LONG $0x380f4166; WORD $0xe910 // pblendvb xmm5, xmm9, xmm0 - LONG $0x646e0f66; WORD $0x2824 // movd xmm4, dword [rsp + 40] + LONG $0x646e0f66; WORD $0x2024 // movd xmm4, dword [rsp + 32] LONG $0x54b70f45; WORD $0x1e3e // movzx r10d, word [r14 + rdi + 30] QUAD $0x070c0e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 12], 7 LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 @@ -28730,26 +29989,26 @@ LBB5_110: QUAD $0x0000d0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 208[rbp] /* [rip + .LCPI5_13] */ LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0x380f4566; WORD $0xe910 // pblendvb xmm13, xmm9, xmm0 - LONG $0x5c6e0f66; WORD $0x2024 // movd xmm3, dword [rsp + 32] + LONG $0x5c6e0f66; WORD $0x1824 // movd xmm3, dword [rsp + 24] LONG $0x44b70f41; WORD $0x203e // movzx eax, word [r14 + rdi + 32] - LONG $0x20244489 // mov dword [rsp + 32], eax + LONG $0x18244489 
// mov dword [rsp + 24], eax QUAD $0x010e0e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 14], 1 QUAD $0x020e3e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 14], 2 QUAD $0x030e364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 14], 3 - QUAD $0x040e164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 14], 4 - QUAD $0x050e2e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 14], 5 + QUAD $0x040e264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 14], 4 + QUAD $0x050e164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 14], 5 QUAD $0x060e1e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 14], 6 LONG $0xeb0f4466; BYTE $0xed // por xmm13, xmm5 LONG $0x6e0f4166; BYTE $0xd3 // movd xmm2, r11d LONG $0x44b70f41; WORD $0x223e // movzx eax, word [r14 + rdi + 34] - LONG $0x28244489 // mov dword [rsp + 40], eax + LONG $0x20244489 // mov dword [rsp + 32], eax QUAD $0x070e0e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 14], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 QUAD $0x01120e74c40f4166 // pinsrw xmm6, word [r14 + rcx + 18], 1 QUAD $0x02123e74c40f4366 // pinsrw xmm6, word [r14 + r15 + 18], 2 QUAD $0x03123674c40f4166 // pinsrw xmm6, word [r14 + rsi + 18], 3 - QUAD $0x04121674c40f4166 // pinsrw xmm6, word [r14 + rdx + 18], 4 - QUAD $0x05122e74c40f4366 // pinsrw xmm6, word [r14 + r13 + 18], 5 + QUAD $0x04122674c40f4366 // pinsrw xmm6, word [r14 + r12 + 18], 4 + QUAD $0x05121674c40f4166 // pinsrw xmm6, word [r14 + rdx + 18], 5 QUAD $0x06121e74c40f4166 // pinsrw xmm6, word [r14 + rbx + 18], 6 LONG $0xc9630f66 // packsswb xmm1, xmm1 QUAD $0x07120e74c40f4366 // pinsrw xmm6, word [r14 + r9 + 18], 7 @@ -28768,14 +30027,14 @@ LBB5_110: QUAD $0x01100e7cc40f4166 // pinsrw xmm7, word [r14 + rcx + 16], 1 QUAD $0x02103e7cc40f4366 // pinsrw xmm7, word [r14 + r15 + 16], 2 QUAD $0x0310367cc40f4166 // pinsrw xmm7, word [r14 + rsi + 16], 3 - QUAD $0x0410167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 16], 4 - QUAD $0x05102e7cc40f4366 // pinsrw xmm7, word [r14 + r13 + 16], 5 + QUAD $0x0410267cc40f4366 // pinsrw xmm7, word [r14 + r12 + 16], 4 + QUAD $0x0510167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 16], 5 QUAD $0x06101e7cc40f4166 // pinsrw xmm7, word [r14 + rbx + 16], 6 QUAD $0x01140e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 20], 1 QUAD $0x02143e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 20], 2 QUAD $0x03143664c40f4166 // pinsrw xmm4, word [r14 + rsi + 20], 3 - QUAD $0x04141664c40f4166 // pinsrw xmm4, word [r14 + rdx + 20], 4 - QUAD $0x05142e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 20], 5 + QUAD $0x04142664c40f4366 // pinsrw xmm4, word [r14 + r12 + 20], 4 + QUAD $0x05141664c40f4166 // pinsrw xmm4, word [r14 + rdx + 20], 5 QUAD $0x06141e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 20], 6 QUAD $0x07140e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 20], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 @@ -28783,8 +30042,8 @@ LBB5_110: LONG $0xeb0f4566; BYTE $0xe5 // por xmm12, xmm13 LONG $0xc46f0f66 // movdqa xmm0, xmm4 LONG $0x380f4566; WORD $0xf910 // pblendvb xmm15, xmm9, xmm0 - LONG $0x6e0f4166; BYTE $0xe4 // movd xmm4, r12d - LONG $0x64b70f45; WORD $0x263e // movzx r12d, word [r14 + rdi + 38] + LONG $0x6e0f4166; BYTE $0xe5 // movd xmm4, r13d + LONG $0x6cb70f45; WORD $0x263e // movzx r13d, word [r14 + rdi + 38] QUAD $0x07100e7cc40f4366 // pinsrw xmm7, word [r14 + r9 + 16], 7 LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 QUAD $0x00000160bdef0f66 // pxor xmm7, oword 352[rbp] /* [rip + .LCPI5_22] */ @@ -28792,8 +30051,8 @@ LBB5_110: QUAD $0x01160e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 22], 1 QUAD $0x02163e5cc40f4366 // 
pinsrw xmm3, word [r14 + r15 + 22], 2 QUAD $0x0316365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 22], 3 - QUAD $0x0416165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 22], 4 - QUAD $0x05162e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 22], 5 + QUAD $0x0416265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 22], 4 + QUAD $0x0516165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 22], 5 QUAD $0x06161e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 22], 6 QUAD $0x07160e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 22], 7 LONG $0x750f4166; BYTE $0xdb // pcmpeqw xmm3, xmm11 @@ -28801,8 +30060,8 @@ LBB5_110: QUAD $0x01180e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 24], 1 QUAD $0x02183e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 24], 2 QUAD $0x03183654c40f4166 // pinsrw xmm2, word [r14 + rsi + 24], 3 - QUAD $0x04181654c40f4166 // pinsrw xmm2, word [r14 + rdx + 24], 4 - QUAD $0x05182e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 24], 5 + QUAD $0x04182654c40f4366 // pinsrw xmm2, word [r14 + r12 + 24], 4 + QUAD $0x05181654c40f4166 // pinsrw xmm2, word [r14 + rdx + 24], 5 QUAD $0x06181e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 24], 6 QUAD $0x07180e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 24], 7 LONG $0xf80f4466; BYTE $0xc7 // psubb xmm8, xmm7 @@ -28817,13 +30076,13 @@ LBB5_110: QUAD $0x0000b0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 176[rbp] /* [rip + .LCPI5_11] */ LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0x380f4566; WORD $0xe910 // pblendvb xmm13, xmm9, xmm0 - LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] + LONG $0x7c6e0f66; WORD $0x1824 // movd xmm7, dword [rsp + 24] LONG $0x54b70f45; WORD $0x2a3e // movzx r10d, word [r14 + rdi + 42] QUAD $0x011a0e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 26], 1 QUAD $0x021a3e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 26], 2 QUAD $0x031a364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 26], 3 - QUAD $0x041a164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 26], 4 - QUAD $0x051a2e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 26], 5 + QUAD $0x041a264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 26], 4 + QUAD $0x051a164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 26], 5 QUAD $0x061a1e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 26], 6 QUAD $0x071a0e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 26], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 @@ -28831,17 +30090,17 @@ LBB5_110: QUAD $0x011c0e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 28], 1 QUAD $0x021c3e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 28], 2 QUAD $0x031c3664c40f4166 // pinsrw xmm4, word [r14 + rsi + 28], 3 - QUAD $0x041c1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 28], 4 - QUAD $0x051c2e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 28], 5 + QUAD $0x041c2664c40f4366 // pinsrw xmm4, word [r14 + r12 + 28], 4 + QUAD $0x051c1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 28], 5 QUAD $0x061c1e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 28], 6 LONG $0xeb0f4566; BYTE $0xf0 // por xmm14, xmm8 QUAD $0x0000c0bd6f0f4466; BYTE $0x00 // movdqa xmm15, oword 192[rbp] /* [rip + .LCPI5_12] */ LONG $0x6f0f4166; BYTE $0xef // movdqa xmm5, xmm15 LONG $0xc16f0f66 // movdqa xmm0, xmm1 LONG $0x380f4166; WORD $0xe910 // pblendvb xmm5, xmm9, xmm0 - LONG $0x546e0f66; WORD $0x2824 // movd xmm2, dword [rsp + 40] + LONG $0x546e0f66; WORD $0x2024 // movd xmm2, dword [rsp + 32] LONG $0x44b70f41; WORD $0x2c3e // movzx eax, word [r14 + rdi + 44] - LONG $0x20244489 // mov dword [rsp + 32], eax + LONG $0x18244489 // mov dword [rsp + 24], eax QUAD $0x071c0e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 28], 7 LONG 
$0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 LONG $0xe4630f66 // packsswb xmm4, xmm4 @@ -28854,20 +30113,20 @@ LBB5_110: QUAD $0x011e0e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 30], 1 QUAD $0x021e3e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 30], 2 QUAD $0x031e365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 30], 3 - QUAD $0x041e165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 30], 4 - QUAD $0x051e2e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 30], 5 + QUAD $0x041e265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 30], 4 + QUAD $0x051e165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 30], 5 QUAD $0x061e1e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 30], 6 LONG $0xf5eb0f66 // por xmm6, xmm5 - LONG $0x6e0f4166; BYTE $0xcc // movd xmm1, r12d + LONG $0x6e0f4166; BYTE $0xcd // movd xmm1, r13d LONG $0x44b70f41; WORD $0x303e // movzx eax, word [r14 + rdi + 48] - LONG $0x28244489 // mov dword [rsp + 40], eax + LONG $0x20244489 // mov dword [rsp + 32], eax QUAD $0x071e0e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 30], 7 LONG $0x750f4166; BYTE $0xdb // pcmpeqw xmm3, xmm11 QUAD $0x01220e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 34], 1 QUAD $0x02223e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 34], 2 QUAD $0x03223654c40f4166 // pinsrw xmm2, word [r14 + rsi + 34], 3 - QUAD $0x04221654c40f4166 // pinsrw xmm2, word [r14 + rdx + 34], 4 - QUAD $0x05222e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 34], 5 + QUAD $0x04222654c40f4366 // pinsrw xmm2, word [r14 + r12 + 34], 4 + QUAD $0x05221654c40f4166 // pinsrw xmm2, word [r14 + rdx + 34], 5 QUAD $0x06221e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 34], 6 LONG $0xdb630f66 // packsswb xmm3, xmm3 QUAD $0x07220e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 34], 7 @@ -28881,18 +30140,18 @@ LBB5_110: LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0x380f4566; WORD $0xc110 // pblendvb xmm8, xmm9, xmm0 LONG $0x6e0f4166; BYTE $0xd0 // movd xmm2, r8d - LONG $0x64b70f45; WORD $0x323e // movzx r12d, word [r14 + rdi + 50] + LONG $0x6cb70f45; WORD $0x323e // movzx r13d, word [r14 + rdi + 50] QUAD $0x01200e7cc40f4166 // pinsrw xmm7, word [r14 + rcx + 32], 1 QUAD $0x02203e7cc40f4366 // pinsrw xmm7, word [r14 + r15 + 32], 2 QUAD $0x0320367cc40f4166 // pinsrw xmm7, word [r14 + rsi + 32], 3 - QUAD $0x0420167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 32], 4 - QUAD $0x05202e7cc40f4366 // pinsrw xmm7, word [r14 + r13 + 32], 5 + QUAD $0x0420267cc40f4366 // pinsrw xmm7, word [r14 + r12 + 32], 4 + QUAD $0x0520167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 32], 5 QUAD $0x06201e7cc40f4166 // pinsrw xmm7, word [r14 + rbx + 32], 6 QUAD $0x01240e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 36], 1 QUAD $0x02243e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 36], 2 QUAD $0x03243664c40f4166 // pinsrw xmm4, word [r14 + rsi + 36], 3 - QUAD $0x04241664c40f4166 // pinsrw xmm4, word [r14 + rdx + 36], 4 - QUAD $0x05242e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 36], 5 + QUAD $0x04242664c40f4366 // pinsrw xmm4, word [r14 + r12 + 36], 4 + QUAD $0x05241664c40f4166 // pinsrw xmm4, word [r14 + rdx + 36], 5 QUAD $0x06241e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 36], 6 QUAD $0x07240e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 36], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 @@ -28911,8 +30170,8 @@ LBB5_110: QUAD $0x01260e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 38], 1 QUAD $0x02263e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 38], 2 QUAD $0x0326364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 38], 3 - QUAD $0x0426164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 38], 4 - QUAD $0x05262e4cc40f4366 // 
pinsrw xmm1, word [r14 + r13 + 38], 5 + QUAD $0x0426264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 38], 4 + QUAD $0x0526164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 38], 5 QUAD $0x06261e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 38], 6 QUAD $0x07260e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 38], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 @@ -28920,15 +30179,15 @@ LBB5_110: QUAD $0x01280e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 40], 1 QUAD $0x02283e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 40], 2 QUAD $0x03283654c40f4166 // pinsrw xmm2, word [r14 + rsi + 40], 3 - QUAD $0x04281654c40f4166 // pinsrw xmm2, word [r14 + rdx + 40], 4 - QUAD $0x05282e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 40], 5 + QUAD $0x04282654c40f4366 // pinsrw xmm2, word [r14 + r12 + 40], 4 + QUAD $0x05281654c40f4166 // pinsrw xmm2, word [r14 + rdx + 40], 5 QUAD $0x06281e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 40], 6 QUAD $0x07280e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 40], 7 LONG $0xf80f4466; BYTE $0xc7 // psubb xmm8, xmm7 QUAD $0x000000a0ad6f0f66 // movdqa xmm5, oword 160[rbp] /* [rip + .LCPI5_10] */ LONG $0xc16f0f66 // movdqa xmm0, xmm1 LONG $0x380f4166; WORD $0xe910 // pblendvb xmm5, xmm9, xmm0 - LONG $0x4c6e0f66; WORD $0x2024 // movd xmm1, dword [rsp + 32] + LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] LONG $0x44b70f45; WORD $0x363e // movzx r8d, word [r14 + rdi + 54] LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -28941,8 +30200,8 @@ LBB5_110: QUAD $0x012a0e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 42], 1 QUAD $0x022a3e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 42], 2 QUAD $0x032a365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 42], 3 - QUAD $0x042a165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 42], 4 - QUAD $0x052a2e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 42], 5 + QUAD $0x042a265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 42], 4 + QUAD $0x052a165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 42], 5 QUAD $0x062a1e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 42], 6 QUAD $0x072a0e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 42], 7 LONG $0x750f4166; BYTE $0xdb // pcmpeqw xmm3, xmm11 @@ -28950,14 +30209,14 @@ LBB5_110: QUAD $0x012c0e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 44], 1 QUAD $0x022c3e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 44], 2 QUAD $0x032c364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 44], 3 - QUAD $0x042c164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 44], 4 - QUAD $0x052c2e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 44], 5 + QUAD $0x042c264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 44], 4 + QUAD $0x052c164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 44], 5 QUAD $0x062c1e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 44], 6 LONG $0xeb0f4166; BYTE $0xe8 // por xmm5, xmm8 LONG $0x6f0f4166; BYTE $0xd7 // movdqa xmm2, xmm15 LONG $0xc36f0f66 // movdqa xmm0, xmm3 LONG $0x380f4166; WORD $0xd110 // pblendvb xmm2, xmm9, xmm0 - LONG $0x7c6e0f66; WORD $0x2824 // movd xmm7, dword [rsp + 40] + LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] LONG $0x5cb70f45; WORD $0x3a3e // movzx r11d, word [r14 + rdi + 58] QUAD $0x072c0e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 44], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 @@ -28967,15 +30226,15 @@ LBB5_110: LONG $0x6f0f4166; BYTE $0xf7 // movdqa xmm6, xmm15 LONG $0xc16f0f66 // movdqa xmm0, xmm1 LONG $0x380f4166; WORD $0xf110 // pblendvb xmm6, xmm9, xmm0 - LONG $0x6e0f4166; BYTE $0xcc // movd xmm1, r12d - LONG $0x64b70f45; WORD $0x3c3e // movzx 
r12d, word [r14 + rdi + 60] + LONG $0x6e0f4166; BYTE $0xcd // movd xmm1, r13d + LONG $0x6cb70f45; WORD $0x3c3e // movzx r13d, word [r14 + rdi + 60] LONG $0xf2eb0f66 // por xmm6, xmm2 LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d QUAD $0x012e0e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 46], 1 QUAD $0x022e3e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 46], 2 QUAD $0x032e3664c40f4166 // pinsrw xmm4, word [r14 + rsi + 46], 3 - QUAD $0x042e1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 46], 4 - QUAD $0x052e2e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 46], 5 + QUAD $0x042e2664c40f4366 // pinsrw xmm4, word [r14 + r12 + 46], 4 + QUAD $0x052e1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 46], 5 QUAD $0x062e1e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 46], 6 QUAD $0x072e0e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 46], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 @@ -28988,8 +30247,8 @@ LBB5_110: QUAD $0x01320e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 50], 1 QUAD $0x02323e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 50], 2 QUAD $0x0332364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 50], 3 - QUAD $0x0432164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 50], 4 - QUAD $0x05322e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 50], 5 + QUAD $0x0432264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 50], 4 + QUAD $0x0532164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 50], 5 QUAD $0x06321e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 50], 6 QUAD $0x07320e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 50], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 @@ -29002,8 +30261,8 @@ LBB5_110: QUAD $0x01300e7cc40f4166 // pinsrw xmm7, word [r14 + rcx + 48], 1 QUAD $0x02303e7cc40f4366 // pinsrw xmm7, word [r14 + r15 + 48], 2 QUAD $0x0330367cc40f4166 // pinsrw xmm7, word [r14 + rsi + 48], 3 - QUAD $0x0430167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 48], 4 - QUAD $0x05302e7cc40f4366 // pinsrw xmm7, word [r14 + r13 + 48], 5 + QUAD $0x0430267cc40f4366 // pinsrw xmm7, word [r14 + r12 + 48], 4 + QUAD $0x0530167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 48], 5 QUAD $0x06301e7cc40f4166 // pinsrw xmm7, word [r14 + rbx + 48], 6 QUAD $0x07300e7cc40f4366 // pinsrw xmm7, word [r14 + r9 + 48], 7 LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 @@ -29011,8 +30270,8 @@ LBB5_110: QUAD $0x01340e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 52], 1 QUAD $0x02343e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 52], 2 QUAD $0x03343654c40f4166 // pinsrw xmm2, word [r14 + rsi + 52], 3 - QUAD $0x04341654c40f4166 // pinsrw xmm2, word [r14 + rdx + 52], 4 - QUAD $0x05342e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 52], 5 + QUAD $0x04342654c40f4366 // pinsrw xmm2, word [r14 + r12 + 52], 4 + QUAD $0x05341654c40f4166 // pinsrw xmm2, word [r14 + rdx + 52], 5 QUAD $0x06341e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 52], 6 LONG $0xff630f66 // packsswb xmm7, xmm7 QUAD $0x07340e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 52], 7 @@ -29020,8 +30279,8 @@ LBB5_110: QUAD $0x01360e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 54], 1 QUAD $0x02363e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 54], 2 QUAD $0x0336365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 54], 3 - QUAD $0x0436165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 54], 4 - QUAD $0x05362e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 54], 5 + QUAD $0x0436265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 54], 4 + QUAD $0x0536165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 54], 5 QUAD $0x06361e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 54], 6 LONG $0xd2630f66 // packsswb xmm2, xmm2 QUAD 
$0x07360e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 54], 7 @@ -29029,8 +30288,8 @@ LBB5_110: QUAD $0x01380e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 56], 1 QUAD $0x02383e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 56], 2 QUAD $0x0338364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 56], 3 - QUAD $0x0438164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 56], 4 - QUAD $0x05382e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 56], 5 + QUAD $0x0438264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 56], 4 + QUAD $0x0538164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 56], 5 QUAD $0x06381e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 56], 6 LONG $0xdb630f66 // packsswb xmm3, xmm3 QUAD $0x07380e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 56], 7 @@ -29046,20 +30305,20 @@ LBB5_110: QUAD $0x013a0e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 58], 1 QUAD $0x023a3e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 58], 2 QUAD $0x033a3654c40f4166 // pinsrw xmm2, word [r14 + rsi + 58], 3 - QUAD $0x043a1654c40f4166 // pinsrw xmm2, word [r14 + rdx + 58], 4 - QUAD $0x053a2e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 58], 5 + QUAD $0x043a2654c40f4366 // pinsrw xmm2, word [r14 + r12 + 58], 4 + QUAD $0x053a1654c40f4166 // pinsrw xmm2, word [r14 + rdx + 58], 5 QUAD $0x063a1e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 58], 6 QUAD $0x073a0e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 58], 7 LONG $0xc9630f66 // packsswb xmm1, xmm1 LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 LONG $0xf5eb0f66 // por xmm6, xmm5 - LONG $0x6e0f4166; BYTE $0xdc // movd xmm3, r12d + LONG $0x6e0f4166; BYTE $0xdd // movd xmm3, r13d LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] QUAD $0x013c0e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 60], 1 QUAD $0x023c3e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 60], 2 QUAD $0x033c365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 60], 3 - QUAD $0x043c165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 60], 4 - QUAD $0x053c2e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 60], 5 + QUAD $0x043c265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 60], 4 + QUAD $0x053c165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 60], 5 QUAD $0x063c1e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 60], 6 LONG $0xd2630f66 // packsswb xmm2, xmm2 QUAD $0x073c0e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 60], 7 @@ -29082,8 +30341,8 @@ LBB5_110: QUAD $0x013e0e44c40f4166 // pinsrw xmm0, word [r14 + rcx + 62], 1 QUAD $0x023e3e44c40f4366 // pinsrw xmm0, word [r14 + r15 + 62], 2 QUAD $0x033e3644c40f4166 // pinsrw xmm0, word [r14 + rsi + 62], 3 - QUAD $0x043e1644c40f4166 // pinsrw xmm0, word [r14 + rdx + 62], 4 - QUAD $0x053e2e44c40f4366 // pinsrw xmm0, word [r14 + r13 + 62], 5 + QUAD $0x043e2644c40f4366 // pinsrw xmm0, word [r14 + r12 + 62], 4 + QUAD $0x053e1644c40f4166 // pinsrw xmm0, word [r14 + rdx + 62], 5 QUAD $0x063e1e44c40f4166 // pinsrw xmm0, word [r14 + rbx + 62], 6 QUAD $0x073e0e44c40f4366 // pinsrw xmm0, word [r14 + r9 + 62], 7 LONG $0x750f4166; BYTE $0xc3 // pcmpeqw xmm0, xmm11 @@ -29108,25 +30367,25 @@ LBB5_110: LONG $0x7f0f41f3; WORD $0x8844; BYTE $0x10 // movdqu oword [r8 + 4*rcx + 16], xmm0 LONG $0x08c18348 // add rcx, 8 WORD $0x8948; BYTE $0xcf // mov rdi, rcx - LONG $0x244c3b48; BYTE $0x18 // cmp rcx, qword [rsp + 24] - JNE LBB5_110 - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] - LONG $0x24543b4c; BYTE $0x18 // cmp r10, qword [rsp + 24] + LONG $0x244c3b48; BYTE $0x28 // cmp rcx, qword [rsp + 40] + JNE LBB5_111 + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + LONG $0x24543b4c; BYTE $0x28 // cmp r10, qword [rsp + 
40] QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] LONG $0x245c8b44; BYTE $0x10 // mov r11d, dword [rsp + 16] LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - JNE LBB5_112 - JMP LBB5_115 + JNE LBB5_113 + JMP LBB5_116 -LBB5_132: +LBB5_133: LONG $0xf8e28349 // and r10, -8 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x06e0c148 // shl rax, 6 WORD $0x014c; BYTE $0xf0 // add rax, r14 LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax + LONG $0x2454894c; BYTE $0x28 // mov qword [rsp + 40], r10 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - LONG $0x2454894c; BYTE $0x18 // mov qword [rsp + 24], r10 LONG $0x90048d4a // lea rax, [rax + 4*r10] LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax LONG $0x6e0f4166; BYTE $0xc3 // movd xmm0, r11d @@ -29135,13 +30394,13 @@ LBB5_132: WORD $0xff31 // xor edi, edi LONG $0xef0f4566; BYTE $0xc9 // pxor xmm9, xmm9 -LBB5_133: +LBB5_134: LONG $0x247c8948; BYTE $0x40 // mov qword [rsp + 64], rdi LONG $0x06e7c148 // shl rdi, 6 WORD $0x8949; BYTE $0xff // mov r15, rdi WORD $0x8948; BYTE $0xfe // mov rsi, rdi + WORD $0x8949; BYTE $0xfc // mov r12, rdi WORD $0x8948; BYTE $0xfa // mov rdx, rdi - WORD $0x8949; BYTE $0xfd // mov r13, rdi WORD $0x8948; BYTE $0xfb // mov rbx, rdi WORD $0x8949; BYTE $0xf9 // mov r9, rdi LONG $0x04b70f41; BYTE $0x3e // movzx eax, word [r14 + rdi] @@ -29159,39 +30418,39 @@ LBB5_133: LONG $0x44b70f41; WORD $0x0c3e // movzx eax, word [r14 + rdi + 12] LONG $0x44b70f45; WORD $0x0e3e // movzx r8d, word [r14 + rdi + 14] LONG $0x54b70f45; WORD $0x103e // movzx r10d, word [r14 + rdi + 16] - LONG $0x64b70f45; WORD $0x123e // movzx r12d, word [r14 + rdi + 18] + LONG $0x6cb70f45; WORD $0x123e // movzx r13d, word [r14 + rdi + 18] LONG $0x4cb70f41; WORD $0x143e // movzx ecx, word [r14 + rdi + 20] - LONG $0x28244c89 // mov dword [rsp + 40], ecx + LONG $0x20244c89 // mov dword [rsp + 32], ecx WORD $0x8948; BYTE $0xf9 // mov rcx, rdi LONG $0x40c98348 // or rcx, 64 LONG $0x80cf8149; WORD $0x0000; BYTE $0x00 // or r15, 128 LONG $0xc0ce8148; WORD $0x0000; BYTE $0x00 // or rsi, 192 - LONG $0x00ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 256 - LONG $0x40cd8149; WORD $0x0001; BYTE $0x00 // or r13, 320 + LONG $0x00cc8149; WORD $0x0001; BYTE $0x00 // or r12, 256 + LONG $0x40ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 320 LONG $0x80cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 384 LONG $0xc40f4166; WORD $0x0e2c; BYTE $0x01 // pinsrw xmm5, word [r14 + rcx], 1 LONG $0xc40f4366; WORD $0x3e2c; BYTE $0x02 // pinsrw xmm5, word [r14 + r15], 2 LONG $0xc40f4166; WORD $0x362c; BYTE $0x03 // pinsrw xmm5, word [r14 + rsi], 3 - LONG $0xc40f4166; WORD $0x162c; BYTE $0x04 // pinsrw xmm5, word [r14 + rdx], 4 - LONG $0xc40f4366; WORD $0x2e2c; BYTE $0x05 // pinsrw xmm5, word [r14 + r13], 5 + LONG $0xc40f4366; WORD $0x262c; BYTE $0x04 // pinsrw xmm5, word [r14 + r12], 4 + LONG $0xc40f4166; WORD $0x162c; BYTE $0x05 // pinsrw xmm5, word [r14 + rdx], 5 LONG $0xc40f4166; WORD $0x1e2c; BYTE $0x06 // pinsrw xmm5, word [r14 + rbx], 6 QUAD $0x01020e44c40f4166 // pinsrw xmm0, word [r14 + rcx + 2], 1 QUAD $0x02023e44c40f4366 // pinsrw xmm0, word [r14 + r15 + 2], 2 QUAD $0x03023644c40f4166 // pinsrw xmm0, word [r14 + rsi + 2], 3 - QUAD $0x04021644c40f4166 // pinsrw xmm0, word [r14 + rdx + 2], 4 - QUAD $0x05022e44c40f4366 // pinsrw xmm0, word [r14 + r13 + 2], 5 + QUAD $0x04022644c40f4366 // pinsrw xmm0, word [r14 + r12 + 2], 4 + QUAD $0x05021644c40f4166 // pinsrw xmm0, word [r14 + rdx + 2], 5 QUAD 
$0x06021e44c40f4166 // pinsrw xmm0, word [r14 + rbx + 2], 6 LONG $0xc0c98149; WORD $0x0001; BYTE $0x00 // or r9, 448 QUAD $0x07020e44c40f4366 // pinsrw xmm0, word [r14 + r9 + 2], 7 LONG $0xd06e0f66 // movd xmm2, eax LONG $0x44b70f41; WORD $0x163e // movzx eax, word [r14 + rdi + 22] - LONG $0x20244489 // mov dword [rsp + 32], eax + LONG $0x18244489 // mov dword [rsp + 24], eax LONG $0x750f4166; BYTE $0xc3 // pcmpeqw xmm0, xmm11 QUAD $0x01040e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 4], 1 QUAD $0x02043e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 4], 2 QUAD $0x0304364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 4], 3 - QUAD $0x0404164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 4], 4 - QUAD $0x05042e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 4], 5 + QUAD $0x0404264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 4], 4 + QUAD $0x0504164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 4], 5 QUAD $0x06041e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 4], 6 QUAD $0x07040e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 4], 7 LONG $0xc0630f66 // packsswb xmm0, xmm0 @@ -29214,8 +30473,8 @@ LBB5_133: QUAD $0x01060e7cc40f4166 // pinsrw xmm7, word [r14 + rcx + 6], 1 QUAD $0x02063e7cc40f4366 // pinsrw xmm7, word [r14 + r15 + 6], 2 QUAD $0x0306367cc40f4166 // pinsrw xmm7, word [r14 + rsi + 6], 3 - QUAD $0x0406167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 6], 4 - QUAD $0x05062e7cc40f4366 // pinsrw xmm7, word [r14 + r13 + 6], 5 + QUAD $0x0406267cc40f4366 // pinsrw xmm7, word [r14 + r12 + 6], 4 + QUAD $0x0506167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 6], 5 QUAD $0x06061e7cc40f4166 // pinsrw xmm7, word [r14 + rbx + 6], 6 QUAD $0x07060e7cc40f4366 // pinsrw xmm7, word [r14 + r9 + 6], 7 LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 @@ -29223,8 +30482,8 @@ LBB5_133: QUAD $0x01080e44c40f4566 // pinsrw xmm8, word [r14 + rcx + 8], 1 QUAD $0x02083e44c40f4766 // pinsrw xmm8, word [r14 + r15 + 8], 2 QUAD $0x03083644c40f4566 // pinsrw xmm8, word [r14 + rsi + 8], 3 - QUAD $0x04081644c40f4566 // pinsrw xmm8, word [r14 + rdx + 8], 4 - QUAD $0x05082e44c40f4766 // pinsrw xmm8, word [r14 + r13 + 8], 5 + QUAD $0x04082644c40f4766 // pinsrw xmm8, word [r14 + r12 + 8], 4 + QUAD $0x05081644c40f4566 // pinsrw xmm8, word [r14 + rdx + 8], 5 QUAD $0x06081e44c40f4566 // pinsrw xmm8, word [r14 + rbx + 8], 6 QUAD $0x07080e44c40f4766 // pinsrw xmm8, word [r14 + r9 + 8], 7 LONG $0xddf80f66 // psubb xmm3, xmm5 @@ -29239,13 +30498,13 @@ LBB5_133: QUAD $0x0000b0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 176[rbp] /* [rip + .LCPI5_11] */ LONG $0x6f0f4166; BYTE $0xc0 // movdqa xmm0, xmm8 LONG $0x380f4566; WORD $0xe910 // pblendvb xmm13, xmm9, xmm0 - LONG $0x6e0f4166; BYTE $0xf4 // movd xmm6, r12d - LONG $0x64b70f45; WORD $0x1c3e // movzx r12d, word [r14 + rdi + 28] + LONG $0x6e0f4166; BYTE $0xf5 // movd xmm6, r13d + LONG $0x6cb70f45; WORD $0x1c3e // movzx r13d, word [r14 + rdi + 28] QUAD $0x010a0e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 10], 1 QUAD $0x020a3e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 10], 2 QUAD $0x030a3664c40f4166 // pinsrw xmm4, word [r14 + rsi + 10], 3 - QUAD $0x040a1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 10], 4 - QUAD $0x050a2e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 10], 5 + QUAD $0x040a2664c40f4366 // pinsrw xmm4, word [r14 + r12 + 10], 4 + QUAD $0x050a1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 10], 5 QUAD $0x060a1e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 10], 6 QUAD $0x070a0e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 10], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 @@ -29253,14 
+30512,14 @@ LBB5_133: QUAD $0x010c0e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 12], 1 QUAD $0x020c3e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 12], 2 QUAD $0x030c3654c40f4166 // pinsrw xmm2, word [r14 + rsi + 12], 3 - QUAD $0x040c1654c40f4166 // pinsrw xmm2, word [r14 + rdx + 12], 4 - QUAD $0x050c2e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 12], 5 + QUAD $0x040c2654c40f4366 // pinsrw xmm2, word [r14 + r12 + 12], 4 + QUAD $0x050c1654c40f4166 // pinsrw xmm2, word [r14 + rdx + 12], 5 QUAD $0x060c1e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 12], 6 LONG $0xeb0f4466; BYTE $0xe3 // por xmm12, xmm3 QUAD $0x000000c0ad6f0f66 // movdqa xmm5, oword 192[rbp] /* [rip + .LCPI5_12] */ LONG $0xc46f0f66 // movdqa xmm0, xmm4 LONG $0x380f4166; WORD $0xe910 // pblendvb xmm5, xmm9, xmm0 - LONG $0x646e0f66; WORD $0x2824 // movd xmm4, dword [rsp + 40] + LONG $0x646e0f66; WORD $0x2024 // movd xmm4, dword [rsp + 32] LONG $0x54b70f45; WORD $0x1e3e // movzx r10d, word [r14 + rdi + 30] QUAD $0x070c0e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 12], 7 LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 @@ -29269,26 +30528,26 @@ LBB5_133: QUAD $0x0000d0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 208[rbp] /* [rip + .LCPI5_13] */ LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0x380f4566; WORD $0xe910 // pblendvb xmm13, xmm9, xmm0 - LONG $0x5c6e0f66; WORD $0x2024 // movd xmm3, dword [rsp + 32] + LONG $0x5c6e0f66; WORD $0x1824 // movd xmm3, dword [rsp + 24] LONG $0x44b70f41; WORD $0x203e // movzx eax, word [r14 + rdi + 32] - LONG $0x20244489 // mov dword [rsp + 32], eax + LONG $0x18244489 // mov dword [rsp + 24], eax QUAD $0x010e0e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 14], 1 QUAD $0x020e3e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 14], 2 QUAD $0x030e364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 14], 3 - QUAD $0x040e164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 14], 4 - QUAD $0x050e2e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 14], 5 + QUAD $0x040e264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 14], 4 + QUAD $0x050e164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 14], 5 QUAD $0x060e1e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 14], 6 LONG $0xeb0f4466; BYTE $0xed // por xmm13, xmm5 LONG $0x6e0f4166; BYTE $0xd3 // movd xmm2, r11d LONG $0x44b70f41; WORD $0x223e // movzx eax, word [r14 + rdi + 34] - LONG $0x28244489 // mov dword [rsp + 40], eax + LONG $0x20244489 // mov dword [rsp + 32], eax QUAD $0x070e0e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 14], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 QUAD $0x01120e74c40f4166 // pinsrw xmm6, word [r14 + rcx + 18], 1 QUAD $0x02123e74c40f4366 // pinsrw xmm6, word [r14 + r15 + 18], 2 QUAD $0x03123674c40f4166 // pinsrw xmm6, word [r14 + rsi + 18], 3 - QUAD $0x04121674c40f4166 // pinsrw xmm6, word [r14 + rdx + 18], 4 - QUAD $0x05122e74c40f4366 // pinsrw xmm6, word [r14 + r13 + 18], 5 + QUAD $0x04122674c40f4366 // pinsrw xmm6, word [r14 + r12 + 18], 4 + QUAD $0x05121674c40f4166 // pinsrw xmm6, word [r14 + rdx + 18], 5 QUAD $0x06121e74c40f4166 // pinsrw xmm6, word [r14 + rbx + 18], 6 LONG $0xc9630f66 // packsswb xmm1, xmm1 QUAD $0x07120e74c40f4366 // pinsrw xmm6, word [r14 + r9 + 18], 7 @@ -29307,14 +30566,14 @@ LBB5_133: QUAD $0x01100e7cc40f4166 // pinsrw xmm7, word [r14 + rcx + 16], 1 QUAD $0x02103e7cc40f4366 // pinsrw xmm7, word [r14 + r15 + 16], 2 QUAD $0x0310367cc40f4166 // pinsrw xmm7, word [r14 + rsi + 16], 3 - QUAD $0x0410167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 16], 4 - QUAD $0x05102e7cc40f4366 // pinsrw xmm7, word [r14 + r13 + 16], 5 + QUAD 
$0x0410267cc40f4366 // pinsrw xmm7, word [r14 + r12 + 16], 4 + QUAD $0x0510167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 16], 5 QUAD $0x06101e7cc40f4166 // pinsrw xmm7, word [r14 + rbx + 16], 6 QUAD $0x01140e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 20], 1 QUAD $0x02143e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 20], 2 QUAD $0x03143664c40f4166 // pinsrw xmm4, word [r14 + rsi + 20], 3 - QUAD $0x04141664c40f4166 // pinsrw xmm4, word [r14 + rdx + 20], 4 - QUAD $0x05142e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 20], 5 + QUAD $0x04142664c40f4366 // pinsrw xmm4, word [r14 + r12 + 20], 4 + QUAD $0x05141664c40f4166 // pinsrw xmm4, word [r14 + rdx + 20], 5 QUAD $0x06141e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 20], 6 QUAD $0x07140e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 20], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 @@ -29322,8 +30581,8 @@ LBB5_133: LONG $0xeb0f4566; BYTE $0xe5 // por xmm12, xmm13 LONG $0xc46f0f66 // movdqa xmm0, xmm4 LONG $0x380f4566; WORD $0xf910 // pblendvb xmm15, xmm9, xmm0 - LONG $0x6e0f4166; BYTE $0xe4 // movd xmm4, r12d - LONG $0x64b70f45; WORD $0x263e // movzx r12d, word [r14 + rdi + 38] + LONG $0x6e0f4166; BYTE $0xe5 // movd xmm4, r13d + LONG $0x6cb70f45; WORD $0x263e // movzx r13d, word [r14 + rdi + 38] QUAD $0x07100e7cc40f4366 // pinsrw xmm7, word [r14 + r9 + 16], 7 LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 QUAD $0x00000160bdef0f66 // pxor xmm7, oword 352[rbp] /* [rip + .LCPI5_22] */ @@ -29331,8 +30590,8 @@ LBB5_133: QUAD $0x01160e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 22], 1 QUAD $0x02163e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 22], 2 QUAD $0x0316365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 22], 3 - QUAD $0x0416165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 22], 4 - QUAD $0x05162e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 22], 5 + QUAD $0x0416265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 22], 4 + QUAD $0x0516165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 22], 5 QUAD $0x06161e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 22], 6 QUAD $0x07160e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 22], 7 LONG $0x750f4166; BYTE $0xdb // pcmpeqw xmm3, xmm11 @@ -29340,8 +30599,8 @@ LBB5_133: QUAD $0x01180e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 24], 1 QUAD $0x02183e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 24], 2 QUAD $0x03183654c40f4166 // pinsrw xmm2, word [r14 + rsi + 24], 3 - QUAD $0x04181654c40f4166 // pinsrw xmm2, word [r14 + rdx + 24], 4 - QUAD $0x05182e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 24], 5 + QUAD $0x04182654c40f4366 // pinsrw xmm2, word [r14 + r12 + 24], 4 + QUAD $0x05181654c40f4166 // pinsrw xmm2, word [r14 + rdx + 24], 5 QUAD $0x06181e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 24], 6 QUAD $0x07180e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 24], 7 LONG $0xf80f4466; BYTE $0xc7 // psubb xmm8, xmm7 @@ -29356,13 +30615,13 @@ LBB5_133: QUAD $0x0000b0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 176[rbp] /* [rip + .LCPI5_11] */ LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0x380f4566; WORD $0xe910 // pblendvb xmm13, xmm9, xmm0 - LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] + LONG $0x7c6e0f66; WORD $0x1824 // movd xmm7, dword [rsp + 24] LONG $0x54b70f45; WORD $0x2a3e // movzx r10d, word [r14 + rdi + 42] QUAD $0x011a0e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 26], 1 QUAD $0x021a3e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 26], 2 QUAD $0x031a364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 26], 3 - QUAD $0x041a164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 26], 4 - QUAD $0x051a2e4cc40f4366 
// pinsrw xmm1, word [r14 + r13 + 26], 5 + QUAD $0x041a264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 26], 4 + QUAD $0x051a164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 26], 5 QUAD $0x061a1e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 26], 6 QUAD $0x071a0e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 26], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 @@ -29370,17 +30629,17 @@ LBB5_133: QUAD $0x011c0e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 28], 1 QUAD $0x021c3e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 28], 2 QUAD $0x031c3664c40f4166 // pinsrw xmm4, word [r14 + rsi + 28], 3 - QUAD $0x041c1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 28], 4 - QUAD $0x051c2e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 28], 5 + QUAD $0x041c2664c40f4366 // pinsrw xmm4, word [r14 + r12 + 28], 4 + QUAD $0x051c1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 28], 5 QUAD $0x061c1e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 28], 6 LONG $0xeb0f4566; BYTE $0xf0 // por xmm14, xmm8 QUAD $0x0000c0bd6f0f4466; BYTE $0x00 // movdqa xmm15, oword 192[rbp] /* [rip + .LCPI5_12] */ LONG $0x6f0f4166; BYTE $0xef // movdqa xmm5, xmm15 LONG $0xc16f0f66 // movdqa xmm0, xmm1 LONG $0x380f4166; WORD $0xe910 // pblendvb xmm5, xmm9, xmm0 - LONG $0x546e0f66; WORD $0x2824 // movd xmm2, dword [rsp + 40] + LONG $0x546e0f66; WORD $0x2024 // movd xmm2, dword [rsp + 32] LONG $0x44b70f41; WORD $0x2c3e // movzx eax, word [r14 + rdi + 44] - LONG $0x20244489 // mov dword [rsp + 32], eax + LONG $0x18244489 // mov dword [rsp + 24], eax QUAD $0x071c0e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 28], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 LONG $0xe4630f66 // packsswb xmm4, xmm4 @@ -29393,20 +30652,20 @@ LBB5_133: QUAD $0x011e0e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 30], 1 QUAD $0x021e3e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 30], 2 QUAD $0x031e365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 30], 3 - QUAD $0x041e165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 30], 4 - QUAD $0x051e2e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 30], 5 + QUAD $0x041e265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 30], 4 + QUAD $0x051e165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 30], 5 QUAD $0x061e1e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 30], 6 LONG $0xf5eb0f66 // por xmm6, xmm5 - LONG $0x6e0f4166; BYTE $0xcc // movd xmm1, r12d + LONG $0x6e0f4166; BYTE $0xcd // movd xmm1, r13d LONG $0x44b70f41; WORD $0x303e // movzx eax, word [r14 + rdi + 48] - LONG $0x28244489 // mov dword [rsp + 40], eax + LONG $0x20244489 // mov dword [rsp + 32], eax QUAD $0x071e0e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 30], 7 LONG $0x750f4166; BYTE $0xdb // pcmpeqw xmm3, xmm11 QUAD $0x01220e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 34], 1 QUAD $0x02223e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 34], 2 QUAD $0x03223654c40f4166 // pinsrw xmm2, word [r14 + rsi + 34], 3 - QUAD $0x04221654c40f4166 // pinsrw xmm2, word [r14 + rdx + 34], 4 - QUAD $0x05222e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 34], 5 + QUAD $0x04222654c40f4366 // pinsrw xmm2, word [r14 + r12 + 34], 4 + QUAD $0x05221654c40f4166 // pinsrw xmm2, word [r14 + rdx + 34], 5 QUAD $0x06221e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 34], 6 LONG $0xdb630f66 // packsswb xmm3, xmm3 QUAD $0x07220e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 34], 7 @@ -29420,18 +30679,18 @@ LBB5_133: LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0x380f4566; WORD $0xc110 // pblendvb xmm8, xmm9, xmm0 LONG $0x6e0f4166; BYTE $0xd0 // movd xmm2, r8d - LONG $0x64b70f45; WORD $0x323e // movzx r12d, word [r14 + rdi + 50] + 
LONG $0x6cb70f45; WORD $0x323e // movzx r13d, word [r14 + rdi + 50] QUAD $0x01200e7cc40f4166 // pinsrw xmm7, word [r14 + rcx + 32], 1 QUAD $0x02203e7cc40f4366 // pinsrw xmm7, word [r14 + r15 + 32], 2 QUAD $0x0320367cc40f4166 // pinsrw xmm7, word [r14 + rsi + 32], 3 - QUAD $0x0420167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 32], 4 - QUAD $0x05202e7cc40f4366 // pinsrw xmm7, word [r14 + r13 + 32], 5 + QUAD $0x0420267cc40f4366 // pinsrw xmm7, word [r14 + r12 + 32], 4 + QUAD $0x0520167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 32], 5 QUAD $0x06201e7cc40f4166 // pinsrw xmm7, word [r14 + rbx + 32], 6 QUAD $0x01240e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 36], 1 QUAD $0x02243e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 36], 2 QUAD $0x03243664c40f4166 // pinsrw xmm4, word [r14 + rsi + 36], 3 - QUAD $0x04241664c40f4166 // pinsrw xmm4, word [r14 + rdx + 36], 4 - QUAD $0x05242e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 36], 5 + QUAD $0x04242664c40f4366 // pinsrw xmm4, word [r14 + r12 + 36], 4 + QUAD $0x05241664c40f4166 // pinsrw xmm4, word [r14 + rdx + 36], 5 QUAD $0x06241e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 36], 6 QUAD $0x07240e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 36], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 @@ -29450,8 +30709,8 @@ LBB5_133: QUAD $0x01260e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 38], 1 QUAD $0x02263e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 38], 2 QUAD $0x0326364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 38], 3 - QUAD $0x0426164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 38], 4 - QUAD $0x05262e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 38], 5 + QUAD $0x0426264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 38], 4 + QUAD $0x0526164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 38], 5 QUAD $0x06261e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 38], 6 QUAD $0x07260e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 38], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 @@ -29459,15 +30718,15 @@ LBB5_133: QUAD $0x01280e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 40], 1 QUAD $0x02283e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 40], 2 QUAD $0x03283654c40f4166 // pinsrw xmm2, word [r14 + rsi + 40], 3 - QUAD $0x04281654c40f4166 // pinsrw xmm2, word [r14 + rdx + 40], 4 - QUAD $0x05282e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 40], 5 + QUAD $0x04282654c40f4366 // pinsrw xmm2, word [r14 + r12 + 40], 4 + QUAD $0x05281654c40f4166 // pinsrw xmm2, word [r14 + rdx + 40], 5 QUAD $0x06281e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 40], 6 QUAD $0x07280e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 40], 7 LONG $0xf80f4466; BYTE $0xc7 // psubb xmm8, xmm7 QUAD $0x000000a0ad6f0f66 // movdqa xmm5, oword 160[rbp] /* [rip + .LCPI5_10] */ LONG $0xc16f0f66 // movdqa xmm0, xmm1 LONG $0x380f4166; WORD $0xe910 // pblendvb xmm5, xmm9, xmm0 - LONG $0x4c6e0f66; WORD $0x2024 // movd xmm1, dword [rsp + 32] + LONG $0x4c6e0f66; WORD $0x1824 // movd xmm1, dword [rsp + 24] LONG $0x44b70f45; WORD $0x363e // movzx r8d, word [r14 + rdi + 54] LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -29480,8 +30739,8 @@ LBB5_133: QUAD $0x012a0e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 42], 1 QUAD $0x022a3e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 42], 2 QUAD $0x032a365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 42], 3 - QUAD $0x042a165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 42], 4 - QUAD $0x052a2e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 42], 5 + QUAD $0x042a265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 42], 4 + QUAD 
$0x052a165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 42], 5 QUAD $0x062a1e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 42], 6 QUAD $0x072a0e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 42], 7 LONG $0x750f4166; BYTE $0xdb // pcmpeqw xmm3, xmm11 @@ -29489,14 +30748,14 @@ LBB5_133: QUAD $0x012c0e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 44], 1 QUAD $0x022c3e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 44], 2 QUAD $0x032c364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 44], 3 - QUAD $0x042c164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 44], 4 - QUAD $0x052c2e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 44], 5 + QUAD $0x042c264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 44], 4 + QUAD $0x052c164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 44], 5 QUAD $0x062c1e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 44], 6 LONG $0xeb0f4166; BYTE $0xe8 // por xmm5, xmm8 LONG $0x6f0f4166; BYTE $0xd7 // movdqa xmm2, xmm15 LONG $0xc36f0f66 // movdqa xmm0, xmm3 LONG $0x380f4166; WORD $0xd110 // pblendvb xmm2, xmm9, xmm0 - LONG $0x7c6e0f66; WORD $0x2824 // movd xmm7, dword [rsp + 40] + LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] LONG $0x5cb70f45; WORD $0x3a3e // movzx r11d, word [r14 + rdi + 58] QUAD $0x072c0e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 44], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 @@ -29506,15 +30765,15 @@ LBB5_133: LONG $0x6f0f4166; BYTE $0xf7 // movdqa xmm6, xmm15 LONG $0xc16f0f66 // movdqa xmm0, xmm1 LONG $0x380f4166; WORD $0xf110 // pblendvb xmm6, xmm9, xmm0 - LONG $0x6e0f4166; BYTE $0xcc // movd xmm1, r12d - LONG $0x64b70f45; WORD $0x3c3e // movzx r12d, word [r14 + rdi + 60] + LONG $0x6e0f4166; BYTE $0xcd // movd xmm1, r13d + LONG $0x6cb70f45; WORD $0x3c3e // movzx r13d, word [r14 + rdi + 60] LONG $0xf2eb0f66 // por xmm6, xmm2 LONG $0x6e0f4166; BYTE $0xd2 // movd xmm2, r10d QUAD $0x012e0e64c40f4166 // pinsrw xmm4, word [r14 + rcx + 46], 1 QUAD $0x022e3e64c40f4366 // pinsrw xmm4, word [r14 + r15 + 46], 2 QUAD $0x032e3664c40f4166 // pinsrw xmm4, word [r14 + rsi + 46], 3 - QUAD $0x042e1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 46], 4 - QUAD $0x052e2e64c40f4366 // pinsrw xmm4, word [r14 + r13 + 46], 5 + QUAD $0x042e2664c40f4366 // pinsrw xmm4, word [r14 + r12 + 46], 4 + QUAD $0x052e1664c40f4166 // pinsrw xmm4, word [r14 + rdx + 46], 5 QUAD $0x062e1e64c40f4166 // pinsrw xmm4, word [r14 + rbx + 46], 6 QUAD $0x072e0e64c40f4366 // pinsrw xmm4, word [r14 + r9 + 46], 7 LONG $0x750f4166; BYTE $0xe3 // pcmpeqw xmm4, xmm11 @@ -29527,8 +30786,8 @@ LBB5_133: QUAD $0x01320e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 50], 1 QUAD $0x02323e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 50], 2 QUAD $0x0332364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 50], 3 - QUAD $0x0432164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 50], 4 - QUAD $0x05322e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 50], 5 + QUAD $0x0432264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 50], 4 + QUAD $0x0532164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 50], 5 QUAD $0x06321e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 50], 6 QUAD $0x07320e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 50], 7 LONG $0x750f4166; BYTE $0xcb // pcmpeqw xmm1, xmm11 @@ -29541,8 +30800,8 @@ LBB5_133: QUAD $0x01300e7cc40f4166 // pinsrw xmm7, word [r14 + rcx + 48], 1 QUAD $0x02303e7cc40f4366 // pinsrw xmm7, word [r14 + r15 + 48], 2 QUAD $0x0330367cc40f4166 // pinsrw xmm7, word [r14 + rsi + 48], 3 - QUAD $0x0430167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 48], 4 - QUAD $0x05302e7cc40f4366 // pinsrw xmm7, word [r14 + r13 + 48], 5 + QUAD 
$0x0430267cc40f4366 // pinsrw xmm7, word [r14 + r12 + 48], 4 + QUAD $0x0530167cc40f4166 // pinsrw xmm7, word [r14 + rdx + 48], 5 QUAD $0x06301e7cc40f4166 // pinsrw xmm7, word [r14 + rbx + 48], 6 QUAD $0x07300e7cc40f4366 // pinsrw xmm7, word [r14 + r9 + 48], 7 LONG $0x750f4166; BYTE $0xfb // pcmpeqw xmm7, xmm11 @@ -29550,8 +30809,8 @@ LBB5_133: QUAD $0x01340e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 52], 1 QUAD $0x02343e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 52], 2 QUAD $0x03343654c40f4166 // pinsrw xmm2, word [r14 + rsi + 52], 3 - QUAD $0x04341654c40f4166 // pinsrw xmm2, word [r14 + rdx + 52], 4 - QUAD $0x05342e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 52], 5 + QUAD $0x04342654c40f4366 // pinsrw xmm2, word [r14 + r12 + 52], 4 + QUAD $0x05341654c40f4166 // pinsrw xmm2, word [r14 + rdx + 52], 5 QUAD $0x06341e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 52], 6 LONG $0xff630f66 // packsswb xmm7, xmm7 QUAD $0x07340e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 52], 7 @@ -29559,8 +30818,8 @@ LBB5_133: QUAD $0x01360e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 54], 1 QUAD $0x02363e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 54], 2 QUAD $0x0336365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 54], 3 - QUAD $0x0436165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 54], 4 - QUAD $0x05362e5cc40f4366 // pinsrw xmm3, word [r14 + r13 + 54], 5 + QUAD $0x0436265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 54], 4 + QUAD $0x0536165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 54], 5 QUAD $0x06361e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 54], 6 LONG $0xd2630f66 // packsswb xmm2, xmm2 QUAD $0x07360e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 54], 7 @@ -29568,8 +30827,8 @@ LBB5_133: QUAD $0x01380e4cc40f4166 // pinsrw xmm1, word [r14 + rcx + 56], 1 QUAD $0x02383e4cc40f4366 // pinsrw xmm1, word [r14 + r15 + 56], 2 QUAD $0x0338364cc40f4166 // pinsrw xmm1, word [r14 + rsi + 56], 3 - QUAD $0x0438164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 56], 4 - QUAD $0x05382e4cc40f4366 // pinsrw xmm1, word [r14 + r13 + 56], 5 + QUAD $0x0438264cc40f4366 // pinsrw xmm1, word [r14 + r12 + 56], 4 + QUAD $0x0538164cc40f4166 // pinsrw xmm1, word [r14 + rdx + 56], 5 QUAD $0x06381e4cc40f4166 // pinsrw xmm1, word [r14 + rbx + 56], 6 LONG $0xdb630f66 // packsswb xmm3, xmm3 QUAD $0x07380e4cc40f4366 // pinsrw xmm1, word [r14 + r9 + 56], 7 @@ -29585,20 +30844,20 @@ LBB5_133: QUAD $0x013a0e54c40f4166 // pinsrw xmm2, word [r14 + rcx + 58], 1 QUAD $0x023a3e54c40f4366 // pinsrw xmm2, word [r14 + r15 + 58], 2 QUAD $0x033a3654c40f4166 // pinsrw xmm2, word [r14 + rsi + 58], 3 - QUAD $0x043a1654c40f4166 // pinsrw xmm2, word [r14 + rdx + 58], 4 - QUAD $0x053a2e54c40f4366 // pinsrw xmm2, word [r14 + r13 + 58], 5 + QUAD $0x043a2654c40f4366 // pinsrw xmm2, word [r14 + r12 + 58], 4 + QUAD $0x053a1654c40f4166 // pinsrw xmm2, word [r14 + rdx + 58], 5 QUAD $0x063a1e54c40f4166 // pinsrw xmm2, word [r14 + rbx + 58], 6 QUAD $0x073a0e54c40f4366 // pinsrw xmm2, word [r14 + r9 + 58], 7 LONG $0xc9630f66 // packsswb xmm1, xmm1 LONG $0x750f4166; BYTE $0xd3 // pcmpeqw xmm2, xmm11 LONG $0xf5eb0f66 // por xmm6, xmm5 - LONG $0x6e0f4166; BYTE $0xdc // movd xmm3, r12d + LONG $0x6e0f4166; BYTE $0xdd // movd xmm3, r13d LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] QUAD $0x013c0e5cc40f4166 // pinsrw xmm3, word [r14 + rcx + 60], 1 QUAD $0x023c3e5cc40f4366 // pinsrw xmm3, word [r14 + r15 + 60], 2 QUAD $0x033c365cc40f4166 // pinsrw xmm3, word [r14 + rsi + 60], 3 - QUAD $0x043c165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 60], 4 - QUAD $0x053c2e5cc40f4366 // 
pinsrw xmm3, word [r14 + r13 + 60], 5 + QUAD $0x043c265cc40f4366 // pinsrw xmm3, word [r14 + r12 + 60], 4 + QUAD $0x053c165cc40f4166 // pinsrw xmm3, word [r14 + rdx + 60], 5 QUAD $0x063c1e5cc40f4166 // pinsrw xmm3, word [r14 + rbx + 60], 6 LONG $0xd2630f66 // packsswb xmm2, xmm2 QUAD $0x073c0e5cc40f4366 // pinsrw xmm3, word [r14 + r9 + 60], 7 @@ -29621,8 +30880,8 @@ LBB5_133: QUAD $0x013e0e44c40f4166 // pinsrw xmm0, word [r14 + rcx + 62], 1 QUAD $0x023e3e44c40f4366 // pinsrw xmm0, word [r14 + r15 + 62], 2 QUAD $0x033e3644c40f4166 // pinsrw xmm0, word [r14 + rsi + 62], 3 - QUAD $0x043e1644c40f4166 // pinsrw xmm0, word [r14 + rdx + 62], 4 - QUAD $0x053e2e44c40f4366 // pinsrw xmm0, word [r14 + r13 + 62], 5 + QUAD $0x043e2644c40f4366 // pinsrw xmm0, word [r14 + r12 + 62], 4 + QUAD $0x053e1644c40f4166 // pinsrw xmm0, word [r14 + rdx + 62], 5 QUAD $0x063e1e44c40f4166 // pinsrw xmm0, word [r14 + rbx + 62], 6 QUAD $0x073e0e44c40f4366 // pinsrw xmm0, word [r14 + r9 + 62], 7 LONG $0x750f4166; BYTE $0xc3 // pcmpeqw xmm0, xmm11 @@ -29647,25 +30906,25 @@ LBB5_133: LONG $0x7f0f41f3; WORD $0x8844; BYTE $0x10 // movdqu oword [r8 + 4*rcx + 16], xmm0 LONG $0x08c18348 // add rcx, 8 WORD $0x8948; BYTE $0xcf // mov rdi, rcx - LONG $0x244c3b48; BYTE $0x18 // cmp rcx, qword [rsp + 24] - JNE LBB5_133 - QUAD $0x000000e024948b4c // mov r10, qword [rsp + 224] - LONG $0x24543b4c; BYTE $0x18 // cmp r10, qword [rsp + 24] + LONG $0x244c3b48; BYTE $0x28 // cmp rcx, qword [rsp + 40] + JNE LBB5_134 + QUAD $0x000000d024948b4c // mov r10, qword [rsp + 208] + LONG $0x24543b4c; BYTE $0x28 // cmp r10, qword [rsp + 40] QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] LONG $0x245c8b44; BYTE $0x10 // mov r11d, dword [rsp + 16] LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - JNE LBB5_135 - JMP LBB5_138 + JNE LBB5_136 + JMP LBB5_139 -LBB5_180: +LBB5_181: WORD $0x894d; BYTE $0xd0 // mov r8, r10 LONG $0xfce08349 // and r8, -4 WORD $0x894c; BYTE $0xc3 // mov rbx, r8 LONG $0x07e3c148 // shl rbx, 7 WORD $0x014c; BYTE $0xf3 // add rbx, r14 LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - LONG $0x801c8d4e // lea r11, [rax + 4*r8] + LONG $0x80248d4e // lea r12, [rax + 4*r8] WORD $0x280f; BYTE $0xc8 // movaps xmm1, xmm0 LONG $0x00c8c60f // shufps xmm1, xmm0, 0 LONG $0xfcc68149; WORD $0x0001; BYTE $0x00 // add r14, 508 @@ -29680,7 +30939,7 @@ LBB5_180: LONG $0x6f0f4466; WORD $0x704d // movdqa xmm9, oword 112[rbp] /* [rip + .LCPI5_7] */ LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] -LBB5_181: +LBB5_182: QUAD $0xfffe04b6100f41f3; BYTE $0xff // movss xmm6, dword [r14 - 508] QUAD $0xfffe08be100f41f3; BYTE $0xff // movss xmm7, dword [r14 - 504] QUAD $0xfffe0cae100f41f3; BYTE $0xff // movss xmm5, dword [r14 - 500] @@ -30025,10 +31284,10 @@ LBB5_181: LONG $0x04c18348 // add rcx, 4 LONG $0x00c68149; WORD $0x0002; BYTE $0x00 // add r14, 512 WORD $0x3949; BYTE $0xc8 // cmp r8, rcx - JNE LBB5_181 + JNE LBB5_182 WORD $0x394d; BYTE $0xc2 // cmp r10, r8 - JNE LBB5_183 - JMP LBB5_186 + JNE LBB5_184 + JMP LBB5_187 TEXT ·_comparison_greater_arr_arr_sse4(SB), $80-48 @@ -32763,7 +34022,7 @@ TEXT ·_comparison_greater_arr_scalar_sse4(SB), $360-48 LEAQ LCDATA5<>(SB), BP WORD $0x894d; BYTE $0xc3 // mov r11, r8 - WORD $0x8949; BYTE $0xcc // mov r12, rcx + LONG $0x240c8948 // mov qword [rsp], rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 JG LBB7_26 WORD $0xff83; BYTE $0x03 // cmp edi, 3 @@ -32771,9 +34030,9 @@ TEXT ·_comparison_greater_arr_scalar_sse4(SB), $360-48 WORD 
$0xff83; BYTE $0x04 // cmp edi, 4 JE LBB7_98 WORD $0xff83; BYTE $0x05 // cmp edi, 5 - JE LBB7_113 + JE LBB7_114 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB7_200 + JNE LBB7_202 WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f538d4d // lea r10, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 @@ -32785,6 +34044,7 @@ TEXT ·_comparison_greater_arr_scalar_sse4(SB), $360-48 WORD $0x2941; BYTE $0xc1 // sub r9d, eax JE LBB7_17 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x240c8b4c // mov r9, qword [rsp] LBB7_15: WORD $0x3b44; BYTE $0x2e // cmp r13d, dword [rsi] @@ -32794,8 +34054,7 @@ LBB7_15: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 - WORD $0x894d; BYTE $0xe1 // mov r9, r12 - LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] + LONG $0x04b60f45; BYTE $0x19 // movzx r8d, byte [r9 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -32804,11 +34063,11 @@ LBB7_15: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xd7 // and dil, dl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1c3c8841 // mov byte [r12 + rbx], dil + LONG $0x193c8841 // mov byte [r9 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB7_15 - LONG $0x01c48349 // add r12, 1 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 LBB7_17: LONG $0x05fac149 // sar r10, 5 @@ -32816,36 +34075,35 @@ LBB7_17: JL LBB7_21 QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 - QUAD $0x000000b02494894c // mov qword [rsp + 176], r10 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 LBB7_19: - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - QUAD $0x000000c02494970f // seta byte [rsp + 192] + QUAD $0x000000d02494970f // seta byte [rsp + 208] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7970f40 // seta dil + LONG $0xd2970f41 // seta r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d - LONG $0xd6970f41 // seta r14b + LONG $0xd7970f40 // seta dil LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d - QUAD $0x000000d02494970f // seta byte [rsp + 208] + QUAD $0x000000b02494970f // seta byte [rsp + 176] LONG $0x106e3944 // cmp dword [rsi + 16], r13d - LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] + LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] LONG $0x146e3944 // cmp dword [rsi + 20], r13d LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] LONG $0x186e3944 // cmp dword [rsi + 24], r13d WORD $0x970f; BYTE $0xd0 // seta al LONG $0x1c6e3944 // cmp dword [rsi + 28], r13d - WORD $0x970f; BYTE $0xd3 // seta bl + LONG $0xd6970f41 // seta r14b LONG $0x206e3944 // cmp dword [rsi + 32], r13d - QUAD $0x000000902494970f // seta byte [rsp + 144] + QUAD $0x000000802494970f // seta byte [rsp + 128] LONG $0x246e3944 // cmp dword [rsi + 36], r13d WORD $0x970f; BYTE $0xd2 // seta dl LONG $0x286e3944 // cmp dword [rsi + 40], r13d LONG $0xd1970f41 // seta r9b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2970f41 // seta r10b - LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3970f41 // seta r11b + LONG $0x306e3944 // cmp dword [rsi + 48], r13d + WORD $0x970f; BYTE $0xd3 // seta bl LONG $0x346e3944 // cmp dword [rsi + 52], r13d LONG $0xd4970f41 // seta r12b LONG $0x386e3944 // cmp dword [rsi + 56], r13d @@ -32853,126 +34111,127 @@ LBB7_19: LONG $0x3c6e3944 // cmp dword [rsi + 60], r13d WORD 
$0x970f; BYTE $0xd1 // seta cl LONG $0x406e3944 // cmp dword [rsi + 64], r13d - LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] + LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] LONG $0x446e3944 // cmp dword [rsi + 68], r13d - LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] + QUAD $0x000000902494970f // seta byte [rsp + 144] LONG $0x486e3944 // cmp dword [rsi + 72], r13d - LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] + LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] LONG $0x4c6e3944 // cmp dword [rsi + 76], r13d - LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] + LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] LONG $0x506e3944 // cmp dword [rsi + 80], r13d - LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] + LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] LONG $0x546e3944 // cmp dword [rsi + 84], r13d - LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] + LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] LONG $0x586e3944 // cmp dword [rsi + 88], r13d - LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] + LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] LONG $0x5c6e3944 // cmp dword [rsi + 92], r13d LONG $0xd7970f41 // seta r15b LONG $0x606e3944 // cmp dword [rsi + 96], r13d - LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] + LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] LONG $0x646e3944 // cmp dword [rsi + 100], r13d - LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] + LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] LONG $0x686e3944 // cmp dword [rsi + 104], r13d - LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] + LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d - LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] + LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] LONG $0x706e3944 // cmp dword [rsi + 112], r13d - LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] + LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] LONG $0x746e3944 // cmp dword [rsi + 116], r13d LONG $0x2454970f; BYTE $0x10 // seta byte [rsp + 16] LONG $0x786e3944 // cmp dword [rsi + 120], r13d - LONG $0x2414970f // seta byte [rsp] + LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d LONG $0xd0970f41 // seta r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000d024940244 // add r10b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xc6 // or r14b, al + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0844; BYTE $0xd7 // or dil, r10b WORD $0xd200 // add dl, dl - LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + LONG $0x80249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x24148b4c // mov r10, qword [rsp] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0xc208 // or 
dl, al WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b + WORD $0x0841; BYTE $0xdc // or r12b, bl QUAD $0x000000a024bcb60f // movzx edi, byte [rsp + 160] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0x0841; BYTE $0xd6 // or r14b, dl WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] + LONG $0x48245402 // add dl, byte [rsp + 72] WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] + LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] + LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x241c8841 // mov byte [r12], bl - LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] + WORD $0x8845; BYTE $0x32 // mov byte [r10], r14b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + LONG $0x014a8841 // mov byte [r10 + 1], cl WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + LONG $0x20244c02 // add cl, byte [rsp + 32] WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] WORD $0xe1c0; BYTE $0x05 
// shl cl, 5 WORD $0xd108 // or cl, dl - LONG $0x2414b60f // movzx edx, byte [rsp] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xd0 // or r8b, dl WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b - LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b + LONG $0x027a8845 // mov byte [r10 + 2], r15b + LONG $0x03428845 // mov byte [r10 + 3], r8b LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c48349 // add r12, 4 - QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 + LONG $0x04c28349 // add r10, 4 + LONG $0x2414894c // mov qword [rsp], r10 + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 JNE LBB7_19 QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] @@ -32980,12 +34239,12 @@ LBB7_19: LBB7_21: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xda // add r10, r11 - JNE LBB7_135 + JNE LBB7_137 WORD $0x3145; BYTE $0xdb // xor r11d, r11d JMP LBB7_24 @@ -32993,14 +34252,14 @@ LBB7_26: WORD $0xff83; BYTE $0x08 // cmp edi, 8 JLE LBB7_27 WORD $0xff83; BYTE $0x09 // cmp edi, 9 - JE LBB7_155 + JE LBB7_157 WORD $0xff83; BYTE $0x0b // cmp edi, 11 - JE LBB7_170 + JE LBB7_172 WORD $0xff83; BYTE $0x0c // cmp edi, 12 - JNE LBB7_200 - LONG $0x1f538d4d // lea r10, [r11 + 31] + JNE LBB7_202 + LONG $0x1f738d4d // lea r14, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xd3490f4d // cmovns r10, r11 + LONG $0xf3490f4d // cmovns r14, r11 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -33009,17 +34268,19 @@ LBB7_26: WORD $0x2941; BYTE $0xc1 // sub r9d, eax JE LBB7_49 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x24148b4c // mov r10, qword [rsp] LBB7_47: - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - LONG $0x08768d48 // lea rsi, [rsi + 8] - WORD $0xd219 // sbb edx, edx + LONG $0x0e100ff2 // movsd xmm1, qword [rsi] + LONG $0x08c68348 // add rsi, 8 + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd2 // seta dl + WORD $0xdaf6 // neg dl LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + LONG $0x0cb60f45; BYTE $0x3a // movzx r9d, byte [r10 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -33028,197 +34289,225 @@ LBB7_47: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + LONG $0x3a1c8841 // mov byte [r10 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB7_47 - LONG $0x01c48349 // add r12, 1 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 LBB7_49: - LONG $0x05fac149 // sar r10, 5 + LONG $0x05fec149 // sar r14, 5 LONG $0x20fb8349 // cmp r11, 32 JL LBB7_53 QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 - QUAD $0x000000b02494894c // mov qword [rsp + 176], r10 - QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 + QUAD $0x000000c024b4894c // mov qword 
[rsp + 192], r14 + QUAD $0x000000d024b4894c // mov qword [rsp + 208], r14 LBB7_51: - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - QUAD $0x000000d02494920f // setb byte [rsp + 208] - LONG $0x462e0f66; BYTE $0x08 // ucomisd xmm0, qword [rsi + 8] - LONG $0xd1920f41 // setb r9b - LONG $0x462e0f66; BYTE $0x10 // ucomisd xmm0, qword [rsi + 16] - LONG $0xd6920f41 // setb r14b - LONG $0x462e0f66; BYTE $0x18 // ucomisd xmm0, qword [rsi + 24] - LONG $0xd5920f41 // setb r13b - LONG $0x462e0f66; BYTE $0x20 // ucomisd xmm0, qword [rsi + 32] - LONG $0x2454920f; BYTE $0x70 // setb byte [rsp + 112] - LONG $0x462e0f66; BYTE $0x28 // ucomisd xmm0, qword [rsi + 40] - LONG $0x2454920f; BYTE $0x58 // setb byte [rsp + 88] - LONG $0x462e0f66; BYTE $0x30 // ucomisd xmm0, qword [rsi + 48] - WORD $0x920f; BYTE $0xd0 // setb al - LONG $0x462e0f66; BYTE $0x38 // ucomisd xmm0, qword [rsi + 56] - WORD $0x920f; BYTE $0xd3 // setb bl - LONG $0x462e0f66; BYTE $0x40 // ucomisd xmm0, qword [rsi + 64] - QUAD $0x000000a02494920f // setb byte [rsp + 160] - LONG $0x462e0f66; BYTE $0x48 // ucomisd xmm0, qword [rsi + 72] - WORD $0x920f; BYTE $0xd2 // setb dl - LONG $0x462e0f66; BYTE $0x50 // ucomisd xmm0, qword [rsi + 80] - LONG $0xd7920f40 // setb dil - LONG $0x462e0f66; BYTE $0x58 // ucomisd xmm0, qword [rsi + 88] - LONG $0xd2920f41 // setb r10b - LONG $0x462e0f66; BYTE $0x60 // ucomisd xmm0, qword [rsi + 96] - LONG $0xd3920f41 // setb r11b - LONG $0x462e0f66; BYTE $0x68 // ucomisd xmm0, qword [rsi + 104] - LONG $0xd4920f41 // setb r12b - LONG $0x462e0f66; BYTE $0x70 // ucomisd xmm0, qword [rsi + 112] - LONG $0x2454920f; BYTE $0x78 // setb byte [rsp + 120] - LONG $0x462e0f66; BYTE $0x78 // ucomisd xmm0, qword [rsi + 120] - WORD $0x920f; BYTE $0xd1 // setb cl - QUAD $0x00000080862e0f66 // ucomisd xmm0, qword [rsi + 128] - LONG $0x2454920f; BYTE $0x50 // setb byte [rsp + 80] - QUAD $0x00000088862e0f66 // ucomisd xmm0, qword [rsi + 136] - QUAD $0x000000902494920f // setb byte [rsp + 144] - QUAD $0x00000090862e0f66 // ucomisd xmm0, qword [rsi + 144] - LONG $0x2454920f; BYTE $0x68 // setb byte [rsp + 104] - QUAD $0x00000098862e0f66 // ucomisd xmm0, qword [rsi + 152] - LONG $0x2454920f; BYTE $0x60 // setb byte [rsp + 96] - QUAD $0x000000a0862e0f66 // ucomisd xmm0, qword [rsi + 160] - LONG $0x2454920f; BYTE $0x40 // setb byte [rsp + 64] - QUAD $0x000000a8862e0f66 // ucomisd xmm0, qword [rsi + 168] - LONG $0x2454920f; BYTE $0x48 // setb byte [rsp + 72] - QUAD $0x000000b0862e0f66 // ucomisd xmm0, qword [rsi + 176] - LONG $0x2454920f; BYTE $0x38 // setb byte [rsp + 56] - QUAD $0x000000b8862e0f66 // ucomisd xmm0, qword [rsi + 184] - LONG $0xd7920f41 // setb r15b - QUAD $0x000000c0862e0f66 // ucomisd xmm0, qword [rsi + 192] - LONG $0x2454920f; BYTE $0x08 // setb byte [rsp + 8] - QUAD $0x000000c8862e0f66 // ucomisd xmm0, qword [rsi + 200] - LONG $0x2454920f; BYTE $0x30 // setb byte [rsp + 48] - QUAD $0x000000d0862e0f66 // ucomisd xmm0, qword [rsi + 208] - LONG $0x2454920f; BYTE $0x18 // setb byte [rsp + 24] - QUAD $0x000000d8862e0f66 // ucomisd xmm0, qword [rsi + 216] - LONG $0x2454920f; BYTE $0x20 // setb byte [rsp + 32] - QUAD $0x000000e0862e0f66 // ucomisd xmm0, qword [rsi + 224] - LONG $0x2454920f; BYTE $0x28 // setb byte [rsp + 40] - QUAD $0x000000e8862e0f66 // ucomisd xmm0, qword [rsi + 232] - LONG $0x2454920f; BYTE $0x10 // setb byte [rsp + 16] - QUAD $0x000000f0862e0f66 // ucomisd xmm0, qword [rsi + 240] - LONG $0x2414920f // setb byte [rsp] - QUAD $0x000000f8862e0f66 // 
ucomisd xmm0, qword [rsi + 248] - LONG $0xd0920f41 // setb r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x000000d0248c0244 // add r9b, byte [rsp + 208] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + LONG $0x0e100ff2 // movsd xmm1, qword [rsi] + LONG $0x56100ff2; BYTE $0x08 // movsd xmm2, qword [rsi + 8] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + WORD $0x970f; BYTE $0xd3 // seta bl + LONG $0x4e100ff2; BYTE $0x10 // movsd xmm1, qword [rsi + 16] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x4e100ff2; BYTE $0x18 // movsd xmm1, qword [rsi + 24] + LONG $0xd5970f41 // seta r13b + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0xd4970f41 // seta r12b + LONG $0x4e100ff2; BYTE $0x20 // movsd xmm1, qword [rsi + 32] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x4e100ff2; BYTE $0x28 // movsd xmm1, qword [rsi + 40] + LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] + LONG $0x4e100ff2; BYTE $0x30 // movsd xmm1, qword [rsi + 48] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x4e100ff2; BYTE $0x38 // movsd xmm1, qword [rsi + 56] + LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] + LONG $0x4e100ff2; BYTE $0x40 // movsd xmm1, qword [rsi + 64] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x4e100ff2; BYTE $0x48 // movsd xmm1, qword [rsi + 72] + LONG $0x2454970f; BYTE $0x10 // seta byte [rsp + 16] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + LONG $0x4e100ff2; BYTE $0x50 // movsd xmm1, qword [rsi + 80] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x4e100ff2; BYTE $0x58 // movsd xmm1, qword [rsi + 88] + LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] + LONG $0x4e100ff2; BYTE $0x60 // movsd xmm1, qword [rsi + 96] + LONG $0x5e100ff2; BYTE $0x68 // movsd xmm3, qword [rsi + 104] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x6e100ff2; BYTE $0x70 // movsd xmm5, qword [rsi + 112] + LONG $0x76100ff2; BYTE $0x78 // movsd xmm6, qword [rsi + 120] + LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] + QUAD $0x00008086100f44f2; BYTE $0x00 // movsd xmm8, qword [rsi + 128] + QUAD $0x0000888e100f44f2; BYTE $0x00 // movsd xmm9, qword [rsi + 136] + LONG $0xd82e0f66 // ucomisd xmm3, xmm0 + QUAD $0x00009096100f44f2; BYTE $0x00 // movsd xmm10, qword [rsi + 144] + QUAD $0x0000989e100f44f2; BYTE $0x00 // movsd xmm11, qword [rsi + 152] + QUAD $0x000000902494970f // seta byte [rsp + 144] + QUAD $0x0000a0a6100f44f2; BYTE $0x00 // movsd xmm12, qword [rsi + 160] + QUAD $0x0000a8ae100f44f2; BYTE $0x00 // movsd xmm13, qword [rsi + 168] + LONG $0xe82e0f66 // ucomisd xmm5, xmm0 + QUAD $0x0000b0b6100f44f2; BYTE $0x00 // movsd xmm14, qword [rsi + 176] + QUAD $0x000000b896100ff2 // movsd xmm2, qword [rsi + 184] + QUAD $0x000000802494970f // seta byte [rsp + 128] + QUAD $0x000000c09e100ff2 // movsd xmm3, qword [rsi + 192] + QUAD $0x000000c8a6100ff2 // movsd xmm4, qword [rsi + 200] + LONG $0xf02e0f66 // ucomisd xmm6, xmm0 + QUAD $0x000000d0b6100ff2 // movsd xmm6, qword [rsi + 208] + QUAD $0x000000d8be100ff2 // movsd xmm7, qword [rsi + 216] + LONG $0xd3970f41 // seta r11b + QUAD $0x000000e08e100ff2 // movsd xmm1, qword [rsi + 224] + QUAD 
$0x000000e8ae100ff2 // movsd xmm5, qword [rsi + 232] + LONG $0x2e0f4466; BYTE $0xc0 // ucomisd xmm8, xmm0 + QUAD $0x000000b02494970f // seta byte [rsp + 176] + LONG $0x2e0f4466; BYTE $0xc8 // ucomisd xmm9, xmm0 + WORD $0x970f; BYTE $0xd1 // seta cl + LONG $0x2e0f4466; BYTE $0xd0 // ucomisd xmm10, xmm0 + LONG $0xd7970f40 // seta dil + LONG $0x2e0f4466; BYTE $0xd8 // ucomisd xmm11, xmm0 + LONG $0xd0970f41 // seta r8b + LONG $0x2e0f4466; BYTE $0xe0 // ucomisd xmm12, xmm0 + LONG $0xd2970f41 // seta r10b + LONG $0x2e0f4466; BYTE $0xe8 // ucomisd xmm13, xmm0 + LONG $0xd6970f41 // seta r14b + LONG $0x2e0f4466; BYTE $0xf0 // ucomisd xmm14, xmm0 + QUAD $0x000000a02494970f // seta byte [rsp + 160] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + LONG $0xd1970f41 // seta r9b + LONG $0xd82e0f66 // ucomisd xmm3, xmm0 + LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] + LONG $0xe02e0f66 // ucomisd xmm4, xmm0 + LONG $0xd7970f41 // seta r15b + LONG $0xf02e0f66 // ucomisd xmm6, xmm0 + LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] + LONG $0xf82e0f66 // ucomisd xmm7, xmm0 + LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] + LONG $0xe82e0f66 // ucomisd xmm5, xmm0 + QUAD $0x000000f08e100ff2 // movsd xmm1, qword [rsi + 240] + LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] + QUAD $0x000000f88e100ff2 // movsd xmm1, qword [rsi + 248] + LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd2 // seta dl + WORD $0xdb00 // add bl, bl + LONG $0x18245c02 // add bl, byte [rsp + 24] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + WORD $0x8941; BYTE $0xdc // mov r12d, ebx + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + LONG $0x64b60f44; WORD $0x5824 // movzx r12d, byte [rsp + 88] + LONG $0x06e4c041 // shl r12b, 6 + LONG $0x6cb60f44; WORD $0x4024 // movzx r13d, byte [rsp + 64] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + WORD $0xc000 // add al, al + LONG $0x10244402 // add al, byte [rsp + 16] + LONG $0x64b60f44; WORD $0x3824 // movzx r12d, byte [rsp + 56] + LONG $0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + WORD $0x8944; BYTE $0xe0 // mov eax, r12d + LONG $0x64b60f44; WORD $0x2024 // movzx r12d, byte [rsp + 32] + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0x0841; BYTE $0xdd // or r13b, bl + QUAD $0x00000090249cb60f // movzx ebx, byte [rsp + 144] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - LONG $0x2454b60f; BYTE $0x70 // 
movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - LONG $0x247cb60f; BYTE $0x78 // movzx edi, byte [rsp + 120] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x1888 // mov byte [rax], bl - LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] - WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - WORD $0x4888; BYTE $0x01 // mov byte [rax + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e3c041 // shl r11b, 7 + WORD $0x0841; BYTE $0xc3 // or r11b, al WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx + LONG $0xb0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 176] + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x04e2c041 // shl r10b, 4 + WORD $0x0845; BYTE $0xc2 // or r10b, r8b + LONG $0x05e6c041 // shl r14b, 5 + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0841; BYTE $0xdb // or r11b, bl + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e1c041 // shl r9b, 7 + WORD $0x0841; BYTE $0xc1 // or r9b, al + LONG $0x24048b48 // mov rax, qword [rsp] + WORD $0x8844; BYTE $0x28 // mov byte [rax], r13b + WORD $0x0845; BYTE $0xf1 // or r9b, r14b + WORD $0x0045; BYTE $0xff // add r15b, r15b + LONG $0x247c0244; BYTE $0x68 // add r15b, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x02 // shl 
al, 2 + WORD $0x0844; BYTE $0xf8 // or al, r15b + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc389 // mov ebx, eax + LONG $0x24048b48 // mov rax, qword [rsp] LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] + WORD $0xd908 // or cl, bl + WORD $0xcb89 // mov ebx, ecx + LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2414b60f // movzx edx, byte [rsp] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x02788844 // mov byte [rax + 2], r15b - LONG $0x03408844 // mov byte [rax + 3], r8b - LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + WORD $0xd908 // or cl, bl + LONG $0x01588844 // mov byte [rax + 1], r11b + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0xe3c0; BYTE $0x06 // shl bl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xda08 // or dl, bl + LONG $0x02488844 // mov byte [rax + 2], r9b + WORD $0xca08 // or dl, cl + WORD $0x5088; BYTE $0x03 // mov byte [rax + 3], dl LONG $0x04c08348 // add rax, 4 - WORD $0x8949; BYTE $0xc4 // mov r12, rax - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 + LONG $0x24048948 // mov qword [rsp], rax + QUAD $0x000000d024848348; BYTE $0xff // add qword [rsp + 208], -1 JNE LBB7_51 QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] - QUAD $0x000000b024948b4c // mov r10, qword [rsp + 176] + QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] LBB7_53: - LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + LONG $0x05e6c149 // shl r14, 5 + WORD $0x394d; BYTE $0xde // cmp r14, r11 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 - WORD $0x294d; BYTE $0xd0 // sub r8, r10 - WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xda // add r10, r11 - JNE LBB7_193 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_195 + WORD $0x294d; BYTE $0xf0 // sub r8, r14 + WORD $0xf749; BYTE $0xd6 // not r14 + WORD $0x014d; BYTE $0xde // add r14, r11 + JNE LBB7_196 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + JMP LBB7_198 LBB7_2: WORD $0xff83; BYTE $0x02 // cmp edi, 2 JE LBB7_56 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB7_200 - WORD $0x8a44; BYTE $0x32 // mov r14b, byte [rdx] + JNE LBB7_202 + WORD $0x8a44; BYTE $0x22 // mov r12b, byte [rdx] LONG $0x1f538d4d // lea r10, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 LONG $0xd3490f4d // cmovns r10, r11 @@ -33229,9 +34518,10 @@ LBB7_2: WORD $0x2941; BYTE $0xc1 // sub r9d, eax JE LBB7_8 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x24348b4c // mov r14, qword [rsp] LBB7_6: - WORD $0x3844; BYTE $0x36 // cmp byte [rsi], r14b + WORD $0x3844; BYTE $0x26 // cmp byte [rsi], r12b LONG $0x01768d48 // lea rsi, [rsi + 1] WORD $0x9f0f; BYTE $0xd2 // setg dl WORD $0xdaf6 // neg dl @@ -33239,8 +34529,7 @@ LBB7_6: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - WORD $0x894d; BYTE $0xe7 // mov r15, r12 - LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD 
$0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -33249,34 +34538,36 @@ LBB7_6: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB7_6 - LONG $0x01c48349 // add r12, 1 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 LBB7_8: - LONG $0x05fac149 // sar r10, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x05fac149 // sar r10, 5 + LONG $0x20fb8349 // cmp r11, 32 JL LBB7_9 - LONG $0x10fa8349 // cmp r10, 16 - LONG $0x24348844 // mov byte [rsp], r14b - QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 - QUAD $0x000001202494894c // mov qword [rsp + 288], r10 + LONG $0x10fa8349 // cmp r10, 16 + LONG $0x24648844; BYTE $0x08 // mov byte [rsp + 8], r12b + QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 + QUAD $0x000001202494894c // mov qword [rsp + 288], r10 JB LBB7_81 - WORD $0x894c; BYTE $0xd0 // mov rax, r10 - LONG $0x05e0c148 // shl rax, 5 - WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc4 // cmp r12, rax + WORD $0x894c; BYTE $0xd0 // mov rax, r10 + LONG $0x05e0c148 // shl rax, 5 + WORD $0x0148; BYTE $0xf0 // add rax, rsi + LONG $0x24043948 // cmp qword [rsp], rax JAE LBB7_84 - LONG $0x94048d4b // lea rax, [r12 + 4*r10] - WORD $0x3948; BYTE $0xc6 // cmp rsi, rax + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x90048d4a // lea rax, [rax + 4*r10] + WORD $0x3948; BYTE $0xc6 // cmp rsi, rax JAE LBB7_84 LBB7_81: WORD $0xc031 // xor eax, eax QUAD $0x000000e824848948 // mov qword [rsp + 232], rax - LONG $0x2464894c; BYTE $0x58 // mov qword [rsp + 88], r12 + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax LBB7_87: QUAD $0x000000e824942b4c // sub r10, qword [rsp + 232] @@ -33284,170 +34575,179 @@ LBB7_87: LBB7_88: WORD $0x8948; BYTE $0xf1 // mov rcx, rsi - WORD $0x3844; BYTE $0x36 // cmp byte [rsi], r14b - QUAD $0x000000b024949f0f // setg byte [rsp + 176] - LONG $0x01763844 // cmp byte [rsi + 1], r14b + WORD $0x3844; BYTE $0x26 // cmp byte [rsi], r12b + QUAD $0x000000c024949f0f // setg byte [rsp + 192] + LONG $0x01663844 // cmp byte [rsi + 1], r12b LONG $0xd69f0f40 // setg sil - LONG $0x02713844 // cmp byte [rcx + 2], r14b + LONG $0x02613844 // cmp byte [rcx + 2], r12b LONG $0xd79f0f41 // setg r15b - LONG $0x03713844 // cmp byte [rcx + 3], r14b + LONG $0x03613844 // cmp byte [rcx + 3], r12b LONG $0xd49f0f41 // setg r12b - LONG $0x04713844 // cmp byte [rcx + 4], r14b + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x04 // cmp byte [rcx + 4], al + QUAD $0x000000b024949f0f // setg byte [rsp + 176] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x05 // cmp byte [rcx + 5], al + LONG $0x24549f0f; BYTE $0x40 // setg byte [rsp + 64] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x06 // cmp byte [rcx + 6], al QUAD $0x000000d024949f0f // setg byte [rsp + 208] - LONG $0x05713844 // cmp byte [rcx + 5], r14b - LONG $0x24549f0f; BYTE $0x38 // setg byte [rsp + 56] - LONG $0x06713844 // cmp byte [rcx + 6], r14b - QUAD $0x000000c024949f0f // setg byte [rsp + 192] - LONG $0x07713844 // cmp byte [rcx + 7], r14b + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x07 // cmp byte [rcx + 7], al LONG $0xd19f0f41 // setg r9b - LONG $0x08713844 // cmp byte [rcx + 8], r14b - QUAD 
$0x0000009024949f0f // setg byte [rsp + 144] - LONG $0x09713844 // cmp byte [rcx + 9], r14b + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x08 // cmp byte [rcx + 8], al + QUAD $0x0000008024949f0f // setg byte [rsp + 128] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x09 // cmp byte [rcx + 9], al WORD $0x9f0f; BYTE $0xd2 // setg dl - LONG $0x0a713844 // cmp byte [rcx + 10], r14b + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x0a // cmp byte [rcx + 10], al LONG $0xd79f0f40 // setg dil - LONG $0x0b713844 // cmp byte [rcx + 11], r14b + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x0b // cmp byte [rcx + 11], al LONG $0xd29f0f41 // setg r10b - LONG $0x0c713844 // cmp byte [rcx + 12], r14b + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0x4138; BYTE $0x0c // cmp byte [rcx + 12], al LONG $0xd69f0f41 // setg r14b - LONG $0x2404b60f // movzx eax, byte [rsp] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x0d // cmp byte [rcx + 13], al LONG $0xd59f0f41 // setg r13b - LONG $0x2404b60f // movzx eax, byte [rsp] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x0e // cmp byte [rcx + 14], al QUAD $0x000000a024949f0f // setg byte [rsp + 160] - LONG $0x2404b60f // movzx eax, byte [rsp] + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x0f // cmp byte [rcx + 15], al LONG $0xd09f0f41 // setg r8b - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x10 // cmp byte [rcx + 16], bl - LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] - LONG $0x241cb60f // movzx ebx, byte [rsp] + QUAD $0x0000009024949f0f // setg byte [rsp + 144] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x11 // cmp byte [rcx + 17], bl - LONG $0x24549f0f; BYTE $0x68 // setg byte [rsp + 104] - LONG $0x241cb60f // movzx ebx, byte [rsp] - WORD $0x5938; BYTE $0x12 // cmp byte [rcx + 18], bl LONG $0x24549f0f; BYTE $0x70 // setg byte [rsp + 112] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0x5938; BYTE $0x12 // cmp byte [rcx + 18], bl + LONG $0x24549f0f; BYTE $0x68 // setg byte [rsp + 104] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x13 // cmp byte [rcx + 19], bl - LONG $0x24549f0f; BYTE $0x60 // setg byte [rsp + 96] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x14 // cmp byte [rcx + 20], bl - LONG $0x24549f0f; BYTE $0x50 // setg byte [rsp + 80] - LONG $0x241cb60f // movzx ebx, byte [rsp] - WORD $0x5938; BYTE $0x15 // cmp byte [rcx + 21], bl LONG $0x24549f0f; BYTE $0x48 // setg byte [rsp + 72] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] + WORD $0x5938; BYTE $0x15 // cmp byte [rcx + 21], bl + LONG $0x24549f0f; BYTE $0x58 // setg byte [rsp + 88] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x16 // cmp byte [rcx + 22], bl - LONG $0x24549f0f; BYTE $0x40 // setg byte [rsp + 64] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x24549f0f; BYTE $0x50 // setg byte [rsp + 80] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x17 // 
cmp byte [rcx + 23], bl LONG $0xd39f0f41 // setg r11b - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x18 // cmp byte [rcx + 24], bl - LONG $0x24549f0f; BYTE $0x30 // setg byte [rsp + 48] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x24549f0f; BYTE $0x38 // setg byte [rsp + 56] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x19 // cmp byte [rcx + 25], bl - LONG $0x24549f0f; BYTE $0x18 // setg byte [rsp + 24] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x24549f0f; BYTE $0x28 // setg byte [rsp + 40] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1a // cmp byte [rcx + 26], bl - LONG $0x24549f0f; BYTE $0x20 // setg byte [rsp + 32] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x24549f0f; BYTE $0x30 // setg byte [rsp + 48] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1b // cmp byte [rcx + 27], bl - LONG $0x24549f0f; BYTE $0x08 // setg byte [rsp + 8] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x24549f0f; BYTE $0x20 // setg byte [rsp + 32] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1c // cmp byte [rcx + 28], bl - LONG $0x24549f0f; BYTE $0x28 // setg byte [rsp + 40] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x24549f0f; BYTE $0x18 // setg byte [rsp + 24] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1d // cmp byte [rcx + 29], bl LONG $0x24549f0f; BYTE $0x10 // setg byte [rsp + 16] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1e // cmp byte [rcx + 30], bl - QUAD $0x0000008024949f0f // setg byte [rsp + 128] - LONG $0x241cb60f // movzx ebx, byte [rsp] + LONG $0x24149f0f // setg byte [rsp] + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x1f // cmp byte [rcx + 31], bl WORD $0x9f0f; BYTE $0xd3 // setg bl WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000b024b40240 // add sil, byte [rsp + 176] - QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + QUAD $0x000000c024b40240 // add sil, byte [rsp + 192] + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e1c041 // shl r9b, 7 WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xd200 // add dl, dl - LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] + LONG $0x80249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 128] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b + LONG $0x64b60f44; WORD $0x0824 // movzx r12d, byte [rsp + 8] LONG $0x03e2c041 // shl r10b, 3 WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x2454b60f; BYTE $0x38 // movzx edx, byte [rsp + 56] + LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0xc208 // or dl, al LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x05e5c041 // shl r13b, 5 WORD $0x0845; BYTE $0xf5 // or r13b, r14b - LONG $0x34b60f44; BYTE $0x24 // movzx r14d, byte [rsp] QUAD $0x000000a024b4b60f // movzx 
esi, byte [rsp + 160] LONG $0x06e6c040 // shl sil, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xf0 // or r8b, sil WORD $0x0841; BYTE $0xd1 // or r9b, dl WORD $0x0845; BYTE $0xe8 // or r8b, r13b - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] WORD $0xd200 // add dl, dl - LONG $0x78245402 // add dl, byte [rsp + 120] + LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] + LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] + LONG $0x24548b48; BYTE $0x60 // mov rdx, qword [rsp + 96] WORD $0x8844; BYTE $0x0a // mov byte [rdx], r9b - LONG $0x247cb60f; BYTE $0x40 // movzx edi, byte [rsp + 64] + LONG $0x247cb60f; BYTE $0x50 // movzx edi, byte [rsp + 80] LONG $0x06e7c040 // shl dil, 6 LONG $0x07e3c041 // shl r11b, 7 WORD $0x0841; BYTE $0xfb // or r11b, dil LONG $0x01428844 // mov byte [rdx + 1], r8b WORD $0x0841; BYTE $0xf3 // or r11b, sil - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xc000 // add al, al - LONG $0x30244402 // add al, byte [rsp + 48] + LONG $0x38244402 // add al, byte [rsp + 56] WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0840; BYTE $0xf0 // or al, sil WORD $0xc689 // mov esi, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0x0840; BYTE $0xf0 // or al, sil - QUAD $0x0000008024b4b60f // movzx esi, byte [rsp + 128] + LONG $0x2434b60f // movzx esi, byte [rsp] LONG $0x06e6c040 // shl sil, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0x0840; BYTE $0xf3 // or bl, sil @@ -33456,7 +34756,7 @@ LBB7_88: WORD $0x5a88; BYTE $0x03 // mov byte [rdx + 3], bl LONG $0x20718d48 // lea rsi, [rcx + 32] LONG $0x04c28348 // add rdx, 4 - LONG $0x24548948; BYTE $0x58 // mov qword [rsp + 88], rdx + LONG $0x24548948; BYTE $0x60 // mov qword [rsp + 96], rdx QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 JNE LBB7_88 QUAD 
$0x00000088249c8b4c // mov r11, qword [rsp + 136] @@ -33465,9 +34765,9 @@ LBB7_88: LBB7_27: WORD $0xff83; BYTE $0x07 // cmp edi, 7 - JE LBB7_137 + JE LBB7_139 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB7_200 + JNE LBB7_202 WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f538d4d // lea r10, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 @@ -33479,6 +34779,7 @@ LBB7_27: WORD $0x2941; BYTE $0xc1 // sub r9d, eax JE LBB7_33 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x240c8b4c // mov r9, qword [rsp] LBB7_31: WORD $0x3b4c; BYTE $0x2e // cmp r13, qword [rsi] @@ -33488,8 +34789,7 @@ LBB7_31: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 - WORD $0x894d; BYTE $0xe1 // mov r9, r12 - LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] + LONG $0x04b60f45; BYTE $0x19 // movzx r8d, byte [r9 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -33498,11 +34798,11 @@ LBB7_31: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xd7 // and dil, dl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1c3c8841 // mov byte [r12 + rbx], dil + LONG $0x193c8841 // mov byte [r9 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB7_31 - LONG $0x01c48349 // add r12, 1 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 LBB7_33: LONG $0x05fac149 // sar r10, 5 @@ -33510,36 +34810,35 @@ LBB7_33: JL LBB7_37 QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 - QUAD $0x000000b02494894c // mov qword [rsp + 176], r10 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 LBB7_35: - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - QUAD $0x000000c02494970f // seta byte [rsp + 192] + QUAD $0x000000d02494970f // seta byte [rsp + 208] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7970f40 // seta dil + LONG $0xd2970f41 // seta r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 - LONG $0xd6970f41 // seta r14b + LONG $0xd7970f40 // seta dil LONG $0x186e394c // cmp qword [rsi + 24], r13 - QUAD $0x000000d02494970f // seta byte [rsp + 208] + QUAD $0x000000b02494970f // seta byte [rsp + 176] LONG $0x206e394c // cmp qword [rsi + 32], r13 - LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] + LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] LONG $0x286e394c // cmp qword [rsi + 40], r13 LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] LONG $0x306e394c // cmp qword [rsi + 48], r13 WORD $0x970f; BYTE $0xd0 // seta al LONG $0x386e394c // cmp qword [rsi + 56], r13 - WORD $0x970f; BYTE $0xd3 // seta bl + LONG $0xd6970f41 // seta r14b LONG $0x406e394c // cmp qword [rsi + 64], r13 - QUAD $0x000000902494970f // seta byte [rsp + 144] + QUAD $0x000000802494970f // seta byte [rsp + 128] LONG $0x486e394c // cmp qword [rsi + 72], r13 WORD $0x970f; BYTE $0xd2 // seta dl LONG $0x506e394c // cmp qword [rsi + 80], r13 LONG $0xd1970f41 // seta r9b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2970f41 // seta r10b - LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3970f41 // seta r11b + LONG $0x606e394c // cmp qword [rsi + 96], r13 + WORD $0x970f; BYTE $0xd3 // seta bl LONG $0x686e394c // cmp qword [rsi + 104], r13 LONG $0xd4970f41 // seta r12b LONG $0x706e394c // cmp qword [rsi + 112], r13 @@ -33547,126 +34846,127 @@ LBB7_35: LONG $0x786e394c // cmp qword [rsi + 120], r13 WORD $0x970f; BYTE 
$0xd1 // seta cl LONG $0x80ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 128], r13 - LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] + LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] LONG $0x88ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 136], r13 - LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] + QUAD $0x000000902494970f // seta byte [rsp + 144] LONG $0x90ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 144], r13 - LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] + LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] LONG $0x98ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 152], r13 - LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] + LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] LONG $0xa0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 160], r13 - LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] + LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] LONG $0xa8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 168], r13 - LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] + LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] LONG $0xb0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 176], r13 - LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] + LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] LONG $0xb8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 184], r13 LONG $0xd7970f41 // seta r15b LONG $0xc0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 192], r13 - LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] + LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] LONG $0xc8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 200], r13 - LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] + LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] LONG $0xd0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 208], r13 - LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] + LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 - LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] + LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] LONG $0xe0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 224], r13 - LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] + LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] LONG $0xe8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 232], r13 LONG $0x2454970f; BYTE $0x10 // seta byte [rsp + 16] LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 - LONG $0x2414970f // seta byte [rsp] + LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 LONG $0xd0970f41 // seta r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000d024940244 // add r10b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xc6 // or r14b, al + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0844; BYTE $0xd7 // or dil, r10b WORD $0xd200 // add dl, dl - LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + LONG $0x80249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE 
$0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x24148b4c // mov r10, qword [rsp] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0xc208 // or dl, al WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b + WORD $0x0841; BYTE $0xdc // or r12b, bl QUAD $0x000000a024bcb60f // movzx edi, byte [rsp + 160] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0x0841; BYTE $0xd6 // or r14b, dl WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] + LONG $0x48245402 // add dl, byte [rsp + 72] WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] + LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] + LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x241c8841 // mov byte [r12], bl - LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] + WORD $0x8845; BYTE $0x32 // mov byte [r10], r14b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + LONG $0x014a8841 // mov byte [r10 + 1], cl WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + LONG $0x20244c02 // add cl, byte [rsp + 32] WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 
48] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl - LONG $0x2414b60f // movzx edx, byte [rsp] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xd0 // or r8b, dl WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b - LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b + LONG $0x027a8845 // mov byte [r10 + 2], r15b + LONG $0x03428845 // mov byte [r10 + 3], r8b LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c48349 // add r12, 4 - QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 + LONG $0x04c28349 // add r10, 4 + LONG $0x2414894c // mov qword [rsp], r10 + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 JNE LBB7_35 QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] @@ -33674,21 +34974,20 @@ LBB7_35: LBB7_37: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xda // add r10, r11 - JNE LBB7_153 + JNE LBB7_155 WORD $0x3145; BYTE $0xdb // xor r11d, r11d JMP LBB7_40 LBB7_56: - WORD $0x028a // mov al, byte [rdx] - LONG $0x28244488 // mov byte [rsp + 40], al - LONG $0x1f538d4d // lea r10, [r11 + 31] + WORD $0x8a44; BYTE $0x12 // mov r10b, byte [rdx] + LONG $0x1f7b8d4d // lea r15, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xd3490f4d // cmovns r10, r11 + LONG $0xfb490f4d // cmovns r15, r11 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d @@ -33696,18 +34995,17 @@ LBB7_56: WORD $0x2941; BYTE $0xc1 // sub r9d, eax JE LBB7_60 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x24348b4c // mov r14, qword [rsp] LBB7_58: - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] - WORD $0x0e3a // cmp cl, byte [rsi] + WORD $0x3a44; BYTE $0x16 // cmp r10b, byte [rsi] LONG $0x01768d48 // lea rsi, [rsi + 1] WORD $0xd219 // sbb edx, edx LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -33716,201 +35014,223 @@ LBB7_58: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB7_58 - LONG $0x01c48349 // add r12, 1 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 LBB7_60: - LONG $0x05fac149 // sar r10, 5 - LONG $0x20fb8349 // cmp r11, 32 + LONG $0x05ffc149 // sar r15, 5 + LONG $0x20fb8349 // cmp r11, 32 JL LBB7_61 - LONG 
$0x10fa8349 // cmp r10, 16 - QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 - QUAD $0x000001082494894c // mov qword [rsp + 264], r10 + LONG $0x10ff8349 // cmp r15, 16 + LONG $0x24548844; BYTE $0x08 // mov byte [rsp + 8], r10b + QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 + QUAD $0x0000010824bc894c // mov qword [rsp + 264], r15 JB LBB7_63 - WORD $0x894c; BYTE $0xd0 // mov rax, r10 - LONG $0x05e0c148 // shl rax, 5 - WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc4 // cmp r12, rax + WORD $0x894c; BYTE $0xf8 // mov rax, r15 + LONG $0x05e0c148 // shl rax, 5 + WORD $0x0148; BYTE $0xf0 // add rax, rsi + LONG $0x24043948 // cmp qword [rsp], rax JAE LBB7_66 - LONG $0x94048d4b // lea rax, [r12 + 4*r10] - WORD $0x3948; BYTE $0xc6 // cmp rsi, rax + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0xb8048d4a // lea rax, [rax + 4*r15] + WORD $0x3948; BYTE $0xc6 // cmp rsi, rax JAE LBB7_66 LBB7_63: WORD $0xc031 // xor eax, eax QUAD $0x000000e824848948 // mov qword [rsp + 232], rax - WORD $0x8949; BYTE $0xf6 // mov r14, rsi - LONG $0x2464894c; BYTE $0x48 // mov qword [rsp + 72], r12 + WORD $0x8949; BYTE $0xf4 // mov r12, rsi + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax LBB7_69: - QUAD $0x000000e824942b4c // sub r10, qword [rsp + 232] - QUAD $0x000000b02494894c // mov qword [rsp + 176], r10 + QUAD $0x000000e824bc2b4c // sub r15, qword [rsp + 232] + QUAD $0x000000c024bc894c // mov qword [rsp + 192], r15 LBB7_70: - WORD $0x894c; BYTE $0xf1 // mov rcx, r14 - LONG $0x74b60f44; WORD $0x2824 // movzx r14d, byte [rsp + 40] - WORD $0x3844; BYTE $0x31 // cmp byte [rcx], r14b - QUAD $0x000000c02494970f // seta byte [rsp + 192] - LONG $0x01713844 // cmp byte [rcx + 1], r14b - LONG $0xd6970f40 // seta sil - LONG $0x02713844 // cmp byte [rcx + 2], r14b - LONG $0xd3970f41 // seta r11b - LONG $0x03713844 // cmp byte [rcx + 3], r14b - LONG $0xd7970f41 // seta r15b - LONG $0x04713844 // cmp byte [rcx + 4], r14b - QUAD $0x000000d02494970f // seta byte [rsp + 208] - LONG $0x05713844 // cmp byte [rcx + 5], r14b - LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] - LONG $0x06713844 // cmp byte [rcx + 6], r14b - WORD $0x970f; BYTE $0xd0 // seta al - LONG $0x07713844 // cmp byte [rcx + 7], r14b - LONG $0xd0970f41 // seta r8b - LONG $0x08713844 // cmp byte [rcx + 8], r14b - QUAD $0x000000902494970f // seta byte [rsp + 144] - LONG $0x09713844 // cmp byte [rcx + 9], r14b - WORD $0x970f; BYTE $0xd2 // seta dl - LONG $0x0a713844 // cmp byte [rcx + 10], r14b - LONG $0xd7970f40 // seta dil - LONG $0x0b713844 // cmp byte [rcx + 11], r14b - LONG $0xd1970f41 // seta r9b - LONG $0x0c713844 // cmp byte [rcx + 12], r14b - LONG $0xd2970f41 // seta r10b - LONG $0x0d713844 // cmp byte [rcx + 13], r14b - LONG $0xd4970f41 // seta r12b - LONG $0x0e713844 // cmp byte [rcx + 14], r14b - QUAD $0x000000a02494970f // seta byte [rsp + 160] - LONG $0x0f713844 // cmp byte [rcx + 15], r14b - WORD $0x970f; BYTE $0xd3 // seta bl - LONG $0x10713844 // cmp byte [rcx + 16], r14b - LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] - LONG $0x11713844 // cmp byte [rcx + 17], r14b - LONG $0xd5970f41 // seta r13b - LONG $0x12713844 // cmp byte [rcx + 18], r14b - LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] - LONG $0x13713844 // cmp byte [rcx + 19], r14b - LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] - LONG $0x14713844 // cmp byte [rcx + 20], r14b - LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] - LONG $0x15713844 // cmp byte [rcx + 21], 
r14b - LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] - LONG $0x16713844 // cmp byte [rcx + 22], r14b - LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] - LONG $0x17713844 // cmp byte [rcx + 23], r14b - LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] - LONG $0x18713844 // cmp byte [rcx + 24], r14b - LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] - LONG $0x19713844 // cmp byte [rcx + 25], r14b - LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] - LONG $0x1a713844 // cmp byte [rcx + 26], r14b - LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] - LONG $0x1b713844 // cmp byte [rcx + 27], r14b - LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] - LONG $0x1c713844 // cmp byte [rcx + 28], r14b - LONG $0x2454970f; BYTE $0x10 // seta byte [rsp + 16] - LONG $0x1d713844 // cmp byte [rcx + 29], r14b - QUAD $0x000000802494970f // seta byte [rsp + 128] - LONG $0x1e713844 // cmp byte [rcx + 30], r14b - LONG $0x2414970f // seta byte [rsp] - LONG $0x1f713844 // cmp byte [rcx + 31], r14b - LONG $0xd6970f41 // seta r14b - WORD $0x0040; BYTE $0xf6 // add sil, sil - QUAD $0x000000c024b40240 // add sil, byte [rsp + 192] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xc0 // or r8b, al - LONG $0x02e3c041 // shl r11b, 2 - WORD $0x0841; BYTE $0xf3 // or r11b, sil - WORD $0xd200 // add dl, dl - LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] - LONG $0x03e7c041 // shl r15b, 3 - WORD $0x0845; BYTE $0xdf // or r15b, r11b - LONG $0x02e7c040 // shl dil, 2 - WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0x0844; BYTE $0xf8 // or al, r15b - WORD $0xc289 // mov edx, eax - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0841; BYTE $0xf9 // or r9b, dil - LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xd008 // or al, dl - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x000000a02494b60f // movzx edx, byte [rsp + 160] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xd308 // or bl, dl - WORD $0x0841; BYTE $0xc0 // or r8b, al - WORD $0x0844; BYTE $0xe3 // or bl, r12b - WORD $0x0045; BYTE $0xed // add r13b, r13b - LONG $0x246c0244; BYTE $0x68 // add r13b, byte [rsp + 104] - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] - WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0x0844; BYTE $0xe8 // or al, r13b - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] - WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] - WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x40 // movzx eax, byte [rsp + 64] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xd008 // or al, dl - WORD $0xc689 // mov esi, eax - LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] - WORD $0x8844; BYTE $0x00 // mov byte [rax], r8b - LONG $0x247cb60f; BYTE $0x38 // movzx edi, byte [rsp + 56] - LONG $0x06e7c040 // shl dil, 6 - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x07 // shl dl, 7 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x5888; BYTE $0x01 // mov byte [rax + 1], bl - WORD $0x0840; BYTE 
$0xf2 // or dl, sil - LONG $0x245cb60f; BYTE $0x30 // movzx ebx, byte [rsp + 48] - WORD $0xdb00 // add bl, bl - LONG $0x18245c02 // add bl, byte [rsp + 24] - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x20 // movzx ebx, byte [rsp + 32] - WORD $0xe3c0; BYTE $0x02 // shl bl, 2 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] - WORD $0xe3c0; BYTE $0x03 // shl bl, 3 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - LONG $0x245cb60f; BYTE $0x10 // movzx ebx, byte [rsp + 16] - WORD $0xe3c0; BYTE $0x04 // shl bl, 4 - WORD $0x0840; BYTE $0xf3 // or bl, sil - WORD $0xde89 // mov esi, ebx - QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] - WORD $0xe3c0; BYTE $0x05 // shl bl, 5 - WORD $0x0840; BYTE $0xf3 // or bl, sil - LONG $0x2434b60f // movzx esi, byte [rsp] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e6c041 // shl r14b, 7 - WORD $0x0841; BYTE $0xf6 // or r14b, sil - WORD $0x0841; BYTE $0xde // or r14b, bl - WORD $0x5088; BYTE $0x02 // mov byte [rax + 2], dl - LONG $0x03708844 // mov byte [rax + 3], r14b - LONG $0x20718d4c // lea r14, [rcx + 32] - LONG $0x04c08348 // add rax, 4 - LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax - QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 + WORD $0x894c; BYTE $0xe1 // mov rcx, r12 + LONG $0x24143845 // cmp byte [r12], r10b + QUAD $0x000000d02494970f // seta byte [rsp + 208] + LONG $0x24543845; BYTE $0x01 // cmp byte [r12 + 1], r10b + LONG $0xd6970f41 // seta r14b + LONG $0x24543845; BYTE $0x02 // cmp byte [r12 + 2], r10b + LONG $0xd3970f41 // seta r11b + LONG $0x24543845; BYTE $0x03 // cmp byte [r12 + 3], r10b + LONG $0xd7970f41 // seta r15b + LONG $0x24543845; BYTE $0x04 // cmp byte [r12 + 4], r10b + QUAD $0x000000b02494970f // seta byte [rsp + 176] + LONG $0x24543845; BYTE $0x05 // cmp byte [r12 + 5], r10b + LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] + LONG $0x24543845; BYTE $0x06 // cmp byte [r12 + 6], r10b + WORD $0x970f; BYTE $0xd0 // seta al + LONG $0x24543845; BYTE $0x07 // cmp byte [r12 + 7], r10b + LONG $0xd0970f41 // seta r8b + LONG $0x24543845; BYTE $0x08 // cmp byte [r12 + 8], r10b + QUAD $0x000000802494970f // seta byte [rsp + 128] + LONG $0x24543845; BYTE $0x09 // cmp byte [r12 + 9], r10b + LONG $0xd6970f40 // seta sil + LONG $0x24543845; BYTE $0x0a // cmp byte [r12 + 10], r10b + LONG $0xd7970f40 // seta dil + LONG $0x24543845; BYTE $0x0b // cmp byte [r12 + 11], r10b + LONG $0xd1970f41 // seta r9b + LONG $0x24543845; BYTE $0x0c // cmp byte [r12 + 12], r10b + LONG $0xd2970f41 // seta r10b + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + LONG $0x24543841; BYTE $0x0d // cmp byte [r12 + 13], dl + LONG $0xd4970f41 // seta r12b + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x0e // cmp byte [rcx + 14], dl + QUAD $0x000000a02494970f // seta byte [rsp + 160] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x0f // cmp byte [rcx + 15], dl + WORD $0x970f; BYTE $0xd3 // seta bl + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x10 // cmp byte [rcx + 16], dl + QUAD $0x000000902494970f // seta byte [rsp + 144] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x11 // cmp byte [rcx + 17], dl + LONG $0xd5970f41 // seta r13b + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x12 // cmp byte [rcx + 18], dl + LONG $0x2454970f; 
BYTE $0x70 // seta byte [rsp + 112] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x13 // cmp byte [rcx + 19], dl + LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x14 // cmp byte [rcx + 20], dl + LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x15 // cmp byte [rcx + 21], dl + LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x16 // cmp byte [rcx + 22], dl + LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x17 // cmp byte [rcx + 23], dl + LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x18 // cmp byte [rcx + 24], dl + LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x19 // cmp byte [rcx + 25], dl + LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x1a // cmp byte [rcx + 26], dl + LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x1b // cmp byte [rcx + 27], dl + LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x1c // cmp byte [rcx + 28], dl + LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x1d // cmp byte [rcx + 29], dl + LONG $0x2454970f; BYTE $0x10 // seta byte [rsp + 16] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x1e // cmp byte [rcx + 30], dl + LONG $0x2414970f // seta byte [rsp] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] + WORD $0x5138; BYTE $0x1f // cmp byte [rcx + 31], dl + WORD $0x970f; BYTE $0xd2 // seta dl + WORD $0x0045; BYTE $0xf6 // add r14b, r14b + QUAD $0x000000d024b40244 // add r14b, byte [rsp + 208] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e0c041 // shl r8b, 7 + WORD $0x0841; BYTE $0xc0 // or r8b, al + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0845; BYTE $0xf3 // or r11b, r14b + WORD $0x0040; BYTE $0xf6 // add sil, sil + QUAD $0x0000008024b40240 // add sil, byte [rsp + 128] + LONG $0x03e7c041 // shl r15b, 3 + WORD $0x0845; BYTE $0xdf // or r15b, r11b + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0840; BYTE $0xf7 // or dil, sil + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xf8 // or al, r15b + WORD $0xc689 // mov esi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0841; BYTE $0xf9 // or r9b, dil + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf0 // or al, sil + LONG $0x04e2c041 // shl r10b, 4 + WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x05e4c041 // shl r12b, 5 + WORD $0x0845; BYTE $0xd4 // or r12b, r10b + LONG $0x54b60f44; WORD $0x0824 // movzx r10d, byte [rsp + 8] + QUAD $0x000000a024b4b60f // movzx esi, byte [rsp + 160] + LONG $0x06e6c040 // shl sil, 6 + WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + WORD $0x0840; BYTE $0xf3 // or bl, sil + WORD $0x0841; BYTE $0xc0 // 
or r8b, al + WORD $0x0844; BYTE $0xe3 // or bl, r12b + WORD $0x0045; BYTE $0xed // add r13b, r13b + QUAD $0x0000009024ac0244 // add r13b, byte [rsp + 144] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0844; BYTE $0xe8 // or al, r13b + WORD $0xc689 // mov esi, eax + LONG $0x2444b60f; BYTE $0x68 // movzx eax, byte [rsp + 104] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf0 // or al, sil + WORD $0xc689 // mov esi, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf0 // or al, sil + WORD $0xc689 // mov esi, eax + LONG $0x2444b60f; BYTE $0x58 // movzx eax, byte [rsp + 88] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf0 // or al, sil + WORD $0x8941; BYTE $0xc1 // mov r9d, eax + LONG $0x24748b48; BYTE $0x78 // mov rsi, qword [rsp + 120] + WORD $0x8844; BYTE $0x06 // mov byte [rsi], r8b + LONG $0x247cb60f; BYTE $0x50 // movzx edi, byte [rsp + 80] + LONG $0x06e7c040 // shl dil, 6 + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] + WORD $0xe0c0; BYTE $0x07 // shl al, 7 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0x5e88; BYTE $0x01 // mov byte [rsi + 1], bl + WORD $0x0844; BYTE $0xc8 // or al, r9b + LONG $0x245cb60f; BYTE $0x28 // movzx ebx, byte [rsp + 40] + WORD $0xdb00 // add bl, bl + LONG $0x40245c02 // add bl, byte [rsp + 64] + WORD $0xdf89 // mov edi, ebx + LONG $0x245cb60f; BYTE $0x20 // movzx ebx, byte [rsp + 32] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0840; BYTE $0xfb // or bl, dil + WORD $0xdf89 // mov edi, ebx + LONG $0x245cb60f; BYTE $0x30 // movzx ebx, byte [rsp + 48] + WORD $0xe3c0; BYTE $0x03 // shl bl, 3 + WORD $0x0840; BYTE $0xfb // or bl, dil + WORD $0xdf89 // mov edi, ebx + LONG $0x245cb60f; BYTE $0x18 // movzx ebx, byte [rsp + 24] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0840; BYTE $0xfb // or bl, dil + WORD $0xdf89 // mov edi, ebx + LONG $0x245cb60f; BYTE $0x10 // movzx ebx, byte [rsp + 16] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0840; BYTE $0xfb // or bl, dil + LONG $0x243cb60f // movzx edi, byte [rsp] + LONG $0x06e7c040 // shl dil, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0xda08 // or dl, bl + WORD $0x4688; BYTE $0x02 // mov byte [rsi + 2], al + WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl + LONG $0x20618d4c // lea r12, [rcx + 32] + LONG $0x04c68348 // add rsi, 4 + LONG $0x24748948; BYTE $0x78 // mov qword [rsp + 120], rsi + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 JNE LBB7_70 - QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] - QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] + QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] + QUAD $0x0000010824bc8b4c // mov r15, qword [rsp + 264] JMP LBB7_72 -LBB7_137: +LBB7_139: WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f538d4d // lea r10, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 @@ -33920,10 +35240,11 @@ LBB7_137: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_141 + JE LBB7_143 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x240c8b4c // mov r9, qword [rsp] -LBB7_139: +LBB7_141: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x04768d48 // lea rsi, [rsi + 4] WORD $0x9f0f; BYTE $0xd2 // setg dl @@ -33932,8 +35253,7 @@ LBB7_139: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, 
rax LONG $0x03fbc148 // sar rbx, 3 - WORD $0x894d; BYTE $0xe1 // mov r9, r12 - LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] + LONG $0x04b60f45; BYTE $0x19 // movzx r8d, byte [r9 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -33942,48 +35262,47 @@ LBB7_139: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xd7 // and dil, dl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1c3c8841 // mov byte [r12 + rbx], dil + LONG $0x193c8841 // mov byte [r9 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_139 - LONG $0x01c48349 // add r12, 1 + JNE LBB7_141 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 -LBB7_141: +LBB7_143: LONG $0x05fac149 // sar r10, 5 LONG $0x20fb8349 // cmp r11, 32 - JL LBB7_145 + JL LBB7_147 QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 - QUAD $0x000000b02494894c // mov qword [rsp + 176], r10 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 -LBB7_143: - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 +LBB7_145: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - QUAD $0x000000c024949f0f // setg byte [rsp + 192] + QUAD $0x000000d024949f0f // setg byte [rsp + 208] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd79f0f40 // setg dil + LONG $0xd29f0f41 // setg r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d - LONG $0xd69f0f41 // setg r14b + LONG $0xd79f0f40 // setg dil LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d - QUAD $0x000000d024949f0f // setg byte [rsp + 208] + QUAD $0x000000b024949f0f // setg byte [rsp + 176] LONG $0x106e3944 // cmp dword [rsi + 16], r13d - LONG $0x24549f0f; BYTE $0x70 // setg byte [rsp + 112] + LONG $0x24549f0f; BYTE $0x68 // setg byte [rsp + 104] LONG $0x146e3944 // cmp dword [rsi + 20], r13d LONG $0x24549f0f; BYTE $0x58 // setg byte [rsp + 88] LONG $0x186e3944 // cmp dword [rsi + 24], r13d WORD $0x9f0f; BYTE $0xd0 // setg al LONG $0x1c6e3944 // cmp dword [rsi + 28], r13d - WORD $0x9f0f; BYTE $0xd3 // setg bl + LONG $0xd69f0f41 // setg r14b LONG $0x206e3944 // cmp dword [rsi + 32], r13d - QUAD $0x0000009024949f0f // setg byte [rsp + 144] + QUAD $0x0000008024949f0f // setg byte [rsp + 128] LONG $0x246e3944 // cmp dword [rsi + 36], r13d WORD $0x9f0f; BYTE $0xd2 // setg dl LONG $0x286e3944 // cmp dword [rsi + 40], r13d LONG $0xd19f0f41 // setg r9b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd29f0f41 // setg r10b - LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd39f0f41 // setg r11b + LONG $0x306e3944 // cmp dword [rsi + 48], r13d + WORD $0x9f0f; BYTE $0xd3 // setg bl LONG $0x346e3944 // cmp dword [rsi + 52], r13d LONG $0xd49f0f41 // setg r12b LONG $0x386e3944 // cmp dword [rsi + 56], r13d @@ -33991,141 +35310,142 @@ LBB7_143: LONG $0x3c6e3944 // cmp dword [rsi + 60], r13d WORD $0x9f0f; BYTE $0xd1 // setg cl LONG $0x406e3944 // cmp dword [rsi + 64], r13d - LONG $0x24549f0f; BYTE $0x50 // setg byte [rsp + 80] + LONG $0x24549f0f; BYTE $0x48 // setg byte [rsp + 72] LONG $0x446e3944 // cmp dword [rsi + 68], r13d - LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] + QUAD $0x0000009024949f0f // setg byte [rsp + 144] LONG $0x486e3944 // cmp dword [rsi + 72], r13d - LONG $0x24549f0f; BYTE $0x68 // setg byte [rsp + 104] + LONG $0x24549f0f; BYTE $0x70 // setg byte [rsp + 112] LONG $0x4c6e3944 // cmp dword [rsi + 76], r13d - LONG $0x24549f0f; BYTE $0x60 // setg byte [rsp + 96] + LONG $0x24549f0f; BYTE $0x78 // 
setg byte [rsp + 120] LONG $0x506e3944 // cmp dword [rsi + 80], r13d - LONG $0x24549f0f; BYTE $0x40 // setg byte [rsp + 64] + LONG $0x24549f0f; BYTE $0x50 // setg byte [rsp + 80] LONG $0x546e3944 // cmp dword [rsi + 84], r13d - LONG $0x24549f0f; BYTE $0x48 // setg byte [rsp + 72] + LONG $0x24549f0f; BYTE $0x60 // setg byte [rsp + 96] LONG $0x586e3944 // cmp dword [rsi + 88], r13d - LONG $0x24549f0f; BYTE $0x38 // setg byte [rsp + 56] + LONG $0x24549f0f; BYTE $0x40 // setg byte [rsp + 64] LONG $0x5c6e3944 // cmp dword [rsi + 92], r13d LONG $0xd79f0f41 // setg r15b LONG $0x606e3944 // cmp dword [rsi + 96], r13d - LONG $0x24549f0f; BYTE $0x08 // setg byte [rsp + 8] + LONG $0x24549f0f; BYTE $0x20 // setg byte [rsp + 32] LONG $0x646e3944 // cmp dword [rsi + 100], r13d - LONG $0x24549f0f; BYTE $0x30 // setg byte [rsp + 48] + LONG $0x24549f0f; BYTE $0x38 // setg byte [rsp + 56] LONG $0x686e3944 // cmp dword [rsi + 104], r13d - LONG $0x24549f0f; BYTE $0x18 // setg byte [rsp + 24] + LONG $0x24549f0f; BYTE $0x28 // setg byte [rsp + 40] LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d - LONG $0x24549f0f; BYTE $0x20 // setg byte [rsp + 32] + LONG $0x24549f0f; BYTE $0x30 // setg byte [rsp + 48] LONG $0x706e3944 // cmp dword [rsi + 112], r13d - LONG $0x24549f0f; BYTE $0x28 // setg byte [rsp + 40] + LONG $0x24549f0f; BYTE $0x18 // setg byte [rsp + 24] LONG $0x746e3944 // cmp dword [rsi + 116], r13d LONG $0x24549f0f; BYTE $0x10 // setg byte [rsp + 16] LONG $0x786e3944 // cmp dword [rsi + 120], r13d - LONG $0x24149f0f // setg byte [rsp] + LONG $0x24549f0f; BYTE $0x08 // setg byte [rsp + 8] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d LONG $0xd09f0f41 // setg r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000d024940244 // add r10b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xc6 // or r14b, al + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0844; BYTE $0xd7 // or dil, r10b WORD $0xd200 // add dl, dl - LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + LONG $0x80249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x24148b4c // mov r10, qword [rsp] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0xc208 // or dl, al WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b + WORD $0x0841; BYTE $0xdc // or r12b, bl QUAD $0x000000a024bcb60f // movzx 
edi, byte [rsp + 160] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0x0841; BYTE $0xd6 // or r14b, dl WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] + LONG $0x48245402 // add dl, byte [rsp + 72] WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] + LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] + LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x241c8841 // mov byte [r12], bl - LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] + WORD $0x8845; BYTE $0x32 // mov byte [r10], r14b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + LONG $0x014a8841 // mov byte [r10 + 1], cl WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + LONG $0x20244c02 // add cl, byte [rsp + 32] WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl - LONG $0x2414b60f // movzx edx, byte [rsp] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xd0 // or r8b, dl WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b - LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b + LONG $0x027a8845 // mov byte [r10 + 2], r15b + LONG $0x03428845 // mov byte [r10 + 3], r8b LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 - LONG $0x04c48349 // add r12, 4 - QUAD $0x000000b024848348; BYTE $0xff // add 
qword [rsp + 176], -1 - JNE LBB7_143 + LONG $0x04c28349 // add r10, 4 + LONG $0x2414894c // mov qword [rsp], r10 + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 + JNE LBB7_145 QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] -LBB7_145: +LBB7_147: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xda // add r10, r11 - JNE LBB7_151 + JNE LBB7_153 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_148 + JMP LBB7_150 LBB7_98: LONG $0x2ab70f44 // movzx r13d, word [rdx] @@ -34139,6 +35459,7 @@ LBB7_98: WORD $0x2941; BYTE $0xc1 // sub r9d, eax JE LBB7_102 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x24148b48 // mov rdx, qword [rsp] LBB7_100: LONG $0x2e3b4466 // cmp r13w, word [rsi] @@ -34148,8 +35469,7 @@ LBB7_100: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 - WORD $0x894c; BYTE $0xe2 // mov rdx, r12 - LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] + LONG $0x04b60f44; BYTE $0x1a // movzx r8d, byte [rdx + rbx] WORD $0x3045; BYTE $0xc1 // xor r9b, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -34158,11 +35478,11 @@ LBB7_100: WORD $0xe7d3 // shl edi, cl WORD $0x2044; BYTE $0xcf // and dil, r9b WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1c3c8841 // mov byte [r12 + rbx], dil + LONG $0x1a3c8840 // mov byte [rdx + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 JNE LBB7_100 - LONG $0x01c48349 // add r12, 1 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 LBB7_102: LONG $0x05fac149 // sar r10, 5 @@ -34170,40 +35490,39 @@ LBB7_102: JL LBB7_106 QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 - QUAD $0x000000b02494894c // mov qword [rsp + 176], r10 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 LBB7_104: - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 LONG $0x2e394466 // cmp word [rsi], r13w - LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] + LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0xd7970f40 // seta dil + LONG $0xd2970f41 // seta r10b LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w - LONG $0xd6970f41 // seta r14b + LONG $0xd7970f40 // seta dil LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w - QUAD $0x000000c02494970f // seta byte [rsp + 192] + QUAD $0x000000d02494970f // seta byte [rsp + 208] LONG $0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w - QUAD $0x000000902494970f // seta byte [rsp + 144] + QUAD $0x000000802494970f // seta byte [rsp + 128] LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w - LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] + QUAD $0x000000902494970f // seta byte [rsp + 144] LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w WORD $0x970f; BYTE $0xd0 // seta al LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w - WORD $0x970f; BYTE $0xd3 // seta bl + LONG $0xd6970f41 // seta r14b LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w - LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] + LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w WORD $0x970f; BYTE $0xd2 // seta dl 
LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w LONG $0xd1970f41 // seta r9b LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w - LONG $0xd2970f41 // seta r10b - LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w LONG $0xd3970f41 // seta r11b + LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w + WORD $0x970f; BYTE $0xd3 // seta bl LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w LONG $0xd4970f41 // seta r12b LONG $0x6e394466; BYTE $0x1c // cmp word [rsi + 28], r13w - QUAD $0x000000d02494970f // seta byte [rsp + 208] + QUAD $0x000000b02494970f // seta byte [rsp + 176] LONG $0x6e394466; BYTE $0x1e // cmp word [rsi + 30], r13w WORD $0x970f; BYTE $0xd1 // seta cl LONG $0x6e394466; BYTE $0x20 // cmp word [rsi + 32], r13w @@ -34211,122 +35530,123 @@ LBB7_104: LONG $0x6e394466; BYTE $0x22 // cmp word [rsi + 34], r13w QUAD $0x000000a02494970f // seta byte [rsp + 160] LONG $0x6e394466; BYTE $0x24 // cmp word [rsi + 36], r13w - LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] - LONG $0x6e394466; BYTE $0x26 // cmp word [rsi + 38], r13w LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] + LONG $0x6e394466; BYTE $0x26 // cmp word [rsi + 38], r13w + LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] LONG $0x6e394466; BYTE $0x28 // cmp word [rsi + 40], r13w - LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] + LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] LONG $0x6e394466; BYTE $0x2a // cmp word [rsi + 42], r13w - LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] + LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] LONG $0x6e394466; BYTE $0x2c // cmp word [rsi + 44], r13w - LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] + LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] LONG $0x6e394466; BYTE $0x2e // cmp word [rsi + 46], r13w LONG $0xd7970f41 // seta r15b LONG $0x6e394466; BYTE $0x30 // cmp word [rsi + 48], r13w - LONG $0x2414970f // seta byte [rsp] + LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] LONG $0x6e394466; BYTE $0x32 // cmp word [rsi + 50], r13w - LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] + LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] LONG $0x6e394466; BYTE $0x34 // cmp word [rsi + 52], r13w - LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] + LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] LONG $0x6e394466; BYTE $0x36 // cmp word [rsi + 54], r13w - LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] + LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] LONG $0x6e394466; BYTE $0x38 // cmp word [rsi + 56], r13w - LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] + LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] LONG $0x6e394466; BYTE $0x3a // cmp word [rsi + 58], r13w - LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] + LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w - LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] + LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w LONG $0xd0970f41 // seta r8b - WORD $0x0040; BYTE $0xff // add dil, dil - LONG $0x247c0240; BYTE $0x58 // add dil, byte [rsp + 88] + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + LONG $0x24540244; BYTE $0x38 // add r10b, byte [rsp + 56] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xc6 // or 
r14b, al + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0844; BYTE $0xd7 // or dil, r10b WORD $0xd200 // add dl, dl - LONG $0x20245402 // add dl, byte [rsp + 32] - QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + LONG $0x18245402 // add dl, byte [rsp + 24] + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x24148b4c // mov r10, qword [rsp] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] + QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0xc208 // or dl, al WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xcb // or r11b, r9b + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x000000d024bcb60f // movzx edi, byte [rsp + 208] + WORD $0x0841; BYTE $0xdc // or r12b, bl + QUAD $0x000000b024bcb60f // movzx edi, byte [rsp + 176] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0x0841; BYTE $0xd6 // or r14b, dl WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] QUAD $0x000000a02494b60f // movzx edx, byte [rsp + 160] WORD $0xd200 // add dl, dl LONG $0x10245402 // add dl, byte [rsp + 16] WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] + LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x241c8841 // mov byte [r12], bl - LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] + WORD $0x8845; BYTE $0x32 // mov byte [r10], r14b + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + LONG $0x014a8841 // mov byte [r10 + 1], cl WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] + LONG $0x244cb60f; BYTE $0x58 // movzx ecx, byte [rsp + 88] WORD $0xc900 // add cl, cl - WORD $0x0c02; BYTE $0x24 // add cl, byte [rsp] + LONG $0x08244c02 // add cl, byte [rsp + 8] WORD $0xca89 // 
mov edx, ecx - LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] + LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x28 // movzx edx, byte [rsp + 40] + LONG $0x2454b60f; BYTE $0x20 // movzx edx, byte [rsp + 32] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xd0 // or r8b, dl WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b - LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b + LONG $0x027a8845 // mov byte [r10 + 2], r15b + LONG $0x03428845 // mov byte [r10 + 3], r8b LONG $0x40c68348 // add rsi, 64 - LONG $0x04c48349 // add r12, 4 - QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 + LONG $0x04c28349 // add r10, 4 + LONG $0x2414894c // mov qword [rsp], r10 + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 JNE LBB7_104 QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] @@ -34334,31 +35654,32 @@ LBB7_104: LBB7_106: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xda // add r10, r11 - JNE LBB7_111 + JNE LBB7_112 WORD $0x3145; BYTE $0xdb // xor r11d, r11d JMP LBB7_109 -LBB7_113: +LBB7_114: WORD $0xb70f; BYTE $0x02 // movzx eax, word [rdx] LONG $0xf0248489; WORD $0x0000; BYTE $0x00 // mov dword [rsp + 240], eax - LONG $0x1f738d4d // lea r14, [r11 + 31] + LONG $0x1f7b8d4d // lea r15, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 - LONG $0xf3490f4d // cmovns r14, r11 + LONG $0xfb490f4d // cmovns r15, r11 LONG $0x07418d41 // lea eax, [r9 + 7] WORD $0x8545; BYTE $0xc9 // test r9d, r9d LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_117 + JE LBB7_118 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d QUAD $0x000000f024948b44 // mov r10d, dword [rsp + 240] + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_115: +LBB7_116: LONG $0x16394466 // cmp word [rsi], r10w LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x9f0f; BYTE $0xd2 // setg dl @@ -34367,8 +35688,7 @@ LBB7_115: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - WORD $0x894d; BYTE $0xe7 // mov r15, r12 - LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -34377,51 +35697,53 @@ LBB7_115: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3c1c8841 // mov byte 
[r12 + rdi], bl + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_115 - LONG $0x01c48349 // add r12, 1 + JNE LBB7_116 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 -LBB7_117: - LONG $0x05fec149 // sar r14, 5 +LBB7_118: + LONG $0x05ffc149 // sar r15, 5 LONG $0x20fb8349 // cmp r11, 32 - JL LBB7_128 - LONG $0x08fe8349 // cmp r14, 8 + JL LBB7_119 + LONG $0x08ff8349 // cmp r15, 8 QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 - QUAD $0x0000011024b4894c // mov qword [rsp + 272], r14 - JB LBB7_119 - WORD $0x894c; BYTE $0xf0 // mov rax, r14 + QUAD $0x0000011024bc894c // mov qword [rsp + 272], r15 + JB LBB7_121 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x06e0c148 // shl rax, 6 WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc4 // cmp r12, rax - JAE LBB7_122 - LONG $0xb4048d4b // lea rax, [r12 + 4*r14] + LONG $0x24043948 // cmp qword [rsp], rax + JAE LBB7_124 + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0xb8048d4a // lea rax, [rax + 4*r15] WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB7_122 + JBE LBB7_124 -LBB7_119: +LBB7_121: WORD $0xc031 // xor eax, eax - LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax + LONG $0x24448948; BYTE $0x28 // mov qword [rsp + 40], rax + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_125: - LONG $0x2424894c // mov qword [rsp], r12 - LONG $0x24742b4c; BYTE $0x18 // sub r14, qword [rsp + 24] - QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 +LBB7_127: + LONG $0x2474894c; BYTE $0x08 // mov qword [rsp + 8], r14 + LONG $0x247c2b4c; BYTE $0x28 // sub r15, qword [rsp + 40] + QUAD $0x000000c024bc894c // mov qword [rsp + 192], r15 QUAD $0x000000f024ac8b44 // mov r13d, dword [rsp + 240] -LBB7_126: +LBB7_128: WORD $0x8949; BYTE $0xf3 // mov r11, rsi LONG $0x2e394466 // cmp word [rsi], r13w - QUAD $0x000000c024949f0f // setg byte [rsp + 192] + QUAD $0x000000d024949f0f // setg byte [rsp + 208] LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w LONG $0xd09f0f41 // setg r8b LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w LONG $0xd69f0f41 // setg r14b LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w - QUAD $0x000000d024949f0f // setg byte [rsp + 208] + QUAD $0x000000b024949f0f // setg byte [rsp + 176] LONG $0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w - LONG $0x24549f0f; BYTE $0x70 // setg byte [rsp + 112] + LONG $0x24549f0f; BYTE $0x68 // setg byte [rsp + 104] LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w LONG $0x24549f0f; BYTE $0x58 // setg byte [rsp + 88] LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w @@ -34429,7 +35751,7 @@ LBB7_126: LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w WORD $0x9f0f; BYTE $0xd3 // setg bl LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w - QUAD $0x0000009024949f0f // setg byte [rsp + 144] + QUAD $0x0000008024949f0f // setg byte [rsp + 128] LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w WORD $0x9f0f; BYTE $0xd1 // setg cl LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w @@ -34445,52 +35767,52 @@ LBB7_126: LONG $0x6b394566; BYTE $0x1e // cmp word [r11 + 30], r13w LONG $0xd79f0f40 // setg dil LONG $0x6b394566; BYTE $0x20 // cmp word [r11 + 32], r13w - LONG $0x24549f0f; BYTE $0x50 // setg byte [rsp + 80] + LONG $0x24549f0f; BYTE $0x48 // setg byte [rsp + 72] LONG $0x6b394566; BYTE $0x22 // cmp word [r11 + 34], r13w - LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] + QUAD $0x0000009024949f0f // setg byte [rsp + 144] LONG $0x6b394566; 
BYTE $0x24 // cmp word [r11 + 36], r13w - LONG $0x24549f0f; BYTE $0x68 // setg byte [rsp + 104] + LONG $0x24549f0f; BYTE $0x70 // setg byte [rsp + 112] LONG $0x6b394566; BYTE $0x26 // cmp word [r11 + 38], r13w - LONG $0x24549f0f; BYTE $0x60 // setg byte [rsp + 96] + LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] LONG $0x6b394566; BYTE $0x28 // cmp word [r11 + 40], r13w - LONG $0x24549f0f; BYTE $0x40 // setg byte [rsp + 64] + LONG $0x24549f0f; BYTE $0x50 // setg byte [rsp + 80] LONG $0x6b394566; BYTE $0x2a // cmp word [r11 + 42], r13w - LONG $0x24549f0f; BYTE $0x48 // setg byte [rsp + 72] + LONG $0x24549f0f; BYTE $0x60 // setg byte [rsp + 96] LONG $0x6b394566; BYTE $0x2c // cmp word [r11 + 44], r13w - LONG $0x24549f0f; BYTE $0x38 // setg byte [rsp + 56] + LONG $0x24549f0f; BYTE $0x40 // setg byte [rsp + 64] LONG $0x6b394566; BYTE $0x2e // cmp word [r11 + 46], r13w LONG $0xd79f0f41 // setg r15b LONG $0x6b394566; BYTE $0x30 // cmp word [r11 + 48], r13w - LONG $0x24549f0f; BYTE $0x08 // setg byte [rsp + 8] + LONG $0x24549f0f; BYTE $0x20 // setg byte [rsp + 32] LONG $0x6b394566; BYTE $0x32 // cmp word [r11 + 50], r13w - LONG $0x24549f0f; BYTE $0x30 // setg byte [rsp + 48] + LONG $0x24549f0f; BYTE $0x38 // setg byte [rsp + 56] LONG $0x6b394566; BYTE $0x34 // cmp word [r11 + 52], r13w - LONG $0x24549f0f; BYTE $0x18 // setg byte [rsp + 24] + LONG $0x24549f0f; BYTE $0x28 // setg byte [rsp + 40] LONG $0x6b394566; BYTE $0x36 // cmp word [r11 + 54], r13w - LONG $0x24549f0f; BYTE $0x20 // setg byte [rsp + 32] + LONG $0x24549f0f; BYTE $0x30 // setg byte [rsp + 48] LONG $0x6b394566; BYTE $0x38 // cmp word [r11 + 56], r13w - LONG $0x24549f0f; BYTE $0x28 // setg byte [rsp + 40] + LONG $0x24549f0f; BYTE $0x18 // setg byte [rsp + 24] LONG $0x6b394566; BYTE $0x3a // cmp word [r11 + 58], r13w LONG $0x24549f0f; BYTE $0x10 // setg byte [rsp + 16] LONG $0x6b394566; BYTE $0x3c // cmp word [r11 + 60], r13w - QUAD $0x0000008024949f0f // setg byte [rsp + 128] + LONG $0x24149f0f // setg byte [rsp] LONG $0x6b394566; BYTE $0x3e // cmp word [r11 + 62], r13w WORD $0x9f0f; BYTE $0xd2 // setg dl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000c024840244 // add r8b, byte [rsp + 192] + QUAD $0x000000d024840244 // add r8b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xc6 // or r14b, r8b WORD $0xc900 // add cl, cl - LONG $0x90248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 144] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + LONG $0x80248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b LONG $0x02e6c040 // shl sil, 2 WORD $0x0840; BYTE $0xce // or sil, cl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + LONG $0x244cb60f; BYTE $0x68 // movzx ecx, byte [rsp + 104] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xc108 // or cl, al WORD $0x8941; BYTE $0xc8 // mov r8d, ecx @@ -34509,54 +35831,54 @@ LBB7_126: WORD $0x0840; BYTE $0xf7 // or dil, sil WORD $0xcb08 // or bl, cl WORD $0x0844; BYTE $0xe7 // or dil, r12b - LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] + QUAD $0x00000090248cb60f // movzx ecx, byte [rsp + 144] WORD $0xc900 // add cl, cl - LONG $0x50244c02 // add cl, byte [rsp + 80] + LONG $0x48244c02 // add cl, byte [rsp + 72] WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x68 // movzx ecx, 
byte [rsp + 104] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] + LONG $0x244cb60f; BYTE $0x78 // movzx ecx, byte [rsp + 120] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x40 // movzx ecx, byte [rsp + 64] + LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - LONG $0x244cb60f; BYTE $0x48 // movzx ecx, byte [rsp + 72] + LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - LONG $0x240c8b48 // mov rcx, qword [rsp] + LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] WORD $0x1988 // mov byte [rcx], bl - LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x01798840 // mov byte [rcx + 1], dil WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xc000 // add al, al - LONG $0x08244402 // add al, byte [rsp + 8] + LONG $0x20244402 // add al, byte [rsp + 32] WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] WORD $0xe0c0; BYTE $0x02 // shl al, 2 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0xd808 // or al, bl WORD $0xc389 // mov ebx, eax LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] WORD $0xe0c0; BYTE $0x05 // shl al, 5 WORD $0xd808 // or al, bl - QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] + LONG $0x241cb60f // movzx ebx, byte [rsp] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 WORD $0xe2c0; BYTE $0x07 // shl dl, 7 WORD $0xda08 // or dl, bl @@ -34565,26 +35887,15 @@ LBB7_126: WORD $0x5188; BYTE $0x03 // mov byte [rcx + 3], dl LONG $0x40738d49 // lea rsi, [r11 + 64] LONG $0x04c18348 // add rcx, 4 - LONG $0x240c8948 // mov qword [rsp], rcx - QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 - JNE LBB7_126 + LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 + JNE LBB7_128 QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] - QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] - LONG $0x24248b4c // mov r12, qword [rsp] - -LBB7_128: - LONG $0x05e6c149 // shl r14, 5 - WORD $0x394d; BYTE $0xde // cmp r14, r11 - JGE LBB7_200 - WORD $0x894d; BYTE $0xd8 // mov r8, r11 - WORD $0x294d; BYTE $0xf0 // sub r8, r14 - WORD $0xf749; BYTE $0xd6 // not r14 - WORD $0x014d; BYTE $0xde // add r14, r11 - JNE LBB7_133 - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d - JMP LBB7_131 + QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] + LONG 
$0x24748b4c; BYTE $0x08 // mov r14, qword [rsp + 8] + JMP LBB7_130 -LBB7_155: +LBB7_157: WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f538d4d // lea r10, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 @@ -34594,10 +35905,11 @@ LBB7_155: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_159 + JE LBB7_161 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x240c8b4c // mov r9, qword [rsp] -LBB7_157: +LBB7_159: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] WORD $0x9f0f; BYTE $0xd2 // setg dl @@ -34606,8 +35918,7 @@ LBB7_157: WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xd8490f48 // cmovns rbx, rax LONG $0x03fbc148 // sar rbx, 3 - WORD $0x894d; BYTE $0xe1 // mov r9, r12 - LONG $0x04b60f45; BYTE $0x1c // movzx r8d, byte [r12 + rbx] + LONG $0x04b60f45; BYTE $0x19 // movzx r8d, byte [r9 + rbx] WORD $0x3044; BYTE $0xc2 // xor dl, r8b LONG $0x00dd3c8d; WORD $0x0000; BYTE $0x00 // lea edi, [8*rbx] WORD $0xc189 // mov ecx, eax @@ -34616,48 +35927,47 @@ LBB7_157: WORD $0xe7d3 // shl edi, cl WORD $0x2040; BYTE $0xd7 // and dil, dl WORD $0x3044; BYTE $0xc7 // xor dil, r8b - LONG $0x1c3c8841 // mov byte [r12 + rbx], dil + LONG $0x193c8841 // mov byte [r9 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_157 - LONG $0x01c48349 // add r12, 1 + JNE LBB7_159 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 -LBB7_159: +LBB7_161: LONG $0x05fac149 // sar r10, 5 LONG $0x20fb8349 // cmp r11, 32 - JL LBB7_163 + JL LBB7_165 QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 QUAD $0x000000f02494894c // mov qword [rsp + 240], r10 - QUAD $0x000000b02494894c // mov qword [rsp + 176], r10 + QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 -LBB7_161: - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 +LBB7_163: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - QUAD $0x000000c024949f0f // setg byte [rsp + 192] + QUAD $0x000000d024949f0f // setg byte [rsp + 208] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd79f0f40 // setg dil + LONG $0xd29f0f41 // setg r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 - LONG $0xd69f0f41 // setg r14b + LONG $0xd79f0f40 // setg dil LONG $0x186e394c // cmp qword [rsi + 24], r13 - QUAD $0x000000d024949f0f // setg byte [rsp + 208] + QUAD $0x000000b024949f0f // setg byte [rsp + 176] LONG $0x206e394c // cmp qword [rsi + 32], r13 - LONG $0x24549f0f; BYTE $0x70 // setg byte [rsp + 112] + LONG $0x24549f0f; BYTE $0x68 // setg byte [rsp + 104] LONG $0x286e394c // cmp qword [rsi + 40], r13 LONG $0x24549f0f; BYTE $0x58 // setg byte [rsp + 88] LONG $0x306e394c // cmp qword [rsi + 48], r13 WORD $0x9f0f; BYTE $0xd0 // setg al LONG $0x386e394c // cmp qword [rsi + 56], r13 - WORD $0x9f0f; BYTE $0xd3 // setg bl + LONG $0xd69f0f41 // setg r14b LONG $0x406e394c // cmp qword [rsi + 64], r13 - QUAD $0x0000009024949f0f // setg byte [rsp + 144] + QUAD $0x0000008024949f0f // setg byte [rsp + 128] LONG $0x486e394c // cmp qword [rsi + 72], r13 WORD $0x9f0f; BYTE $0xd2 // setg dl LONG $0x506e394c // cmp qword [rsi + 80], r13 LONG $0xd19f0f41 // setg r9b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd29f0f41 // setg r10b - LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd39f0f41 // setg r11b + LONG $0x606e394c // cmp qword [rsi + 96], r13 + WORD $0x9f0f; BYTE $0xd3 // setg bl LONG $0x686e394c // cmp qword [rsi + 104], r13 LONG $0xd49f0f41 // setg r12b LONG $0x706e394c // cmp qword [rsi + 112], r13 
@@ -34665,143 +35975,144 @@ LBB7_161: LONG $0x786e394c // cmp qword [rsi + 120], r13 WORD $0x9f0f; BYTE $0xd1 // setg cl LONG $0x80ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 128], r13 - LONG $0x24549f0f; BYTE $0x50 // setg byte [rsp + 80] + LONG $0x24549f0f; BYTE $0x48 // setg byte [rsp + 72] LONG $0x88ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 136], r13 - LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] + QUAD $0x0000009024949f0f // setg byte [rsp + 144] LONG $0x90ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 144], r13 - LONG $0x24549f0f; BYTE $0x68 // setg byte [rsp + 104] + LONG $0x24549f0f; BYTE $0x70 // setg byte [rsp + 112] LONG $0x98ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 152], r13 - LONG $0x24549f0f; BYTE $0x60 // setg byte [rsp + 96] + LONG $0x24549f0f; BYTE $0x78 // setg byte [rsp + 120] LONG $0xa0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 160], r13 - LONG $0x24549f0f; BYTE $0x40 // setg byte [rsp + 64] + LONG $0x24549f0f; BYTE $0x50 // setg byte [rsp + 80] LONG $0xa8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 168], r13 - LONG $0x24549f0f; BYTE $0x48 // setg byte [rsp + 72] + LONG $0x24549f0f; BYTE $0x60 // setg byte [rsp + 96] LONG $0xb0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 176], r13 - LONG $0x24549f0f; BYTE $0x38 // setg byte [rsp + 56] + LONG $0x24549f0f; BYTE $0x40 // setg byte [rsp + 64] LONG $0xb8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 184], r13 LONG $0xd79f0f41 // setg r15b LONG $0xc0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 192], r13 - LONG $0x24549f0f; BYTE $0x08 // setg byte [rsp + 8] + LONG $0x24549f0f; BYTE $0x20 // setg byte [rsp + 32] LONG $0xc8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 200], r13 - LONG $0x24549f0f; BYTE $0x30 // setg byte [rsp + 48] + LONG $0x24549f0f; BYTE $0x38 // setg byte [rsp + 56] LONG $0xd0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 208], r13 - LONG $0x24549f0f; BYTE $0x18 // setg byte [rsp + 24] + LONG $0x24549f0f; BYTE $0x28 // setg byte [rsp + 40] LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 - LONG $0x24549f0f; BYTE $0x20 // setg byte [rsp + 32] + LONG $0x24549f0f; BYTE $0x30 // setg byte [rsp + 48] LONG $0xe0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 224], r13 - LONG $0x24549f0f; BYTE $0x28 // setg byte [rsp + 40] + LONG $0x24549f0f; BYTE $0x18 // setg byte [rsp + 24] LONG $0xe8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 232], r13 LONG $0x24549f0f; BYTE $0x10 // setg byte [rsp + 16] LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 - LONG $0x24149f0f // setg byte [rsp] + LONG $0x24549f0f; BYTE $0x08 // setg byte [rsp + 8] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 LONG $0xd09f0f41 // setg r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000c024bc0240 // add dil, byte [rsp + 192] + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000d024940244 // add r10b, byte [rsp + 208] WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil + LONG $0x07e6c041 // shl r14b, 7 + WORD $0x0841; BYTE $0xc6 // or r14b, al + LONG $0x02e7c040 // shl dil, 2 + WORD $0x0844; BYTE $0xd7 // or dil, r10b WORD $0xd200 // add dl, dl - LONG $0x90249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 144] - QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + LONG $0x80249402; WORD $0x0000; BYTE $0x00 // 
add dl, byte [rsp + 128] + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0x0844; BYTE $0xf0 // or al, r14b + WORD $0x0840; BYTE $0xf8 // or al, dil + LONG $0x24148b4c // mov r10, qword [rsp] LONG $0x02e1c041 // shl r9b, 2 WORD $0x0841; BYTE $0xd1 // or r9b, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] + LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0xc208 // or dl, al WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b + LONG $0x03e3c041 // shl r11b, 3 + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xdb // or bl, r11b LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b + WORD $0x0841; BYTE $0xdc // or r12b, bl QUAD $0x000000a024bcb60f // movzx edi, byte [rsp + 160] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0x0841; BYTE $0xd6 // or r14b, dl WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] - LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] + QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] WORD $0xd200 // add dl, dl - LONG $0x50245402 // add dl, byte [rsp + 80] + LONG $0x48245402 // add dl, byte [rsp + 72] WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] + LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] + LONG $0x2454b60f; BYTE $0x78 // movzx edx, byte [rsp + 120] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] + LONG $0x2454b60f; BYTE $0x50 // movzx edx, byte [rsp + 80] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xfa // or dl, dil WORD $0xd789 // mov edi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] + LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xfa // or dl, dil - LONG $0x241c8841 // mov byte [r12], bl - LONG $0x245cb60f; BYTE $0x38 // movzx ebx, byte [rsp + 56] + WORD $0x8845; BYTE $0x32 // mov byte [r10], r14b + LONG $0x245cb60f; BYTE $0x40 // movzx ebx, byte [rsp + 64] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x244c8841; BYTE $0x01 // mov byte [r12 + 1], cl + LONG $0x014a8841 // mov byte [r10 + 1], cl WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] + LONG $0x244cb60f; BYTE $0x38 // movzx ecx, byte [rsp + 56] WORD $0xc900 // add cl, cl - LONG $0x08244c02 // add cl, byte [rsp + 8] + LONG $0x20244c02 // add cl, byte [rsp + 32] WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] + LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG 
$0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] + LONG $0x244cb60f; BYTE $0x30 // movzx ecx, byte [rsp + 48] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x28 // movzx ecx, byte [rsp + 40] + LONG $0x244cb60f; BYTE $0x18 // movzx ecx, byte [rsp + 24] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xd108 // or cl, dl WORD $0xca89 // mov edx, ecx LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0xd108 // or cl, dl - LONG $0x2414b60f // movzx edx, byte [rsp] + LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] WORD $0xe2c0; BYTE $0x06 // shl dl, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xd0 // or r8b, dl WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x247c8845; BYTE $0x02 // mov byte [r12 + 2], r15b - LONG $0x24448845; BYTE $0x03 // mov byte [r12 + 3], r8b + LONG $0x027a8845 // mov byte [r10 + 2], r15b + LONG $0x03428845 // mov byte [r10 + 3], r8b LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 - LONG $0x04c48349 // add r12, 4 - QUAD $0x000000b024848348; BYTE $0xff // add qword [rsp + 176], -1 - JNE LBB7_161 + LONG $0x04c28349 // add r10, 4 + LONG $0x2414894c // mov qword [rsp], r10 + QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 + JNE LBB7_163 QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] QUAD $0x000000f024948b4c // mov r10, qword [rsp + 240] -LBB7_163: +LBB7_165: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xda // add r10, r11 - JNE LBB7_168 + JNE LBB7_170 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_166 + JMP LBB7_168 -LBB7_170: +LBB7_172: LONG $0x1f538d4d // lea r10, [r11 + 31] WORD $0x854d; BYTE $0xdb // test r11, r11 LONG $0xd3490f4d // cmovns r10, r11 @@ -34811,19 +36122,21 @@ LBB7_170: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x100f44f3; BYTE $0x1a // movss xmm11, dword [rdx] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB7_174 + JE LBB7_176 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_172: - LONG $0x1e2e0f44 // ucomiss xmm11, dword [rsi] - LONG $0x04768d48 // lea rsi, [rsi + 4] - WORD $0xd219 // sbb edx, edx +LBB7_174: + LONG $0x06100ff3 // movss xmm0, dword [rsi] + LONG $0x04c68348 // add rsi, 4 + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + WORD $0x970f; BYTE $0xd2 // seta dl + WORD $0xdaf6 // neg dl LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax LONG $0x03ffc148 // sar rdi, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x3044; BYTE $0xca // xor dl, r9b QUAD $0x00000000fd048d44 // lea r8d, [8*rdi] WORD $0xc189 // mov ecx, eax @@ -34832,203 +36145,235 @@ LBB7_172: WORD $0xe3d3 // shl ebx, cl WORD $0xd320 // and bl, dl WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB7_172 - LONG $0x01c48349 // add r12, 1 + JNE LBB7_174 + LONG $0x24048348; BYTE $0x01 // add qword [rsp], 1 -LBB7_174: +LBB7_176: LONG $0x05fac149 // sar r10, 5 LONG $0x20fb8349 // cmp r11, 32 - JL LBB7_175 + JL LBB7_177 LONG $0x04fa8349 
// cmp r10, 4 - JB LBB7_177 + JB LBB7_179 WORD $0x894c; BYTE $0xd0 // mov rax, r10 LONG $0x07e0c148 // shl rax, 7 WORD $0x0148; BYTE $0xf0 // add rax, rsi - WORD $0x3949; BYTE $0xc4 // cmp r12, rax - JAE LBB7_180 - LONG $0x94048d4b // lea rax, [r12 + 4*r10] + LONG $0x24043948 // cmp qword [rsp], rax + JAE LBB7_182 + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x90048d4a // lea rax, [rax + 4*r10] WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB7_180 + JBE LBB7_182 -LBB7_177: - WORD $0x3145; BYTE $0xc0 // xor r8d, r8d - WORD $0x8948; BYTE $0xf3 // mov rbx, rsi - WORD $0x894d; BYTE $0xe6 // mov r14, r12 +LBB7_179: + WORD $0xc031 // xor eax, eax + WORD $0x8948; BYTE $0xf2 // mov rdx, rsi + LONG $0x243c8b4c // mov r15, qword [rsp] -LBB7_183: +LBB7_185: + LONG $0x243c894c // mov qword [rsp], r15 QUAD $0x00000088249c894c // mov qword [rsp + 136], r11 - QUAD $0x000000b02494894c // mov qword [rsp + 176], r10 - WORD $0x294d; BYTE $0xc2 // sub r10, r8 QUAD $0x000000c02494894c // mov qword [rsp + 192], r10 + WORD $0x2949; BYTE $0xc2 // sub r10, rax + QUAD $0x000000d02494894c // mov qword [rsp + 208], r10 -LBB7_184: - LONG $0x2434894c // mov qword [rsp], r14 - LONG $0x1b2e0f44 // ucomiss xmm11, dword [rbx] - QUAD $0x000000d02494920f // setb byte [rsp + 208] - LONG $0x5b2e0f44; BYTE $0x04 // ucomiss xmm11, dword [rbx + 4] - LONG $0xd0920f41 // setb r8b - LONG $0x5b2e0f44; BYTE $0x08 // ucomiss xmm11, dword [rbx + 8] - LONG $0xd6920f41 // setb r14b - LONG $0x5b2e0f44; BYTE $0x0c // ucomiss xmm11, dword [rbx + 12] - LONG $0xd5920f41 // setb r13b - LONG $0x5b2e0f44; BYTE $0x10 // ucomiss xmm11, dword [rbx + 16] - LONG $0x2454920f; BYTE $0x70 // setb byte [rsp + 112] - LONG $0x5b2e0f44; BYTE $0x14 // ucomiss xmm11, dword [rbx + 20] - LONG $0x2454920f; BYTE $0x58 // setb byte [rsp + 88] - LONG $0x5b2e0f44; BYTE $0x18 // ucomiss xmm11, dword [rbx + 24] - WORD $0x920f; BYTE $0xd0 // setb al - LONG $0x5b2e0f44; BYTE $0x1c // ucomiss xmm11, dword [rbx + 28] - LONG $0xd3920f41 // setb r11b - LONG $0x5b2e0f44; BYTE $0x20 // ucomiss xmm11, dword [rbx + 32] - QUAD $0x000000a02494920f // setb byte [rsp + 160] - LONG $0x5b2e0f44; BYTE $0x24 // ucomiss xmm11, dword [rbx + 36] - WORD $0x920f; BYTE $0xd2 // setb dl - LONG $0x5b2e0f44; BYTE $0x28 // ucomiss xmm11, dword [rbx + 40] - LONG $0xd6920f40 // setb sil - LONG $0x5b2e0f44; BYTE $0x2c // ucomiss xmm11, dword [rbx + 44] - LONG $0xd1920f41 // setb r9b - LONG $0x5b2e0f44; BYTE $0x30 // ucomiss xmm11, dword [rbx + 48] - LONG $0xd2920f41 // setb r10b - LONG $0x5b2e0f44; BYTE $0x34 // ucomiss xmm11, dword [rbx + 52] - LONG $0xd4920f41 // setb r12b - LONG $0x5b2e0f44; BYTE $0x38 // ucomiss xmm11, dword [rbx + 56] - LONG $0x2454920f; BYTE $0x78 // setb byte [rsp + 120] - LONG $0x5b2e0f44; BYTE $0x3c // ucomiss xmm11, dword [rbx + 60] - LONG $0xd7920f40 // setb dil - LONG $0x5b2e0f44; BYTE $0x40 // ucomiss xmm11, dword [rbx + 64] - LONG $0x2454920f; BYTE $0x50 // setb byte [rsp + 80] - LONG $0x5b2e0f44; BYTE $0x44 // ucomiss xmm11, dword [rbx + 68] - QUAD $0x000000902494920f // setb byte [rsp + 144] - LONG $0x5b2e0f44; BYTE $0x48 // ucomiss xmm11, dword [rbx + 72] - LONG $0x2454920f; BYTE $0x68 // setb byte [rsp + 104] - LONG $0x5b2e0f44; BYTE $0x4c // ucomiss xmm11, dword [rbx + 76] - LONG $0x2454920f; BYTE $0x60 // setb byte [rsp + 96] - LONG $0x5b2e0f44; BYTE $0x50 // ucomiss xmm11, dword [rbx + 80] - LONG $0x2454920f; BYTE $0x40 // setb byte [rsp + 64] - LONG $0x5b2e0f44; BYTE $0x54 // ucomiss xmm11, dword [rbx + 84] - LONG $0x2454920f; BYTE $0x48 // setb byte 
[rsp + 72] - LONG $0x5b2e0f44; BYTE $0x58 // ucomiss xmm11, dword [rbx + 88] - LONG $0x2454920f; BYTE $0x38 // setb byte [rsp + 56] - LONG $0x5b2e0f44; BYTE $0x5c // ucomiss xmm11, dword [rbx + 92] - LONG $0xd7920f41 // setb r15b - LONG $0x5b2e0f44; BYTE $0x60 // ucomiss xmm11, dword [rbx + 96] - LONG $0x2454920f; BYTE $0x08 // setb byte [rsp + 8] - LONG $0x5b2e0f44; BYTE $0x64 // ucomiss xmm11, dword [rbx + 100] - LONG $0x2454920f; BYTE $0x30 // setb byte [rsp + 48] - LONG $0x5b2e0f44; BYTE $0x68 // ucomiss xmm11, dword [rbx + 104] - LONG $0x2454920f; BYTE $0x18 // setb byte [rsp + 24] - LONG $0x5b2e0f44; BYTE $0x6c // ucomiss xmm11, dword [rbx + 108] - LONG $0x2454920f; BYTE $0x20 // setb byte [rsp + 32] - LONG $0x5b2e0f44; BYTE $0x70 // ucomiss xmm11, dword [rbx + 112] - LONG $0x2454920f; BYTE $0x28 // setb byte [rsp + 40] - LONG $0x5b2e0f44; BYTE $0x74 // ucomiss xmm11, dword [rbx + 116] - LONG $0x2454920f; BYTE $0x10 // setb byte [rsp + 16] - LONG $0x5b2e0f44; BYTE $0x78 // ucomiss xmm11, dword [rbx + 120] - QUAD $0x000000802494920f // setb byte [rsp + 128] - LONG $0x5b2e0f44; BYTE $0x7c // ucomiss xmm11, dword [rbx + 124] - WORD $0x920f; BYTE $0xd1 // setb cl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000d024840244 // add r8b, byte [rsp + 208] +LBB7_186: + LONG $0x02100ff3 // movss xmm0, dword [rdx] + LONG $0x4a100ff3; BYTE $0x04 // movss xmm1, dword [rdx + 4] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x2454970f; BYTE $0x18 // seta byte [rsp + 24] + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + WORD $0x970f; BYTE $0xd3 // seta bl + LONG $0x42100ff3; BYTE $0x08 // movss xmm0, dword [rdx + 8] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x0c // movss xmm0, dword [rdx + 12] + LONG $0xd5970f41 // seta r13b + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd4970f41 // seta r12b + LONG $0x42100ff3; BYTE $0x10 // movss xmm0, dword [rdx + 16] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x14 // movss xmm0, dword [rdx + 20] + LONG $0x2454970f; BYTE $0x50 // seta byte [rsp + 80] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x2454970f; BYTE $0x60 // seta byte [rsp + 96] + LONG $0x42100ff3; BYTE $0x18 // movss xmm0, dword [rdx + 24] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x1c // movss xmm0, dword [rdx + 28] + LONG $0x2454970f; BYTE $0x58 // seta byte [rsp + 88] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x2454970f; BYTE $0x40 // seta byte [rsp + 64] + LONG $0x42100ff3; BYTE $0x20 // movss xmm0, dword [rdx + 32] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x24 // movss xmm0, dword [rdx + 36] + LONG $0x2454970f; BYTE $0x10 // seta byte [rsp + 16] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + WORD $0x970f; BYTE $0xd0 // seta al + LONG $0x42100ff3; BYTE $0x28 // movss xmm0, dword [rdx + 40] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x2c // movss xmm0, dword [rdx + 44] + LONG $0x2454970f; BYTE $0x28 // seta byte [rsp + 40] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x2454970f; BYTE $0x20 // seta byte [rsp + 32] + LONG $0x42100ff3; BYTE $0x30 // movss xmm0, dword [rdx + 48] + LONG $0x52100ff3; BYTE $0x34 // movss xmm2, dword [rdx + 52] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x62100ff3; BYTE $0x38 // movss xmm4, dword [rdx + 56] + LONG $0x6a100ff3; BYTE $0x3c // movss xmm5, dword [rdx + 60] + LONG $0x2454970f; BYTE $0x78 // seta byte [rsp + 120] + LONG $0x100f44f3; WORD $0x4042 // movss xmm8, dword [rdx + 64] + LONG $0x100f44f3; WORD 
$0x444a // movss xmm9, dword [rdx + 68] + LONG $0xd32e0f41 // ucomiss xmm2, xmm11 + LONG $0x100f44f3; WORD $0x4852 // movss xmm10, dword [rdx + 72] + LONG $0x100f44f3; WORD $0x4c62 // movss xmm12, dword [rdx + 76] + QUAD $0x000000902494970f // seta byte [rsp + 144] + LONG $0x100f44f3; WORD $0x506a // movss xmm13, dword [rdx + 80] + LONG $0x100f44f3; WORD $0x5472 // movss xmm14, dword [rdx + 84] + LONG $0xe32e0f41 // ucomiss xmm4, xmm11 + LONG $0x62100ff3; BYTE $0x58 // movss xmm4, dword [rdx + 88] + LONG $0x42100ff3; BYTE $0x5c // movss xmm0, dword [rdx + 92] + QUAD $0x000000802494970f // seta byte [rsp + 128] + LONG $0x4a100ff3; BYTE $0x60 // movss xmm1, dword [rdx + 96] + LONG $0x52100ff3; BYTE $0x64 // movss xmm2, dword [rdx + 100] + LONG $0xeb2e0f41 // ucomiss xmm5, xmm11 + LONG $0x6a100ff3; BYTE $0x68 // movss xmm5, dword [rdx + 104] + LONG $0x5a100ff3; BYTE $0x6c // movss xmm3, dword [rdx + 108] + LONG $0xd3970f41 // seta r11b + LONG $0x72100ff3; BYTE $0x70 // movss xmm6, dword [rdx + 112] + LONG $0x7a100ff3; BYTE $0x74 // movss xmm7, dword [rdx + 116] + LONG $0xc32e0f45 // ucomiss xmm8, xmm11 + QUAD $0x000000b02494970f // seta byte [rsp + 176] + LONG $0xcb2e0f45 // ucomiss xmm9, xmm11 + WORD $0x970f; BYTE $0xd1 // seta cl + LONG $0xd32e0f45 // ucomiss xmm10, xmm11 + LONG $0xd6970f40 // seta sil + LONG $0xe32e0f45 // ucomiss xmm12, xmm11 + LONG $0xd0970f41 // seta r8b + LONG $0xeb2e0f45 // ucomiss xmm13, xmm11 + LONG $0xd2970f41 // seta r10b + LONG $0xf32e0f45 // ucomiss xmm14, xmm11 + LONG $0xd6970f41 // seta r14b + LONG $0xe32e0f41 // ucomiss xmm4, xmm11 + QUAD $0x000000a02494970f // seta byte [rsp + 160] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd1970f41 // seta r9b + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + LONG $0x2454970f; BYTE $0x68 // seta byte [rsp + 104] + LONG $0xd32e0f41 // ucomiss xmm2, xmm11 + LONG $0xd7970f41 // seta r15b + LONG $0xeb2e0f41 // ucomiss xmm5, xmm11 + LONG $0x2454970f; BYTE $0x70 // seta byte [rsp + 112] + LONG $0xdb2e0f41 // ucomiss xmm3, xmm11 + LONG $0x2454970f; BYTE $0x48 // seta byte [rsp + 72] + LONG $0xf32e0f41 // ucomiss xmm6, xmm11 + LONG $0x2454970f; BYTE $0x38 // seta byte [rsp + 56] + LONG $0xfb2e0f41 // ucomiss xmm7, xmm11 + LONG $0x42100ff3; BYTE $0x78 // movss xmm0, dword [rdx + 120] + LONG $0x2454970f; BYTE $0x30 // seta byte [rsp + 48] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x2454970f; BYTE $0x08 // seta byte [rsp + 8] + LONG $0x42100ff3; BYTE $0x7c // movss xmm0, dword [rdx + 124] + LONG $0x80ea8348 // sub rdx, -128 + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd7970f40 // seta dil + WORD $0xdb00 // add bl, bl + LONG $0x18245c02 // add bl, byte [rsp + 24] + LONG $0x02e5c041 // shl r13b, 2 + WORD $0x0841; BYTE $0xdd // or r13b, bl + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xec // or r12b, r13b + LONG $0x245cb60f; BYTE $0x50 // movzx ebx, byte [rsp + 80] + WORD $0xe3c0; BYTE $0x04 // shl bl, 4 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + WORD $0x8941; BYTE $0xdc // mov r12d, ebx + LONG $0x245cb60f; BYTE $0x60 // movzx ebx, byte [rsp + 96] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0x0844; BYTE $0xe3 // or bl, r12b + LONG $0x64b60f44; WORD $0x5824 // movzx r12d, byte [rsp + 88] + LONG $0x06e4c041 // shl r12b, 6 + LONG $0x6cb60f44; WORD $0x4024 // movzx r13d, byte [rsp + 64] + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0845; BYTE $0xe5 // or r13b, r12b + WORD $0xc000 // add al, al + LONG $0x10244402 // add al, byte [rsp + 16] + LONG $0x64b60f44; WORD $0x2824 // movzx r12d, byte [rsp + 40] + LONG 
$0x02e4c041 // shl r12b, 2 + WORD $0x0841; BYTE $0xc4 // or r12b, al + WORD $0x8944; BYTE $0xe0 // mov eax, r12d + LONG $0x64b60f44; WORD $0x2024 // movzx r12d, byte [rsp + 32] + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0841; BYTE $0xc4 // or r12b, al + LONG $0x2444b60f; BYTE $0x78 // movzx eax, byte [rsp + 120] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0x0841; BYTE $0xdd // or r13b, bl + QUAD $0x00000090249cb60f // movzx ebx, byte [rsp + 144] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0xc308 // or bl, al + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e3c041 // shl r11b, 7 WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xc6 // or r14b, r8b - WORD $0xd200 // add dl, dl - LONG $0xa0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 160] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b + WORD $0xc900 // add cl, cl + LONG $0xb0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 176] LONG $0x02e6c040 // shl sil, 2 - WORD $0x0840; BYTE $0xd6 // or sil, dl - LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd0 // mov r8d, edx - LONG $0x03e1c041 // shl r9b, 3 - WORD $0x0841; BYTE $0xf1 // or r9b, sil - LONG $0x2454b60f; BYTE $0x58 // movzx edx, byte [rsp + 88] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xc2 // or dl, r8b + WORD $0x0840; BYTE $0xce // or sil, cl + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf0 // or r8b, sil LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0845; BYTE $0xca // or r10b, r9b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - LONG $0x2474b60f; BYTE $0x78 // movzx esi, byte [rsp + 120] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e7c040 // shl dil, 7 - WORD $0x0840; BYTE $0xf7 // or dil, sil - WORD $0x0841; BYTE $0xd3 // or r11b, dl - WORD $0x0844; BYTE $0xe7 // or dil, r12b - LONG $0x24348b4c // mov r14, qword [rsp] - QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] - WORD $0xc000 // add al, al - LONG $0x50244402 // add al, byte [rsp + 80] - LONG $0x2454b60f; BYTE $0x68 // movzx edx, byte [rsp + 104] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x60 // movzx edx, byte [rsp + 96] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x40 // movzx edx, byte [rsp + 64] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - LONG $0x2454b60f; BYTE $0x48 // movzx edx, byte [rsp + 72] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0x8845; BYTE $0x1e // mov byte [r14], r11b - LONG $0x2474b60f; BYTE $0x38 // movzx esi, byte [rsp + 56] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x017e8841 // mov byte [r14 + 1], dil - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] - WORD $0xc000 // add al, al - LONG $0x08244402 // add al, byte [rsp + 8] - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x18 // movzx eax, byte [rsp + 24] + WORD $0x0845; BYTE $0xc2 // or r10b, r8b + LONG $0x05e6c041 // shl r14b, 5 + WORD $0x0845; BYTE 
$0xd6 // or r14b, r10b + WORD $0x0841; BYTE $0xdb // or r11b, bl + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e1c041 // shl r9b, 7 + WORD $0x0841; BYTE $0xc1 // or r9b, al + LONG $0x24348b48 // mov rsi, qword [rsp] + WORD $0x8844; BYTE $0x2e // mov byte [rsi], r13b + WORD $0x0845; BYTE $0xf1 // or r9b, r14b + WORD $0x0045; BYTE $0xff // add r15b, r15b + LONG $0x247c0244; BYTE $0x68 // add r15b, byte [rsp + 104] + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0x0844; BYTE $0xf8 // or al, r15b + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x48 // movzx eax, byte [rsp + 72] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x38 // movzx eax, byte [rsp + 56] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] - WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xd008 // or al, dl - QUAD $0x000000802494b60f // movzx edx, byte [rsp + 128] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0xd108 // or cl, dl - WORD $0xc108 // or cl, al - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x034e8841 // mov byte [r14 + 3], cl - LONG $0x80c38148; WORD $0x0000; BYTE $0x00 // add rbx, 128 - LONG $0x04c68349 // add r14, 4 - QUAD $0x000000c024848348; BYTE $0xff // add qword [rsp + 192], -1 - JNE LBB7_184 + WORD $0xc808 // or al, cl + LONG $0x245cb60f; BYTE $0x30 // movzx ebx, byte [rsp + 48] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0xc308 // or bl, al + LONG $0x015e8844 // mov byte [rsi + 1], r11b + LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xc7 // or dil, al + LONG $0x024e8844 // mov byte [rsi + 2], r9b + WORD $0x0840; BYTE $0xdf // or dil, bl + LONG $0x037e8840 // mov byte [rsi + 3], dil + LONG $0x04c68348 // add rsi, 4 + LONG $0x24348948 // mov qword [rsp], rsi + QUAD $0x000000d024848348; BYTE $0xff // add qword [rsp + 208], -1 + JNE LBB7_186 + LONG $0x243c8b4c // mov r15, qword [rsp] QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] - QUAD $0x000000b024948b4c // mov r10, qword [rsp + 176] - JMP LBB7_186 + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] + JMP LBB7_188 LBB7_9: - LONG $0x2464894c; BYTE $0x58 // mov qword [rsp + 88], r12 + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax LBB7_90: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 @@ -35038,49 +36383,65 @@ LBB7_90: JMP LBB7_96 LBB7_61: - LONG $0x2464894c; BYTE $0x48 // mov qword [rsp + 72], r12 - WORD $0x8949; BYTE $0xf6 // mov r14, rsi + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax + WORD $0x8949; BYTE $0xf4 // mov r12, rsi LBB7_72: - LONG $0x05e2c149 // shl r10, 5 - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + LONG $0x05e7c149 // shl r15, 5 + 
WORD $0x394d; BYTE $0xdf // cmp r15, r11 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 - WORD $0x294d; BYTE $0xd0 // sub r8, r10 - WORD $0xf749; BYTE $0xd2 // not r10 - WORD $0x014d; BYTE $0xda // add r10, r11 + WORD $0x294d; BYTE $0xf8 // sub r8, r15 + WORD $0xf749; BYTE $0xd7 // not r15 + WORD $0x014d; BYTE $0xdf // add r15, r11 JNE LBB7_75 WORD $0xc031 // xor eax, eax JMP LBB7_78 -LBB7_175: - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - WORD $0x8948; BYTE $0xf3 // mov rbx, rsi +LBB7_119: + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_186: +LBB7_130: + LONG $0x05e7c149 // shl r15, 5 + WORD $0x394d; BYTE $0xdf // cmp r15, r11 + JGE LBB7_202 + WORD $0x894d; BYTE $0xd8 // mov r8, r11 + WORD $0x294d; BYTE $0xf8 // sub r8, r15 + WORD $0xf749; BYTE $0xd7 // not r15 + WORD $0x014d; BYTE $0xdf // add r15, r11 + JNE LBB7_135 + WORD $0x3145; BYTE $0xff // xor r15d, r15d + JMP LBB7_133 + +LBB7_177: + LONG $0x243c8b4c // mov r15, qword [rsp] + WORD $0x8948; BYTE $0xf2 // mov rdx, rsi + +LBB7_188: LONG $0x05e2c149 // shl r10, 5 WORD $0x394d; BYTE $0xda // cmp r10, r11 - JGE LBB7_200 + JGE LBB7_202 WORD $0x894d; BYTE $0xd8 // mov r8, r11 WORD $0x294d; BYTE $0xd0 // sub r8, r10 WORD $0xf749; BYTE $0xd2 // not r10 WORD $0x014d; BYTE $0xda // add r10, r11 - JNE LBB7_191 + JNE LBB7_193 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB7_189 + JMP LBB7_191 -LBB7_153: +LBB7_155: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_154: +LBB7_156: WORD $0x3b4c; BYTE $0x2e // cmp r13, qword [rsi] WORD $0xff19 // sbb edi, edi WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x14b60f45; BYTE $0x14 // movzx r10d, byte [r12 + rdx] + LONG $0x14b60f45; BYTE $0x16 // movzx r10d, byte [r14 + rdx] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b0 // mov al, 1 @@ -35088,7 +36449,7 @@ LBB7_154: WORD $0x3044; BYTE $0xd7 // xor dil, r10b WORD $0x2040; BYTE $0xf8 // and al, dil WORD $0x3044; BYTE $0xd0 // xor al, r10b - LONG $0x14048841 // mov byte [r12 + rdx], al + LONG $0x16048841 // mov byte [r14 + rdx], al LONG $0x02c38349 // add r11, 2 LONG $0x086e3b4c // cmp r13, qword [rsi + 8] LONG $0x10768d48 // lea rsi, [rsi + 16] @@ -35099,29 +36460,29 @@ LBB7_154: WORD $0xe3d2 // shl bl, cl WORD $0x2040; BYTE $0xfb // and bl, dil WORD $0xc330 // xor bl, al - LONG $0x141c8841 // mov byte [r12 + rdx], bl + LONG $0x161c8841 // mov byte [r14 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB7_154 + JNE LBB7_156 LBB7_40: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 + JE LBB7_202 WORD $0x3b4c; BYTE $0x2e // cmp r13, qword [rsi] - JMP LBB7_197 + JMP LBB7_111 -LBB7_151: +LBB7_153: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_152: +LBB7_154: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -35129,7 +36490,7 @@ LBB7_152: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // 
and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x046e3944 // cmp dword [rsi + 4], r13d LONG $0x08768d48 // lea rsi, [rsi + 8] @@ -35141,25 +36502,25 @@ LBB7_152: WORD $0xe2d2 // shl dl, cl WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl - LONG $0x3c148841 // mov byte [r12 + rdi], dl + LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_152 + JNE LBB7_154 -LBB7_148: +LBB7_150: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 + JE LBB7_202 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - JMP LBB7_150 + JMP LBB7_152 LBB7_93: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - LONG $0x245c8b4c; BYTE $0x58 // mov r11, qword [rsp + 88] + LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] LBB7_94: WORD $0x894c; BYTE $0xc8 // mov rax, r9 - LONG $0x0e343846 // cmp byte [rsi + r9], r14b + LONG $0x0e243846 // cmp byte [rsi + r9], r12b WORD $0x9f0f; BYTE $0xd3 // setg bl WORD $0xdbf6 // neg bl WORD $0x894c; BYTE $0xcf // mov rdi, r9 @@ -35173,7 +36534,7 @@ LBB7_94: WORD $0xda20 // and dl, bl WORD $0x3044; BYTE $0xca // xor dl, r9b LONG $0x3b148841 // mov byte [r11 + rdi], dl - LONG $0x06743844; BYTE $0x01 // cmp byte [rsi + rax + 1], r14b + LONG $0x06643844; BYTE $0x01 // cmp byte [rsi + rax + 1], r12b LONG $0x02488d4c // lea r9, [rax + 2] WORD $0x9f0f; BYTE $0xd3 // setg bl WORD $0xdbf6 // neg bl @@ -35190,13 +36551,13 @@ LBB7_94: LBB7_96: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 - WORD $0x3844; BYTE $0x36 // cmp byte [rsi], r14b + JE LBB7_202 + WORD $0x3844; BYTE $0x26 // cmp byte [rsi], r12b WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xca // mov rdx, r9 LONG $0x03eac148 // shr rdx, 3 - LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] + LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] LONG $0x103c8a41 // mov dil, byte [r8 + rdx] LONG $0x07e18041 // and r9b, 7 WORD $0x01b3 // mov bl, 1 @@ -35206,17 +36567,16 @@ LBB7_96: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB7_200 + JMP LBB7_202 LBB7_75: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0xc031 // xor eax, eax - LONG $0x245c8b4c; BYTE $0x48 // mov r11, qword [rsp + 72] - LONG $0x24548a44; BYTE $0x28 // mov r10b, byte [rsp + 40] + LONG $0x245c8b4c; BYTE $0x78 // mov r11, qword [rsp + 120] LBB7_76: - LONG $0x06143a45 // cmp r10b, byte [r14 + rax] + LONG $0x04143a45 // cmp r10b, byte [r12 + rax] WORD $0xf619 // sbb esi, esi WORD $0x8948; BYTE $0xc7 // mov rdi, rax LONG $0x03efc148 // shr rdi, 3 @@ -35229,7 +36589,7 @@ LBB7_76: WORD $0x2040; BYTE $0xf2 // and dl, sil WORD $0xda30 // xor dl, bl LONG $0x3b148841 // mov byte [r11 + rdi], dl - LONG $0x06543a45; BYTE $0x01 // cmp r10b, byte [r14 + rax + 1] + LONG $0x04543a45; BYTE $0x01 // cmp r10b, byte [r12 + rax + 1] LONG $0x02408d48 // lea rax, [rax + 2] WORD $0xf619 // sbb esi, esi WORD $0x3040; BYTE $0xd6 // xor sil, dl @@ -35241,17 +36601,16 @@ LBB7_76: LONG $0x3b1c8841 // mov byte [r11 + rdi], bl WORD $0x3949; BYTE $0xc1 // cmp r9, rax JNE LBB7_76 - WORD $0x0149; BYTE $0xc6 // add r14, rax + WORD $0x0149; BYTE $0xc4 // add r12, rax LBB7_78: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 - LONG $0x28244c8a // mov cl, byte [rsp + 40] - WORD $0x3a41; BYTE $0x0e // cmp cl, byte [r14] + JE 
LBB7_202 + LONG $0x24143a45 // cmp r10b, byte [r12] WORD $0xd219 // sbb edx, edx WORD $0x8948; BYTE $0xc6 // mov rsi, rax LONG $0x03eec148 // shr rsi, 3 - LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] + LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] LONG $0x303c8a41 // mov dil, byte [r8 + rsi] WORD $0x0724 // and al, 7 WORD $0x01b3 // mov bl, 1 @@ -35261,20 +36620,20 @@ LBB7_78: WORD $0xd320 // and bl, dl WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x301c8841 // mov byte [r8 + rsi], bl - JMP LBB7_200 + JMP LBB7_202 -LBB7_135: +LBB7_137: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_136: +LBB7_138: WORD $0x3b44; BYTE $0x2e // cmp r13d, dword [rsi] WORD $0xff19 // sbb edi, edi WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x14b60f45; BYTE $0x14 // movzx r10d, byte [r12 + rdx] + LONG $0x14b60f45; BYTE $0x16 // movzx r10d, byte [r14 + rdx] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b0 // mov al, 1 @@ -35282,7 +36641,7 @@ LBB7_136: WORD $0x3044; BYTE $0xd7 // xor dil, r10b WORD $0x2040; BYTE $0xf8 // and al, dil WORD $0x3044; BYTE $0xd0 // xor al, r10b - LONG $0x14048841 // mov byte [r12 + rdx], al + LONG $0x16048841 // mov byte [r14 + rdx], al LONG $0x02c38349 // add r11, 2 LONG $0x046e3b44 // cmp r13d, dword [rsi + 4] LONG $0x08768d48 // lea rsi, [rsi + 8] @@ -35293,68 +36652,82 @@ LBB7_136: WORD $0xe3d2 // shl bl, cl WORD $0x2040; BYTE $0xfb // and bl, dil WORD $0xc330 // xor bl, al - LONG $0x141c8841 // mov byte [r12 + rdx], bl + LONG $0x161c8841 // mov byte [r14 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB7_136 + JNE LBB7_138 LBB7_24: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 + JE LBB7_202 WORD $0x3b44; BYTE $0x2e // cmp r13d, dword [rsi] - JMP LBB7_197 + JMP LBB7_111 -LBB7_193: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d +LBB7_196: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0x3145; BYTE $0xd2 // xor r10d, r10d + LONG $0x241c8b4c // mov r11, qword [rsp] -LBB7_194: - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - WORD $0xc019 // sbb eax, eax - WORD $0x894c; BYTE $0xdf // mov rdi, r11 +LBB7_197: + LONG $0x0e100ff2 // movsd xmm1, qword [rsi] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xd7 // mov rdi, r10 LONG $0x03efc148 // shr rdi, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] - WORD $0x3044; BYTE $0xc8 // xor al, r9b - WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl + LONG $0x14b60f41; BYTE $0x3b // movzx edx, byte [r11 + rdi] + WORD $0xd030 // xor al, dl WORD $0xc320 // and bl, al - WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl - LONG $0x02c38349 // add r11, 2 - LONG $0x462e0f66; BYTE $0x08 // ucomisd xmm0, qword [rsi + 8] - LONG $0x10768d48 // lea rsi, [rsi + 16] - WORD $0xc019 // sbb eax, eax + WORD $0xd330 // xor bl, dl + LONG $0x3b1c8841 // mov byte [r11 + rdi], bl + LONG $0x02c28349 // add r10, 2 + LONG $0x4e100ff2; BYTE $0x08 // movsd xmm1, qword [rsi + 8] + LONG $0x10c68348 // add 
rsi, 16 + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 WORD $0x01b2 // mov dl, 1 WORD $0xe2d2 // shl dl, cl WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl - LONG $0x3c148841 // mov byte [r12 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_194 + LONG $0x3b148841 // mov byte [r11 + rdi], dl + WORD $0x394d; BYTE $0xd1 // cmp r9, r10 + JNE LBB7_197 -LBB7_195: - LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - JMP LBB7_197 +LBB7_198: + LONG $0x01c0f641 // test r8b, 1 + JE LBB7_202 + LONG $0x0e100ff2 // movsd xmm1, qword [rsi] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xd2 // mov rdx, r10 + LONG $0x03eac148 // shr rdx, 3 + LONG $0x243c8b48 // mov rdi, qword [rsp] + LONG $0x17348a40 // mov sil, byte [rdi + rdx] + LONG $0x07e28041 // and r10b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xd1 // mov ecx, r10d + JMP LBB7_200 -LBB7_111: +LBB7_112: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_112: +LBB7_113: LONG $0x2e3b4466 // cmp r13w, word [rsi] WORD $0xff19 // sbb edi, edi WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x14b60f45; BYTE $0x14 // movzx r10d, byte [r12 + rdx] + LONG $0x14b60f45; BYTE $0x16 // movzx r10d, byte [r14 + rdx] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b0 // mov al, 1 @@ -35362,7 +36735,7 @@ LBB7_112: WORD $0x3044; BYTE $0xd7 // xor dil, r10b WORD $0x2040; BYTE $0xf8 // and al, dil WORD $0x3044; BYTE $0xd0 // xor al, r10b - LONG $0x14048841 // mov byte [r12 + rdx], al + LONG $0x16048841 // mov byte [r14 + rdx], al LONG $0x02c38349 // add r11, 2 LONG $0x6e3b4466; BYTE $0x02 // cmp r13w, word [rsi + 2] LONG $0x04768d48 // lea rsi, [rsi + 4] @@ -35373,97 +36746,42 @@ LBB7_112: WORD $0xe3d2 // shl bl, cl WORD $0x2040; BYTE $0xfb // and bl, dil WORD $0xc330 // xor bl, al - LONG $0x141c8841 // mov byte [r12 + rdx], bl + LONG $0x161c8841 // mov byte [r14 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB7_112 + JNE LBB7_113 LBB7_109: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 + JE LBB7_202 LONG $0x2e3b4466 // cmp r13w, word [rsi] -LBB7_197: +LBB7_111: WORD $0xc019 // sbb eax, eax WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 - LONG $0x14348a41 // mov sil, byte [r12 + rdx] + LONG $0x243c8b48 // mov rdi, qword [rsp] + LONG $0x17348a40 // mov sil, byte [rdi + rdx] LONG $0x07e38041 // and r11b, 7 WORD $0x01b3 // mov bl, 1 WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe3d2 // shl bl, cl WORD $0x3040; BYTE $0xf0 // xor al, sil WORD $0xc320 // and bl, al - JMP LBB7_198 + JMP LBB7_201 -LBB7_133: - WORD $0x894d; BYTE $0xc1 // mov r9, r8 - LONG $0xfee18349 // and r9, -2 - WORD $0x3145; BYTE $0xf6 // xor r14d, r14d - QUAD $0x000000f0249c8b44 // mov r11d, dword [rsp + 240] - -LBB7_134: - WORD $0x8948; BYTE $0xf0 // mov rax, rsi - LONG $0x1e394466 // cmp word [rsi], r11w - WORD $0x9f0f; BYTE $0xd2 // setg dl - WORD $0xdaf6 // neg dl - WORD $0x894c; BYTE $0xf7 // mov rdi, r14 - LONG $0x03efc148 // shr rdi, 3 - LONG $0x14b60f45; BYTE $0x3c // movzx r10d, byte [r12 + rdi] - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d - WORD $0xe180; BYTE 
$0x06 // and cl, 6 - WORD $0x01b3 // mov bl, 1 - WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xd2 // xor dl, r10b - WORD $0xd320 // and bl, dl - WORD $0x3044; BYTE $0xd3 // xor bl, r10b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl - LONG $0x02c68349 // add r14, 2 - LONG $0x5e394466; BYTE $0x02 // cmp word [rsi + 2], r11w - LONG $0x04768d48 // lea rsi, [rsi + 4] - WORD $0x9f0f; BYTE $0xd2 // setg dl - WORD $0xdaf6 // neg dl - WORD $0xda30 // xor dl, bl - WORD $0xc980; BYTE $0x01 // or cl, 1 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0xd020 // and al, dl - WORD $0xd830 // xor al, bl - LONG $0x3c048841 // mov byte [r12 + rdi], al - WORD $0x394d; BYTE $0xf1 // cmp r9, r14 - JNE LBB7_134 - -LBB7_131: - LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 - LONG $0xf024848b; WORD $0x0000; BYTE $0x00 // mov eax, dword [rsp + 240] - WORD $0x3966; BYTE $0x06 // cmp word [rsi], ax - WORD $0x9f0f; BYTE $0xd0 // setg al - WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xf2 // mov rdx, r14 - LONG $0x03eac148 // shr rdx, 3 - LONG $0x143c8a41 // mov dil, byte [r12 + rdx] - LONG $0x07e68041 // and r14b, 7 - WORD $0x01b3 // mov bl, 1 - WORD $0x8944; BYTE $0xf1 // mov ecx, r14d - WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf8 // xor al, dil - WORD $0xc320 // and bl, al - WORD $0x3040; BYTE $0xfb // xor bl, dil - JMP LBB7_199 - -LBB7_168: +LBB7_170: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d + LONG $0x24348b4c // mov r14, qword [rsp] -LBB7_169: +LBB7_171: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 - WORD $0x894d; BYTE $0xe6 // mov r14, r12 - LONG $0x0cb60f45; BYTE $0x3c // movzx r9d, byte [r12 + rdi] + LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 @@ -35471,7 +36789,7 @@ LBB7_169: WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b - LONG $0x3c1c8841 // mov byte [r12 + rdi], bl + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x02c38349 // add r11, 2 LONG $0x086e394c // cmp qword [rsi + 8], r13 LONG $0x10768d48 // lea rsi, [rsi + 16] @@ -35483,88 +36801,155 @@ LBB7_169: WORD $0xe2d2 // shl dl, cl WORD $0xc220 // and dl, al WORD $0xda30 // xor dl, bl - LONG $0x3c148841 // mov byte [r12 + rdi], dl + LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_169 + JNE LBB7_171 -LBB7_166: +LBB7_168: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 + JE LBB7_202 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 -LBB7_150: +LBB7_152: WORD $0x9f0f; BYTE $0xd0 // setg al WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 - LONG $0x14348a41 // mov sil, byte [r12 + rdx] + LONG $0x243c8b48 // mov rdi, qword [rsp] + LONG $0x17348a40 // mov sil, byte [rdi + rdx] LONG $0x07e38041 // and r11b, 7 WORD $0x01b3 // mov bl, 1 WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + +LBB7_200: WORD $0xe3d2 // shl bl, cl WORD $0x3040; BYTE $0xf0 // xor al, sil WORD $0xc320 // and bl, al -LBB7_198: +LBB7_201: WORD $0x3040; BYTE $0xf3 // xor bl, sil + WORD $0x1c88; BYTE $0x17 // mov byte [rdi + rdx], bl -LBB7_199: - LONG $0x141c8841 // mov byte [r12 + rdx], bl - -LBB7_200: +LBB7_202: MOVQ 336(SP), SP RET -LBB7_191: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - 
LONG $0xfee28349 // and r10, -2 - WORD $0x3145; BYTE $0xdb // xor r11d, r11d +LBB7_135: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0x3145; BYTE $0xff // xor r15d, r15d + QUAD $0x000000f0249c8b44 // mov r11d, dword [rsp + 240] -LBB7_192: - LONG $0x1b2e0f44 // ucomiss xmm11, dword [rbx] - WORD $0xd219 // sbb edx, edx - WORD $0x894c; BYTE $0xdf // mov rdi, r11 +LBB7_136: + WORD $0x8948; BYTE $0xf0 // mov rax, rsi + LONG $0x1e394466 // cmp word [rsi], r11w + WORD $0x9f0f; BYTE $0xd2 // setg dl + WORD $0xdaf6 // neg dl + WORD $0x894c; BYTE $0xff // mov rdi, r15 LONG $0x03efc148 // shr rdi, 3 - LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] - WORD $0x3044; BYTE $0xca // xor dl, r9b - WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + LONG $0x14b60f45; BYTE $0x3e // movzx r10d, byte [r14 + rdi] + WORD $0x8944; BYTE $0xf9 // mov ecx, r15d WORD $0xe180; BYTE $0x06 // and cl, 6 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0x3044; BYTE $0xd2 // xor dl, r10b + WORD $0xd320 // and bl, dl + WORD $0x3044; BYTE $0xd3 // xor bl, r10b + LONG $0x3e1c8841 // mov byte [r14 + rdi], bl + LONG $0x02c78349 // add r15, 2 + LONG $0x5e394466; BYTE $0x02 // cmp word [rsi + 2], r11w + LONG $0x04768d48 // lea rsi, [rsi + 4] + WORD $0x9f0f; BYTE $0xd2 // setg dl + WORD $0xdaf6 // neg dl + WORD $0xda30 // xor dl, bl + WORD $0xc980; BYTE $0x01 // or cl, 1 WORD $0x01b0 // mov al, 1 WORD $0xe0d2 // shl al, cl WORD $0xd020 // and al, dl - WORD $0x3044; BYTE $0xc8 // xor al, r9b + WORD $0xd830 // xor al, bl LONG $0x3e048841 // mov byte [r14 + rdi], al - LONG $0x02c38349 // add r11, 2 - LONG $0x5b2e0f44; BYTE $0x04 // ucomiss xmm11, dword [rbx + 4] - LONG $0x085b8d48 // lea rbx, [rbx + 8] - WORD $0xf619 // sbb esi, esi - WORD $0x3040; BYTE $0xc6 // xor sil, al - WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x394d; BYTE $0xf9 // cmp r9, r15 + JNE LBB7_136 + +LBB7_133: + LONG $0x01c0f641 // test r8b, 1 + JE LBB7_202 + LONG $0xf024848b; WORD $0x0000; BYTE $0x00 // mov eax, dword [rsp + 240] + WORD $0x3966; BYTE $0x06 // cmp word [rsi], ax + WORD $0x9f0f; BYTE $0xd0 // setg al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xfa // mov rdx, r15 + LONG $0x03eac148 // shr rdx, 3 + LONG $0x163c8a41 // mov dil, byte [r14 + rdx] + LONG $0x07e78041 // and r15b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xf9 // mov ecx, r15d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf8 // xor al, dil + WORD $0xc320 // and bl, al + WORD $0x3040; BYTE $0xfb // xor bl, dil + LONG $0x161c8841 // mov byte [r14 + rdx], bl + JMP LBB7_202 + +LBB7_193: + WORD $0x894d; BYTE $0xc1 // mov r9, r8 + LONG $0xfee18349 // and r9, -2 + WORD $0x3145; BYTE $0xdb // xor r11d, r11d + WORD $0x894d; BYTE $0xfa // mov r10, r15 + WORD $0x8948; BYTE $0xd6 // mov rsi, rdx + +LBB7_194: + LONG $0x06100ff3 // movss xmm0, dword [rsi] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + WORD $0x970f; BYTE $0xd3 // seta bl + WORD $0xdbf6 // neg bl + WORD $0x894c; BYTE $0xdf // mov rdi, r11 + LONG $0x03efc148 // shr rdi, 3 + WORD $0x8944; BYTE $0xd9 // mov ecx, r11d + WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b2 // mov dl, 1 WORD $0xe2d2 // shl dl, cl - WORD $0x2040; BYTE $0xf2 // and dl, sil + LONG $0x04b60f41; BYTE $0x3a // movzx eax, byte [r10 + rdi] + WORD $0xc330 // xor bl, al + WORD $0xda20 // and dl, bl WORD $0xc230 // xor dl, al - LONG $0x3e148841 // mov byte [r14 + rdi], dl - WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB7_192 + LONG $0x3a148841 // mov byte [r10 + rdi], dl + LONG $0x02c38349 // 
add r11, 2 + LONG $0x46100ff3; BYTE $0x04 // movss xmm0, dword [rsi + 4] + LONG $0x08c68348 // add rsi, 8 + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al + WORD $0xd030 // xor al, dl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x01b3 // mov bl, 1 + WORD $0xe3d2 // shl bl, cl + WORD $0xc320 // and bl, al + WORD $0xd330 // xor bl, dl + LONG $0x3a1c8841 // mov byte [r10 + rdi], bl + WORD $0x394d; BYTE $0xd9 // cmp r9, r11 + JNE LBB7_194 + WORD $0x8948; BYTE $0xf2 // mov rdx, rsi -LBB7_189: +LBB7_191: LONG $0x01c0f641 // test r8b, 1 - JE LBB7_200 - LONG $0x1b2e0f44 // ucomiss xmm11, dword [rbx] - WORD $0xc019 // sbb eax, eax + JE LBB7_202 + LONG $0x02100ff3 // movss xmm0, dword [rdx] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + WORD $0x970f; BYTE $0xd0 // seta al + WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 - LONG $0x16348a41 // mov sil, byte [r14 + rdx] + LONG $0x173c8a41 // mov dil, byte [r15 + rdx] LONG $0x07e38041 // and r11b, 7 WORD $0x01b3 // mov bl, 1 WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf0 // xor al, sil + WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al - WORD $0x3040; BYTE $0xf3 // xor bl, sil - LONG $0x161c8841 // mov byte [r14 + rdx], bl - JMP LBB7_200 + WORD $0x3040; BYTE $0xfb // xor bl, dil + LONG $0x171c8841 // mov byte [r15 + rdx], bl + JMP LBB7_202 LBB7_84: LONG $0xf0e28349 // and r10, -16 @@ -35573,31 +36958,31 @@ LBB7_84: WORD $0x0148; BYTE $0xf0 // add rax, rsi QUAD $0x0000010824848948 // mov qword [rsp + 264], rax QUAD $0x000000e82494894c // mov qword [rsp + 232], r10 - LONG $0x94048d4b // lea rax, [r12 + 4*r10] - LONG $0x24448948; BYTE $0x58 // mov qword [rsp + 88], rax - LONG $0xc6b60f41 // movzx eax, r14b + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x90048d4a // lea rax, [rax + 4*r10] + LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax + LONG $0xc4b60f41 // movzx eax, r12b LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 - QUAD $0x0000a0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm1 + QUAD $0x000090248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 144], xmm1 WORD $0xc031 // xor eax, eax - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 LBB7_85: WORD $0x8948; BYTE $0xc7 // mov rdi, rax QUAD $0x000000f024848948 // mov qword [rsp + 240], rax LONG $0x05e7c148 // shl rdi, 5 - WORD $0x8949; BYTE $0xf8 // mov r8, rdi - WORD $0x8948; BYTE $0xfa // mov rdx, rdi - WORD $0x8949; BYTE $0xf9 // mov r9, rdi WORD $0x8949; BYTE $0xfc // mov r12, rdi + WORD $0x8948; BYTE $0xfa // mov rdx, rdi WORD $0x8949; BYTE $0xfb // mov r11, rdi + WORD $0x8949; BYTE $0xf9 // mov r9, rdi WORD $0x8948; BYTE $0xf8 // mov rax, rdi - LONG $0x247c8948; BYTE $0x20 // mov qword [rsp + 32], rdi + WORD $0x8949; BYTE $0xf8 // mov r8, rdi WORD $0x8949; BYTE $0xfe // mov r14, rdi WORD $0x8949; BYTE $0xfa // mov r10, rdi WORD $0x8949; BYTE $0xff // mov r15, rdi WORD $0x8948; BYTE $0xfb // mov rbx, rdi + LONG $0x247c8948; BYTE $0x38 // mov qword [rsp + 56], rdi LONG $0x3e0cb60f // movzx ecx, byte [rsi + rdi] LONG $0x6e0f4466; BYTE $0xf9 // movd xmm15, ecx LONG $0x3e4cb60f; BYTE $0x01 // movzx ecx, byte [rsi + rdi + 1] @@ -35614,15 +36999,15 @@ LBB7_85: LONG $0xd96e0f66 // movd xmm3, ecx LONG $0x3e4cb60f; BYTE $0x07 // movzx ecx, byte [rsi + rdi + 7] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x0000d024847f0f66; BYTE $0x00 // movdqa 
oword [rsp + 208], xmm0 + QUAD $0x0000a024847f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm0 LONG $0x3e4cb60f; BYTE $0x08 // movzx ecx, byte [rsi + rdi + 8] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 + QUAD $0x00013024847f0f66; BYTE $0x00 // movdqa oword [rsp + 304], xmm0 LONG $0x3e4cb60f; BYTE $0x09 // movzx ecx, byte [rsi + rdi + 9] LONG $0x6e0f4466; BYTE $0xd1 // movd xmm10, ecx LONG $0x3e4cb60f; BYTE $0x0a // movzx ecx, byte [rsi + rdi + 10] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00009024847f0f66; BYTE $0x00 // movdqa oword [rsp + 144], xmm0 + QUAD $0x0000b024847f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm0 LONG $0x3e4cb60f; BYTE $0x0b // movzx ecx, byte [rsi + rdi + 11] LONG $0x6e0f4466; BYTE $0xd9 // movd xmm11, ecx LONG $0x3e4cb60f; BYTE $0x0c // movzx ecx, byte [rsi + rdi + 12] @@ -35631,113 +37016,116 @@ LBB7_85: LONG $0x6e0f4466; BYTE $0xe1 // movd xmm12, ecx LONG $0x3e4cb60f; BYTE $0x0e // movzx ecx, byte [rsi + rdi + 14] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00013024847f0f66; BYTE $0x00 // movdqa oword [rsp + 304], xmm0 + QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 + LONG $0x247c8948; BYTE $0x28 // mov qword [rsp + 40], rdi WORD $0x8949; BYTE $0xfd // mov r13, rdi LONG $0x20cd8349 // or r13, 32 - LONG $0x246c894c; BYTE $0x28 // mov qword [rsp + 40], r13 - LONG $0x40c88349 // or r8, 64 + LONG $0x246c894c; BYTE $0x20 // mov qword [rsp + 32], r13 + LONG $0x40cc8349 // or r12, 64 LONG $0x60ca8348 // or rdx, 96 - LONG $0x24548948; BYTE $0x78 // mov qword [rsp + 120], rdx - LONG $0x80c98149; WORD $0x0000; BYTE $0x00 // or r9, 128 - LONG $0xa0cc8149; WORD $0x0000; BYTE $0x00 // or r12, 160 - LONG $0xc0cb8149; WORD $0x0000; BYTE $0x00 // or r11, 192 + LONG $0x24548948; BYTE $0x68 // mov qword [rsp + 104], rdx + LONG $0x80cb8149; WORD $0x0000; BYTE $0x00 // or r11, 128 LONG $0x245c894c; BYTE $0x40 // mov qword [rsp + 64], r11 - LONG $0x00e00d48; WORD $0x0000 // or rax, 224 - LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax - LONG $0x245c8b4c; BYTE $0x20 // mov r11, qword [rsp + 32] - LONG $0x00cb8149; WORD $0x0001; BYTE $0x00 // or r11, 256 - LONG $0x20ce8149; WORD $0x0001; BYTE $0x00 // or r14, 288 - LONG $0x40ca8149; WORD $0x0001; BYTE $0x00 // or r10, 320 - LONG $0x60cf8149; WORD $0x0001; BYTE $0x00 // or r15, 352 - LONG $0x247c894c; BYTE $0x50 // mov qword [rsp + 80], r15 - LONG $0x80cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 384 + WORD $0x8949; BYTE $0xfb // mov r11, rdi + LONG $0xa0cb8149; WORD $0x0000; BYTE $0x00 // or r11, 160 + LONG $0x00c00d48; WORD $0x0000 // or rax, 192 + LONG $0x24448948; BYTE $0x50 // mov qword [rsp + 80], rax + LONG $0xe0c98149; WORD $0x0000; BYTE $0x00 // or r9, 224 + LONG $0x00ce8149; WORD $0x0001; BYTE $0x00 // or r14, 256 + QUAD $0x0000008024b4894c // mov qword [rsp + 128], r14 + LONG $0x20ca8149; WORD $0x0001; BYTE $0x00 // or r10, 288 + LONG $0x2454894c; BYTE $0x78 // mov qword [rsp + 120], r10 + LONG $0x40c88149; WORD $0x0001; BYTE $0x00 // or r8, 320 + LONG $0x2444894c; BYTE $0x48 // mov qword [rsp + 72], r8 + LONG $0x60cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 352 + LONG $0x245c8948; BYTE $0x58 // mov qword [rsp + 88], rbx + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + LONG $0x80c98148; WORD $0x0001; BYTE $0x00 // or rcx, 384 + LONG $0x244c8948; BYTE $0x38 // mov qword [rsp + 56], rcx WORD $0x8948; BYTE $0xf8 // mov rax, rdi LONG $0x01a00d48; WORD $0x0000 // or rax, 416 - LONG $0x24448948; BYTE $0x08 // mov 
qword [rsp + 8], rax + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax WORD $0x8948; BYTE $0xf8 // mov rax, rdi - WORD $0x8948; BYTE $0xf9 // mov rcx, rdi - LONG $0x247c8948; BYTE $0x18 // mov qword [rsp + 24], rdi LONG $0x01c00d48; WORD $0x0000 // or rax, 448 - LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax - LONG $0xe0c98148; WORD $0x0001; BYTE $0x00 // or rcx, 480 - LONG $0x244c8948; BYTE $0x30 // mov qword [rsp + 48], rcx + LONG $0x24448948; BYTE $0x18 // mov qword [rsp + 24], rax + WORD $0x8948; BYTE $0xf8 // mov rax, rdi + LONG $0x01e00d48; WORD $0x0000 // or rax, 480 + LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax QUAD $0x012e3c203a0f4666 // pinsrb xmm15, byte [rsi + r13], 1 - QUAD $0x02063c203a0f4666 // pinsrb xmm15, byte [rsi + r8], 2 + QUAD $0x02263c203a0f4666 // pinsrb xmm15, byte [rsi + r12], 2 QUAD $0x03163c203a0f4466 // pinsrb xmm15, byte [rsi + rdx], 3 - WORD $0x894c; BYTE $0xcf // mov rdi, r9 - LONG $0x244c894c; BYTE $0x38 // mov qword [rsp + 56], r9 - QUAD $0x040e3c203a0f4666 // pinsrb xmm15, byte [rsi + r9], 4 - QUAD $0x05263c203a0f4666 // pinsrb xmm15, byte [rsi + r12], 5 - LONG $0x244c8b4c; BYTE $0x40 // mov r9, qword [rsp + 64] - QUAD $0x060e3c203a0f4666 // pinsrb xmm15, byte [rsi + r9], 6 - LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] - QUAD $0x072e3c203a0f4666 // pinsrb xmm15, byte [rsi + r13], 7 - LONG $0x245c894c; BYTE $0x20 // mov qword [rsp + 32], r11 - QUAD $0x081e3c203a0f4666 // pinsrb xmm15, byte [rsi + r11], 8 - QUAD $0x09363c203a0f4666 // pinsrb xmm15, byte [rsi + r14], 9 - QUAD $0x0a163c203a0f4666 // pinsrb xmm15, byte [rsi + r10], 10 - QUAD $0x0b3e3c203a0f4666 // pinsrb xmm15, byte [rsi + r15], 11 - QUAD $0x0c1e3c203a0f4466 // pinsrb xmm15, byte [rsi + rbx], 12 - LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] + LONG $0x246c8b4c; BYTE $0x40 // mov r13, qword [rsp + 64] + QUAD $0x042e3c203a0f4666 // pinsrb xmm15, byte [rsi + r13], 4 + WORD $0x894c; BYTE $0xdf // mov rdi, r11 + QUAD $0x051e3c203a0f4666 // pinsrb xmm15, byte [rsi + r11], 5 + LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] + QUAD $0x063e3c203a0f4666 // pinsrb xmm15, byte [rsi + r15], 6 + QUAD $0x070e3c203a0f4666 // pinsrb xmm15, byte [rsi + r9], 7 + QUAD $0x08363c203a0f4666 // pinsrb xmm15, byte [rsi + r14], 8 + QUAD $0x09163c203a0f4666 // pinsrb xmm15, byte [rsi + r10], 9 + QUAD $0x0a063c203a0f4666 // pinsrb xmm15, byte [rsi + r8], 10 + QUAD $0x0b1e3c203a0f4466 // pinsrb xmm15, byte [rsi + rbx], 11 + QUAD $0x0c0e3c203a0f4466 // pinsrb xmm15, byte [rsi + rcx], 12 + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] QUAD $0x0d163c203a0f4466 // pinsrb xmm15, byte [rsi + rdx], 13 - QUAD $0x0e063c203a0f4466 // pinsrb xmm15, byte [rsi + rax], 14 - QUAD $0x0f0e3c203a0f4466 // pinsrb xmm15, byte [rsi + rcx], 15 - LONG $0x245c8b4c; BYTE $0x28 // mov r11, qword [rsp + 40] + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + QUAD $0x0e163c203a0f4466 // pinsrb xmm15, byte [rsi + rdx], 14 + QUAD $0x0f063c203a0f4466 // pinsrb xmm15, byte [rsi + rax], 15 + LONG $0x245c8b4c; BYTE $0x20 // mov r11, qword [rsp + 32] QUAD $0x011e6c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rsi + r11 + 1], 1 - QUAD $0x01066c203a0f4266; BYTE $0x02 // pinsrb xmm5, byte [rsi + r8 + 1], 2 - WORD $0x894d; BYTE $0xc3 // mov r11, r8 - LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] - QUAD $0x01066c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r8 + 1], 3 - QUAD $0x04013e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 1], 4 - QUAD 
$0x01266c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r12 + 1], 5 - WORD $0x894c; BYTE $0xe7 // mov rdi, r12 - QUAD $0x010e6c203a0f4266; BYTE $0x06 // pinsrb xmm5, byte [rsi + r9 + 1], 6 - QUAD $0x012e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r13 + 1], 7 - WORD $0x894d; BYTE $0xec // mov r12, r13 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] - QUAD $0x0801166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 1], 8 - QUAD $0x01366c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r14 + 1], 9 - WORD $0x894d; BYTE $0xf1 // mov r9, r14 - QUAD $0x01166c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r10 + 1], 10 - QUAD $0x013e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r15 + 1], 11 - QUAD $0x0c011e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 1], 12 - WORD $0x8949; BYTE $0xdd // mov r13, rbx - QUAD $0x000000c0249c8948 // mov qword [rsp + 192], rbx - LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] - QUAD $0x013e6c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r15 + 1], 13 - QUAD $0x0e01066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 1], 14 - QUAD $0x0f010e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 1], 15 - QUAD $0x00a0248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 160] + QUAD $0x01266c203a0f4266; BYTE $0x02 // pinsrb xmm5, byte [rsi + r12 + 1], 2 + LONG $0x245c8b4c; BYTE $0x68 // mov r11, qword [rsp + 104] + QUAD $0x011e6c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r11 + 1], 3 + QUAD $0x012e6c203a0f4266; BYTE $0x04 // pinsrb xmm5, byte [rsi + r13 + 1], 4 + QUAD $0x05013e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 1], 5 + QUAD $0x013e6c203a0f4266; BYTE $0x06 // pinsrb xmm5, byte [rsi + r15 + 1], 6 + QUAD $0x010e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r9 + 1], 7 + WORD $0x894d; BYTE $0xcd // mov r13, r9 + QUAD $0x01366c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r14 + 1], 8 + QUAD $0x01166c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r10 + 1], 9 + QUAD $0x01066c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r8 + 1], 10 + QUAD $0x0b011e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 1], 11 + QUAD $0x0c010e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 1], 12 + LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] + QUAD $0x0d010e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 1], 13 + QUAD $0x0e01166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 1], 14 + QUAD $0x0f01066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 1], 15 + QUAD $0x0090248c6f0f4466; WORD $0x0000 // movdqa xmm9, oword [rsp + 144] LONG $0x640f4166; BYTE $0xe9 // pcmpgtb xmm5, xmm9 LONG $0xfd6f0f66 // movdqa xmm7, xmm5 QUAD $0x000000a0a56f0f66 // movdqa xmm4, oword 160[rbp] /* [rip + .LCPI7_10] */ LONG $0xfcdb0f66 // pand xmm7, xmm4 LONG $0xfdf80f66 // psubb xmm7, xmm5 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] LONG $0x065cb60f; BYTE $0x0f // movzx ebx, byte [rsi + rax + 15] LONG $0x6e0f4466; BYTE $0xf3 // movd xmm14, ebx LONG $0x640f4566; BYTE $0xf9 // pcmpgtb xmm15, xmm9 - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] - QUAD $0x01021674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 2], 1 - QUAD $0x021e74203a0f4266; BYTE $0x02 // pinsrb xmm6, byte [rsi + r11 + 2], 2 - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - QUAD $0x020674203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r8 + 2], 3 - LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] - QUAD $0x023674203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r14 + 2], 4 - QUAD $0x05023e74203a0f66 // pinsrb xmm6, 
byte [rsi + rdi + 2], 5 + LONG $0x24748b4c; BYTE $0x20 // mov r14, qword [rsp + 32] + QUAD $0x023674203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r14 + 2], 1 + WORD $0x894c; BYTE $0xe1 // mov rcx, r12 + QUAD $0x022674203a0f4266; BYTE $0x02 // pinsrb xmm6, byte [rsi + r12 + 2], 2 + WORD $0x894c; BYTE $0xda // mov rdx, r11 + QUAD $0x021e74203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r11 + 2], 3 LONG $0x24448b4c; BYTE $0x40 // mov r8, qword [rsp + 64] - QUAD $0x020674203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r8 + 2], 6 - QUAD $0x022674203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r12 + 2], 7 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x08021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 8 - QUAD $0x000000b0248c894c // mov qword [rsp + 176], r9 - QUAD $0x020e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r9 + 2], 9 - QUAD $0x021674203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r10 + 2], 10 + QUAD $0x020674203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r8 + 2], 4 + QUAD $0x05023e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 2], 5 LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] - QUAD $0x022674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r12 + 2], 11 - QUAD $0x022e74203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r13 + 2], 12 - QUAD $0x023e74203a0f4266; BYTE $0x0d // pinsrb xmm6, byte [rsi + r15 + 2], 13 + QUAD $0x022674203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r12 + 2], 6 + QUAD $0x000000c0248c894c // mov qword [rsp + 192], r9 + QUAD $0x020e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r9 + 2], 7 + QUAD $0x0000008024948b4c // mov r10, qword [rsp + 128] + QUAD $0x021674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r10 + 2], 8 + LONG $0x245c8b4c; BYTE $0x78 // mov r11, qword [rsp + 120] + QUAD $0x021e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r11 + 2], 9 + LONG $0x247c8b4c; BYTE $0x48 // mov r15, qword [rsp + 72] + QUAD $0x023e74203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r15 + 2], 10 + LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] + QUAD $0x0b021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 11 + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] + QUAD $0x0c021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 12 LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] + QUAD $0x0d021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 13 + LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] QUAD $0x0e021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 14 LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] QUAD $0x0f021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 15 @@ -35748,55 +37136,54 @@ LBB7_85: LONG $0xeb0f4166; BYTE $0xf7 // por xmm6, xmm15 LONG $0x065cb60f; BYTE $0x10 // movzx ebx, byte [rsi + rax + 16] LONG $0x6e0f4466; BYTE $0xfb // movd xmm15, ebx - WORD $0x8948; BYTE $0xd0 // mov rax, rdx - QUAD $0x01031654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 3], 1 - WORD $0x894d; BYTE $0xdd // mov r13, r11 - QUAD $0x031e54203a0f4266; BYTE $0x02 // pinsrb xmm2, byte [rsi + r11 + 3], 2 - WORD $0x8949; BYTE $0xcb // mov r11, rcx - QUAD $0x03030e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 3], 3 - QUAD $0x033654203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r14 + 3], 4 + QUAD $0x033654203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r14 + 3], 1 + QUAD $0x02030e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 3], 2 + WORD $0x8948; BYTE $0xd3 // mov rbx, rdx + QUAD $0x03031654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 3], 3 + WORD $0x894d; 
BYTE $0xc1 // mov r9, r8 + QUAD $0x030654203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r8 + 3], 4 WORD $0x8948; BYTE $0xfa // mov rdx, rdi - LONG $0x247c8948; BYTE $0x68 // mov qword [rsp + 104], rdi + LONG $0x247c8948; BYTE $0x70 // mov qword [rsp + 112], rdi QUAD $0x05033e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 3], 5 - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - QUAD $0x030654203a0f4266; BYTE $0x06 // pinsrb xmm2, byte [rsi + r8 + 3], 6 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] - QUAD $0x07033e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 3], 7 - LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] - QUAD $0x030654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r8 + 3], 8 - QUAD $0x030e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r9 + 3], 9 - QUAD $0x031654203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r10 + 3], 10 + WORD $0x894c; BYTE $0xe7 // mov rdi, r12 + QUAD $0x032654203a0f4266; BYTE $0x06 // pinsrb xmm2, byte [rsi + r12 + 3], 6 + QUAD $0x032e54203a0f4266; BYTE $0x07 // pinsrb xmm2, byte [rsi + r13 + 3], 7 + QUAD $0x031654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r10 + 3], 8 + QUAD $0x031e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r11 + 3], 9 + QUAD $0x033e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r15 + 3], 10 + LONG $0x24648b4c; BYTE $0x58 // mov r12, qword [rsp + 88] QUAD $0x032654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r12 + 3], 11 - QUAD $0x000000c024a48b4c // mov r12, qword [rsp + 192] - QUAD $0x032654203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r12 + 3], 12 - QUAD $0x033e54203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r15 + 3], 13 - LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] - QUAD $0x0e031e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 3], 14 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] - QUAD $0x0f031e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 3], 15 - QUAD $0x0104064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 4], 1 - QUAD $0x042e4c203a0f4266; BYTE $0x02 // pinsrb xmm1, byte [rsi + r13 + 4], 2 - LONG $0x246c894c; BYTE $0x60 // mov qword [rsp + 96], r13 - QUAD $0x041e4c203a0f4266; BYTE $0x03 // pinsrb xmm1, byte [rsi + r11 + 4], 3 - QUAD $0x04364c203a0f4266; BYTE $0x04 // pinsrb xmm1, byte [rsi + r14 + 4], 4 + LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] + QUAD $0x032e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r13 + 3], 12 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + QUAD $0x0d030654203a0f66 // pinsrb xmm2, byte [rsi + rax + 3], 13 + LONG $0x24448b4c; BYTE $0x18 // mov r8, qword [rsp + 24] + QUAD $0x030654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r8 + 3], 14 + LONG $0x24448b4c; BYTE $0x30 // mov r8, qword [rsp + 48] + QUAD $0x030654203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r8 + 3], 15 + QUAD $0x04364c203a0f4266; BYTE $0x01 // pinsrb xmm1, byte [rsi + r14 + 4], 1 + QUAD $0x02040e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 4], 2 + QUAD $0x03041e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 4], 3 + QUAD $0x040e4c203a0f4266; BYTE $0x04 // pinsrb xmm1, byte [rsi + r9 + 4], 4 QUAD $0x0504164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 4], 5 - QUAD $0x06040e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 4], 6 - QUAD $0x07043e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 4], 7 - QUAD $0x04064c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r8 + 4], 8 - QUAD $0x040e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r9 + 4], 9 - QUAD $0x04164c203a0f4266; BYTE $0x0a // pinsrb xmm1, byte [rsi + r10 + 4], 
10 - LONG $0x2454894c; BYTE $0x70 // mov qword [rsp + 112], r10 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0b04064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 4], 11 - QUAD $0x04264c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r12 + 4], 12 - QUAD $0x043e4c203a0f4266; BYTE $0x0d // pinsrb xmm1, byte [rsi + r15 + 4], 13 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] - QUAD $0x0e04164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 4], 14 - QUAD $0x0f041e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 4], 15 + QUAD $0x06043e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 4], 6 + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] + QUAD $0x0704064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 4], 7 + QUAD $0x04164c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r10 + 4], 8 + QUAD $0x041e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r11 + 4], 9 + QUAD $0x043e4c203a0f4266; BYTE $0x0a // pinsrb xmm1, byte [rsi + r15 + 4], 10 + QUAD $0x04264c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r12 + 4], 11 + QUAD $0x042e4c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r13 + 4], 12 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + QUAD $0x0d04064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 4], 13 + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + QUAD $0x04364c203a0f4266; BYTE $0x0e // pinsrb xmm1, byte [rsi + r14 + 4], 14 + WORD $0x894c; BYTE $0xc2 // mov rdx, r8 + QUAD $0x04064c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r8 + 4], 15 + WORD $0x894d; BYTE $0xc7 // mov r15, r8 LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] - LONG $0x065cb60f; BYTE $0x11 // movzx ebx, byte [rsi + rax + 17] + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + LONG $0x165cb60f; BYTE $0x11 // movzx ebx, byte [rsi + rdx + 17] LONG $0xc36e0f66 // movd xmm0, ebx LONG $0x640f4166; BYTE $0xd1 // pcmpgtb xmm2, xmm9 QUAD $0x000000c0ad6f0f66 // movdqa xmm5, oword 192[rbp] /* [rip + .LCPI7_12] */ @@ -35805,90 +37192,92 @@ LBB7_85: QUAD $0x000000d0ad6f0f66 // movdqa xmm5, oword 208[rbp] /* [rip + .LCPI7_13] */ LONG $0xcddb0f66 // pand xmm1, xmm5 LONG $0xcaeb0f66 // por xmm1, xmm2 - LONG $0x065cb60f; BYTE $0x12 // movzx ebx, byte [rsi + rax + 18] + LONG $0x165cb60f; BYTE $0x12 // movzx ebx, byte [rsi + rdx + 18] LONG $0xeb6e0f66 // movd xmm5, ebx - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x050e44203a0f4466; BYTE $0x01 // pinsrb xmm8, byte [rsi + rcx + 5], 1 - QUAD $0x052e44203a0f4666; BYTE $0x02 // pinsrb xmm8, byte [rsi + r13 + 5], 2 - QUAD $0x051e44203a0f4666; BYTE $0x03 // pinsrb xmm8, byte [rsi + r11 + 5], 3 - QUAD $0x053644203a0f4666; BYTE $0x04 // pinsrb xmm8, byte [rsi + r14 + 5], 4 + LONG $0x24448b4c; BYTE $0x20 // mov r8, qword [rsp + 32] + QUAD $0x050644203a0f4666; BYTE $0x01 // pinsrb xmm8, byte [rsi + r8 + 5], 1 + QUAD $0x050e44203a0f4466; BYTE $0x02 // pinsrb xmm8, byte [rsi + rcx + 5], 2 LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] - QUAD $0x053e44203a0f4466; BYTE $0x05 // pinsrb xmm8, byte [rsi + rdi + 5], 5 - LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] - QUAD $0x051e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rbx + 5], 6 - LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] - QUAD $0x050e44203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r9 + 5], 7 - LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] + QUAD $0x053e44203a0f4466; BYTE $0x03 // pinsrb xmm8, byte [rsi + 
rdi + 5], 3 + LONG $0x244c8b4c; BYTE $0x40 // mov r9, qword [rsp + 64] + QUAD $0x050e44203a0f4666; BYTE $0x04 // pinsrb xmm8, byte [rsi + r9 + 5], 4 + LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x051644203a0f4466; BYTE $0x05 // pinsrb xmm8, byte [rsi + rdx + 5], 5 + LONG $0x24548b4c; BYTE $0x50 // mov r10, qword [rsp + 80] + QUAD $0x051644203a0f4666; BYTE $0x06 // pinsrb xmm8, byte [rsi + r10 + 5], 6 + QUAD $0x000000c0249c8b4c // mov r11, qword [rsp + 192] + QUAD $0x051e44203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r11 + 5], 7 + QUAD $0x0000008024a48b4c // mov r12, qword [rsp + 128] QUAD $0x052644203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r12 + 5], 8 - QUAD $0x000000b024ac8b4c // mov r13, qword [rsp + 176] + LONG $0x246c8b4c; BYTE $0x78 // mov r13, qword [rsp + 120] QUAD $0x052e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r13 + 5], 9 - QUAD $0x051644203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r10 + 5], 10 - LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] - QUAD $0x053e44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r15 + 5], 11 - QUAD $0x000000c0249c8b48 // mov rbx, qword [rsp + 192] + LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + QUAD $0x051e44203a0f4466; BYTE $0x0a // pinsrb xmm8, byte [rsi + rbx + 5], 10 + LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] + QUAD $0x051e44203a0f4466; BYTE $0x0b // pinsrb xmm8, byte [rsi + rbx + 5], 11 + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] QUAD $0x051e44203a0f4466; BYTE $0x0c // pinsrb xmm8, byte [rsi + rbx + 5], 12 - QUAD $0x050644203a0f4666; BYTE $0x0d // pinsrb xmm8, byte [rsi + r8 + 5], 13 - QUAD $0x051644203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rdx + 5], 14 - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - QUAD $0x053644203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r14 + 5], 15 + QUAD $0x050644203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rsi + rax + 5], 13 + QUAD $0x053644203a0f4666; BYTE $0x0e // pinsrb xmm8, byte [rsi + r14 + 5], 14 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 + QUAD $0x053e44203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r15 + 5], 15 LONG $0x640f4566; BYTE $0xc1 // pcmpgtb xmm8, xmm9 QUAD $0x000000e0956f0f66 // movdqa xmm2, oword 224[rbp] /* [rip + .LCPI7_14] */ LONG $0xdb0f4466; BYTE $0xc2 // pand xmm8, xmm2 LONG $0xeb0f4466; BYTE $0xc1 // por xmm8, xmm1 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] LONG $0x065cb60f; BYTE $0x13 // movzx ebx, byte [rsi + rax + 19] LONG $0xfb6e0f66 // movd xmm7, ebx LONG $0xeb0f4466; BYTE $0xc6 // por xmm8, xmm6 LONG $0x065cb60f; BYTE $0x14 // movzx ebx, byte [rsi + rax + 20] LONG $0xf36e0f66 // movd xmm6, ebx - QUAD $0x01060e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 6], 1 - WORD $0x8949; BYTE $0xca // mov r10, rcx - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x0206065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 6], 2 - QUAD $0x061e5c203a0f4266; BYTE $0x03 // pinsrb xmm3, byte [rsi + r11 + 6], 3 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] - QUAD $0x0406065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 6], 4 - WORD $0x8949; BYTE $0xf8 // mov r8, rdi - QUAD $0x05063e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 6], 5 - LONG $0x244c8b48; BYTE $0x40 // mov rcx, qword [rsp + 64] - QUAD $0x06060e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 6], 6 - QUAD $0x060e5c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r9 + 6], 7 + QUAD $0x06065c203a0f4266; BYTE $0x01 // pinsrb xmm3, byte [rsi + r8 + 6], 1 + QUAD 
$0x02060e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 6], 2 + WORD $0x8949; BYTE $0xfe // mov r14, rdi + QUAD $0x03063e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 6], 3 + WORD $0x894d; BYTE $0xcf // mov r15, r9 + QUAD $0x060e5c203a0f4266; BYTE $0x04 // pinsrb xmm3, byte [rsi + r9 + 6], 4 + QUAD $0x0506165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 6], 5 + WORD $0x894c; BYTE $0xd7 // mov rdi, r10 + QUAD $0x06165c203a0f4266; BYTE $0x06 // pinsrb xmm3, byte [rsi + r10 + 6], 6 + WORD $0x894d; BYTE $0xd9 // mov r9, r11 + QUAD $0x061e5c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r11 + 6], 7 + WORD $0x894d; BYTE $0xe2 // mov r10, r12 QUAD $0x06265c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r12 + 6], 8 - WORD $0x894d; BYTE $0xe1 // mov r9, r12 - WORD $0x894c; BYTE $0xe8 // mov rax, r13 + WORD $0x894c; BYTE $0xeb // mov rbx, r13 QUAD $0x062e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r13 + 6], 9 - LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] - QUAD $0x0a063e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 6], 10 - WORD $0x894c; BYTE $0xfb // mov rbx, r15 - QUAD $0x063e5c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r15 + 6], 11 - QUAD $0x000000c024a48b4c // mov r12, qword [rsp + 192] - QUAD $0x06265c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r12 + 6], 12 - LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] - QUAD $0x0d06165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 6], 13 - LONG $0x247c8b4c; BYTE $0x10 // mov r15, qword [rsp + 16] - QUAD $0x063e5c203a0f4266; BYTE $0x0e // pinsrb xmm3, byte [rsi + r15 + 6], 14 - QUAD $0x06365c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r14 + 6], 15 - QUAD $0x0000d024946f0f66; BYTE $0x00 // movdqa xmm2, oword [rsp + 208] - QUAD $0x071654203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r10 + 7], 1 - LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] - QUAD $0x072e54203a0f4266; BYTE $0x02 // pinsrb xmm2, byte [rsi + r13 + 7], 2 - QUAD $0x071e54203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r11 + 7], 3 - LONG $0x24548b4c; BYTE $0x38 // mov r10, qword [rsp + 56] - QUAD $0x071654203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r10 + 7], 4 - QUAD $0x070654203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r8 + 7], 5 - WORD $0x894c; BYTE $0xc2 // mov rdx, r8 - QUAD $0x06070e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 7], 6 - LONG $0x244c8b48; BYTE $0x48 // mov rcx, qword [rsp + 72] - QUAD $0x07070e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 7], 7 - QUAD $0x070e54203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r9 + 7], 8 - QUAD $0x09070654203a0f66 // pinsrb xmm2, byte [rsi + rax + 7], 9 - QUAD $0x0a073e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 7], 10 - QUAD $0x0b071e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 7], 11 - QUAD $0x072654203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r12 + 7], 12 - LONG $0x245c8b4c; BYTE $0x08 // mov r11, qword [rsp + 8] + LONG $0x24548b48; BYTE $0x48 // mov rdx, qword [rsp + 72] + QUAD $0x0a06165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 6], 10 + LONG $0x24648b4c; BYTE $0x58 // mov r12, qword [rsp + 88] + QUAD $0x06265c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r12 + 6], 11 + LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] + QUAD $0x062e5c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r13 + 6], 12 + LONG $0x245c8b4c; BYTE $0x10 // mov r11, qword [rsp + 16] + QUAD $0x061e5c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r11 + 6], 13 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0e06065c203a0f66 // pinsrb xmm3, 
byte [rsi + rax + 6], 14 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x0f06065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 6], 15 + QUAD $0x0000a024946f0f66; BYTE $0x00 // movdqa xmm2, oword [rsp + 160] + QUAD $0x070654203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r8 + 7], 1 + QUAD $0x02070e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 7], 2 + QUAD $0x073654203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r14 + 7], 3 + QUAD $0x073e54203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r15 + 7], 4 + WORD $0x894d; BYTE $0xfe // mov r14, r15 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x05070654203a0f66 // pinsrb xmm2, byte [rsi + rax + 7], 5 + QUAD $0x06073e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 7], 6 + QUAD $0x070e54203a0f4266; BYTE $0x07 // pinsrb xmm2, byte [rsi + r9 + 7], 7 + QUAD $0x071654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r10 + 7], 8 + QUAD $0x09071e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 7], 9 + QUAD $0x0a071654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 7], 10 + QUAD $0x072654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r12 + 7], 11 + QUAD $0x072e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r13 + 7], 12 QUAD $0x071e54203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r11 + 7], 13 - QUAD $0x073e54203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r15 + 7], 14 - WORD $0x894c; BYTE $0xf7 // mov rdi, r14 - QUAD $0x073654203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r14 + 7], 15 + LONG $0x247c8b48; BYTE $0x18 // mov rdi, qword [rsp + 24] + QUAD $0x0e073e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 7], 14 + LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] + QUAD $0x0f071654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 7], 15 LONG $0x640f4166; BYTE $0xd9 // pcmpgtb xmm3, xmm9 QUAD $0x000000f08d6f0f66 // movdqa xmm1, oword 240[rbp] /* [rip + .LCPI7_15] */ LONG $0xd9db0f66 // pand xmm3, xmm1 @@ -35898,326 +37287,322 @@ LBB7_85: LONG $0xd1db0f66 // pand xmm2, xmm1 LONG $0xd3eb0f66 // por xmm2, xmm3 LONG $0xca6f0f66 // movdqa xmm1, xmm2 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] - LONG $0x065cb60f; BYTE $0x15 // movzx ebx, byte [rsi + rax + 21] + LONG $0x24448b4c; BYTE $0x28 // mov r8, qword [rsp + 40] + LONG $0x5cb60f42; WORD $0x1506 // movzx ebx, byte [rsi + r8 + 21] LONG $0xd36e0f66 // movd xmm2, ebx - LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] - QUAD $0x090e54203a0f4466; BYTE $0x01 // pinsrb xmm10, byte [rsi + rcx + 9], 1 - QUAD $0x092e54203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rsi + r13 + 9], 2 - LONG $0x24448b4c; BYTE $0x78 // mov r8, qword [rsp + 120] - QUAD $0x090654203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r8 + 9], 3 - QUAD $0x091654203a0f4666; BYTE $0x04 // pinsrb xmm10, byte [rsi + r10 + 9], 4 - QUAD $0x091654203a0f4466; BYTE $0x05 // pinsrb xmm10, byte [rsi + rdx + 9], 5 - WORD $0x8949; BYTE $0xd6 // mov r14, rdx - LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] - QUAD $0x091654203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rdx + 9], 6 - LONG $0x244c8b4c; BYTE $0x48 // mov r9, qword [rsp + 72] - QUAD $0x090e54203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r9 + 9], 7 - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x091e54203a0f4466; BYTE $0x08 // pinsrb xmm10, byte [rsi + rbx + 9], 8 - QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] - QUAD $0x091e54203a0f4466; BYTE $0x09 // pinsrb xmm10, byte [rsi + rbx + 9], 9 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] - QUAD 
$0x093e54203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r15 + 9], 10 + LONG $0x244c8b4c; BYTE $0x20 // mov r9, qword [rsp + 32] + QUAD $0x090e54203a0f4666; BYTE $0x01 // pinsrb xmm10, byte [rsi + r9 + 9], 1 + QUAD $0x090e54203a0f4466; BYTE $0x02 // pinsrb xmm10, byte [rsi + rcx + 9], 2 + LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] + QUAD $0x093e54203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r15 + 9], 3 + QUAD $0x093654203a0f4666; BYTE $0x04 // pinsrb xmm10, byte [rsi + r14 + 9], 4 + LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] + QUAD $0x091e54203a0f4466; BYTE $0x05 // pinsrb xmm10, byte [rsi + rbx + 9], 5 LONG $0x245c8b48; BYTE $0x50 // mov rbx, qword [rsp + 80] - QUAD $0x091e54203a0f4466; BYTE $0x0b // pinsrb xmm10, byte [rsi + rbx + 9], 11 - QUAD $0x092654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r12 + 9], 12 - QUAD $0x091e54203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r11 + 9], 13 - LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] - QUAD $0x091654203a0f4666; BYTE $0x0e // pinsrb xmm10, byte [rsi + r10 + 9], 14 - QUAD $0x093e54203a0f4466; BYTE $0x0f // pinsrb xmm10, byte [rsi + rdi + 9], 15 + QUAD $0x091e54203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rbx + 9], 6 + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] + QUAD $0x091654203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r10 + 9], 7 + QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] + QUAD $0x091e54203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r11 + 9], 8 + LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] + QUAD $0x091e54203a0f4466; BYTE $0x09 // pinsrb xmm10, byte [rsi + rbx + 9], 9 + LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + QUAD $0x091e54203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rbx + 9], 10 + QUAD $0x092654203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r12 + 9], 11 + QUAD $0x092e54203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r13 + 9], 12 + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] + QUAD $0x091e54203a0f4466; BYTE $0x0d // pinsrb xmm10, byte [rsi + rbx + 9], 13 + QUAD $0x093e54203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rdi + 9], 14 + QUAD $0x091654203a0f4466; BYTE $0x0f // pinsrb xmm10, byte [rsi + rdx + 9], 15 LONG $0xeb0f4166; BYTE $0xc8 // por xmm1, xmm8 - QUAD $0x0000d0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm1 + QUAD $0x0000a0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm1 LONG $0x640f4566; BYTE $0xd1 // pcmpgtb xmm10, xmm9 LONG $0x6f0f4166; BYTE $0xca // movdqa xmm1, xmm10 LONG $0x6f0f4466; BYTE $0xc4 // movdqa xmm8, xmm4 LONG $0xccdb0f66 // pand xmm1, xmm4 LONG $0xf80f4166; BYTE $0xca // psubb xmm1, xmm10 - LONG $0x065cb60f; BYTE $0x16 // movzx ebx, byte [rsi + rax + 22] + LONG $0x5cb60f42; WORD $0x1606 // movzx ebx, byte [rsi + r8 + 22] LONG $0xdb6e0f66 // movd xmm3, ebx - QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] - QUAD $0x01080e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 8], 1 - WORD $0x8949; BYTE $0xcb // mov r11, rcx - QUAD $0x082e64203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r13 + 8], 2 - QUAD $0x080664203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r8 + 8], 3 - LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] - QUAD $0x04083e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 8], 4 - QUAD $0x083664203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r14 + 8], 5 - QUAD $0x06081664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 8], 6 - WORD $0x894c; BYTE $0xca // mov rdx, r9 - QUAD 
$0x080e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r9 + 8], 7 - LONG $0x24748b4c; BYTE $0x20 // mov r14, qword [rsp + 32] - QUAD $0x083664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r14 + 8], 8 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] - QUAD $0x09080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 9 - WORD $0x894c; BYTE $0xfb // mov rbx, r15 - QUAD $0x083e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r15 + 8], 10 - LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] - QUAD $0x083e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 8], 11 - QUAD $0x082664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 8], 12 - LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] - QUAD $0x0d080e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 8], 13 - QUAD $0x081664203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r10 + 8], 14 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] - QUAD $0x080e64203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r9 + 8], 15 + QUAD $0x00013024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 304] + WORD $0x894c; BYTE $0xcb // mov rbx, r9 + QUAD $0x080e64203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rsi + r9 + 8], 1 + QUAD $0x02080e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 8], 2 + WORD $0x8949; BYTE $0xcc // mov r12, rcx + QUAD $0x083e64203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r15 + 8], 3 + WORD $0x894d; BYTE $0xf5 // mov r13, r14 + QUAD $0x083664203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rsi + r14 + 8], 4 + LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x05081664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 8], 5 + LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] + QUAD $0x06083e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 8], 6 + WORD $0x894d; BYTE $0xd1 // mov r9, r10 + QUAD $0x081664203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r10 + 8], 7 + WORD $0x894d; BYTE $0xda // mov r10, r11 + QUAD $0x081e64203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r11 + 8], 8 + LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] + QUAD $0x09080e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 8], 9 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0a080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 10 + LONG $0x24748b4c; BYTE $0x58 // mov r14, qword [rsp + 88] + QUAD $0x083664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r14 + 8], 11 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0c080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 12 + LONG $0x245c8b4c; BYTE $0x10 // mov r11, qword [rsp + 16] + QUAD $0x081e64203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r11 + 8], 13 + LONG $0x24448b4c; BYTE $0x18 // mov r8, qword [rsp + 24] + QUAD $0x080664203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r8 + 8], 14 + LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] + QUAD $0x0f080664203a0f66 // pinsrb xmm4, byte [rsi + rax + 8], 15 LONG $0x640f4166; BYTE $0xe1 // pcmpgtb xmm4, xmm9 LONG $0xdb0f4166; BYTE $0xe0 // pand xmm4, xmm8 - QUAD $0x009024946f0f4466; WORD $0x0000 // movdqa xmm10, oword [rsp + 144] - QUAD $0x0a1e54203a0f4666; BYTE $0x01 // pinsrb xmm10, byte [rsi + r11 + 10], 1 - QUAD $0x0a2e54203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rsi + r13 + 10], 2 - QUAD $0x0a0654203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r8 + 10], 3 - QUAD $0x0a3e54203a0f4466; BYTE $0x04 // pinsrb xmm10, byte [rsi + rdi + 10], 4 - LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] - QUAD $0x0a3e54203a0f4466; BYTE $0x05 // 
pinsrb xmm10, byte [rsi + rdi + 10], 5 - LONG $0x24448b4c; BYTE $0x40 // mov r8, qword [rsp + 64] - QUAD $0x0a0654203a0f4666; BYTE $0x06 // pinsrb xmm10, byte [rsi + r8 + 10], 6 - QUAD $0x0a1654203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rdx + 10], 7 - WORD $0x894d; BYTE $0xf3 // mov r11, r14 - QUAD $0x0a3654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r14 + 10], 8 - QUAD $0x0a0654203a0f4466; BYTE $0x09 // pinsrb xmm10, byte [rsi + rax + 10], 9 - QUAD $0x0a1e54203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rbx + 10], 10 - QUAD $0x0a3e54203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r15 + 10], 11 - QUAD $0x0a2654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r12 + 10], 12 - QUAD $0x0a0e54203a0f4466; BYTE $0x0d // pinsrb xmm10, byte [rsi + rcx + 10], 13 - QUAD $0x0a1654203a0f4666; BYTE $0x0e // pinsrb xmm10, byte [rsi + r10 + 10], 14 - QUAD $0x0a0e54203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r9 + 10], 15 + QUAD $0x00b024946f0f4466; WORD $0x0000 // movdqa xmm10, oword [rsp + 176] + QUAD $0x0a1e54203a0f4466; BYTE $0x01 // pinsrb xmm10, byte [rsi + rbx + 10], 1 + QUAD $0x0a2654203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rsi + r12 + 10], 2 + QUAD $0x0a3e54203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r15 + 10], 3 + QUAD $0x0a2e54203a0f4666; BYTE $0x04 // pinsrb xmm10, byte [rsi + r13 + 10], 4 + QUAD $0x0a1654203a0f4466; BYTE $0x05 // pinsrb xmm10, byte [rsi + rdx + 10], 5 + QUAD $0x0a3e54203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rdi + 10], 6 + QUAD $0x0a0e54203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r9 + 10], 7 + QUAD $0x0a1654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r10 + 10], 8 + QUAD $0x0a0e54203a0f4466; BYTE $0x09 // pinsrb xmm10, byte [rsi + rcx + 10], 9 + LONG $0x247c8b4c; BYTE $0x48 // mov r15, qword [rsp + 72] + QUAD $0x0a3e54203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r15 + 10], 10 + QUAD $0x0a3654203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r14 + 10], 11 + LONG $0x246c8b4c; BYTE $0x38 // mov r13, qword [rsp + 56] + QUAD $0x0a2e54203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r13 + 10], 12 + QUAD $0x0a1e54203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r11 + 10], 13 + QUAD $0x0a0654203a0f4666; BYTE $0x0e // pinsrb xmm10, byte [rsi + r8 + 10], 14 + WORD $0x894d; BYTE $0xc3 // mov r11, r8 + QUAD $0x0a0654203a0f4466; BYTE $0x0f // pinsrb xmm10, byte [rsi + rax + 10], 15 LONG $0x640f4566; BYTE $0xd1 // pcmpgtb xmm10, xmm9 QUAD $0x0000b095db0f4466; BYTE $0x00 // pand xmm10, oword 176[rbp] /* [rip + .LCPI7_11] */ LONG $0xeb0f4466; BYTE $0xd4 // por xmm10, xmm4 - LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] - LONG $0x0e5cb60f; BYTE $0x17 // movzx ebx, byte [rsi + rcx + 23] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x065cb60f; BYTE $0x17 // movzx ebx, byte [rsi + rax + 23] LONG $0x6e0f4466; BYTE $0xc3 // movd xmm8, ebx LONG $0xeb0f4466; BYTE $0xd1 // por xmm10, xmm1 - QUAD $0x009024947f0f4466; WORD $0x0000 // movdqa oword [rsp + 144], xmm10 - LONG $0x0e5cb60f; BYTE $0x18 // movzx ebx, byte [rsi + rcx + 24] + QUAD $0x00b024947f0f4466; WORD $0x0000 // movdqa oword [rsp + 176], xmm10 + LONG $0x065cb60f; BYTE $0x18 // movzx ebx, byte [rsi + rax + 24] LONG $0x6e0f4466; BYTE $0xd3 // movd xmm10, ebx - LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] - QUAD $0x0b165c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rsi + rdx + 11], 1 - QUAD $0x0b2e5c203a0f4666; BYTE $0x02 // pinsrb xmm11, byte [rsi + r13 + 11], 2 - WORD $0x894d; BYTE $0xee // mov r14, r13 - 
LONG $0x244c8b48; BYTE $0x78 // mov rcx, qword [rsp + 120] - QUAD $0x0b0e5c203a0f4466; BYTE $0x03 // pinsrb xmm11, byte [rsi + rcx + 11], 3 - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] - QUAD $0x0b0e5c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rsi + rcx + 11], 4 - WORD $0x8949; BYTE $0xcd // mov r13, rcx - QUAD $0x0b3e5c203a0f4466; BYTE $0x05 // pinsrb xmm11, byte [rsi + rdi + 11], 5 - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - QUAD $0x0b065c203a0f4666; BYTE $0x06 // pinsrb xmm11, byte [rsi + r8 + 11], 6 - LONG $0x247c8b48; BYTE $0x48 // mov rdi, qword [rsp + 72] - QUAD $0x0b3e5c203a0f4466; BYTE $0x07 // pinsrb xmm11, byte [rsi + rdi + 11], 7 - WORD $0x894d; BYTE $0xd8 // mov r8, r11 - QUAD $0x0b1e5c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r11 + 11], 8 - WORD $0x8949; BYTE $0xc1 // mov r9, rax - QUAD $0x0b065c203a0f4466; BYTE $0x09 // pinsrb xmm11, byte [rsi + rax + 11], 9 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] - QUAD $0x0b165c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r10 + 11], 10 - QUAD $0x0b3e5c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r15 + 11], 11 - QUAD $0x0b265c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r12 + 11], 12 - LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - QUAD $0x0b065c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rax + 11], 13 + LONG $0x24748b4c; BYTE $0x20 // mov r14, qword [rsp + 32] + QUAD $0x0b365c203a0f4666; BYTE $0x01 // pinsrb xmm11, byte [rsi + r14 + 11], 1 + WORD $0x894c; BYTE $0xe1 // mov rcx, r12 + QUAD $0x000000d024a4894c // mov qword [rsp + 208], r12 + QUAD $0x0b265c203a0f4666; BYTE $0x02 // pinsrb xmm11, byte [rsi + r12 + 11], 2 + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] + QUAD $0x0b065c203a0f4466; BYTE $0x03 // pinsrb xmm11, byte [rsi + rax + 11], 3 + LONG $0x245c8b48; BYTE $0x40 // mov rbx, qword [rsp + 64] + QUAD $0x0b1e5c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rsi + rbx + 11], 4 + QUAD $0x0b165c203a0f4466; BYTE $0x05 // pinsrb xmm11, byte [rsi + rdx + 11], 5 + QUAD $0x0b3e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rdi + 11], 6 + WORD $0x894d; BYTE $0xc8 // mov r8, r9 + QUAD $0x0b0e5c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r9 + 11], 7 + WORD $0x894d; BYTE $0xd1 // mov r9, r10 + QUAD $0x0b165c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r10 + 11], 8 + LONG $0x24548b4c; BYTE $0x78 // mov r10, qword [rsp + 120] + QUAD $0x0b165c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r10 + 11], 9 + QUAD $0x0b3e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r15 + 11], 10 + LONG $0x24648b4c; BYTE $0x58 // mov r12, qword [rsp + 88] + QUAD $0x0b265c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r12 + 11], 11 + QUAD $0x0b2e5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r13 + 11], 12 LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] - QUAD $0x0b1e5c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rbx + 11], 14 + QUAD $0x0b1e5c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rbx + 11], 13 + QUAD $0x0b1e5c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r11 + 11], 14 LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] QUAD $0x0b1e5c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r11 + 11], 15 - QUAD $0x0c166c203a0f4466; BYTE $0x01 // pinsrb xmm13, byte [rsi + rdx + 12], 1 - QUAD $0x0c366c203a0f4666; BYTE $0x02 // pinsrb xmm13, byte [rsi + r14 + 12], 2 - LONG $0x24748b4c; BYTE $0x78 // mov r14, qword [rsp + 120] - QUAD $0x0c366c203a0f4666; BYTE $0x03 // pinsrb xmm13, byte [rsi + 
r14 + 12], 3 - QUAD $0x0c2e6c203a0f4666; BYTE $0x04 // pinsrb xmm13, byte [rsi + r13 + 12], 4 - LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] - QUAD $0x0c2e6c203a0f4666; BYTE $0x05 // pinsrb xmm13, byte [rsi + r13 + 12], 5 - QUAD $0x0c0e6c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rcx + 12], 6 - QUAD $0x0c3e6c203a0f4466; BYTE $0x07 // pinsrb xmm13, byte [rsi + rdi + 12], 7 - QUAD $0x0c066c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r8 + 12], 8 - QUAD $0x0c0e6c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r9 + 12], 9 - QUAD $0x0c166c203a0f4666; BYTE $0x0a // pinsrb xmm13, byte [rsi + r10 + 12], 10 - QUAD $0x0c3e6c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r15 + 12], 11 - QUAD $0x0c266c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r12 + 12], 12 - QUAD $0x0c066c203a0f4466; BYTE $0x0d // pinsrb xmm13, byte [rsi + rax + 12], 13 - WORD $0x8949; BYTE $0xc5 // mov r13, rax + QUAD $0x0c366c203a0f4666; BYTE $0x01 // pinsrb xmm13, byte [rsi + r14 + 12], 1 + QUAD $0x0c0e6c203a0f4466; BYTE $0x02 // pinsrb xmm13, byte [rsi + rcx + 12], 2 + QUAD $0x0c066c203a0f4466; BYTE $0x03 // pinsrb xmm13, byte [rsi + rax + 12], 3 + LONG $0x24748b4c; BYTE $0x40 // mov r14, qword [rsp + 64] + QUAD $0x0c366c203a0f4666; BYTE $0x04 // pinsrb xmm13, byte [rsi + r14 + 12], 4 + QUAD $0x0c166c203a0f4466; BYTE $0x05 // pinsrb xmm13, byte [rsi + rdx + 12], 5 + QUAD $0x0c3e6c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rdi + 12], 6 + QUAD $0x0c066c203a0f4666; BYTE $0x07 // pinsrb xmm13, byte [rsi + r8 + 12], 7 + QUAD $0x0c0e6c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r9 + 12], 8 + QUAD $0x0c166c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r10 + 12], 9 + QUAD $0x0c3e6c203a0f4666; BYTE $0x0a // pinsrb xmm13, byte [rsi + r15 + 12], 10 + QUAD $0x0c266c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r12 + 12], 11 + QUAD $0x0c2e6c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r13 + 12], 12 + QUAD $0x0c1e6c203a0f4466; BYTE $0x0d // pinsrb xmm13, byte [rsi + rbx + 12], 13 + LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] QUAD $0x0c1e6c203a0f4466; BYTE $0x0e // pinsrb xmm13, byte [rsi + rbx + 12], 14 - WORD $0x894c; BYTE $0xd8 // mov rax, r11 QUAD $0x0c1e6c203a0f4666; BYTE $0x0f // pinsrb xmm13, byte [rsi + r11 + 12], 15 - QUAD $0x0d1664203a0f4466; BYTE $0x01 // pinsrb xmm12, byte [rsi + rdx + 13], 1 - LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] - QUAD $0x0d1e64203a0f4666; BYTE $0x02 // pinsrb xmm12, byte [rsi + r11 + 13], 2 - QUAD $0x0d3664203a0f4666; BYTE $0x03 // pinsrb xmm12, byte [rsi + r14 + 13], 3 - LONG $0x24548b48; BYTE $0x38 // mov rdx, qword [rsp + 56] - QUAD $0x0d1664203a0f4466; BYTE $0x04 // pinsrb xmm12, byte [rsi + rdx + 13], 4 - LONG $0x24548b48; BYTE $0x68 // mov rdx, qword [rsp + 104] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x0d0664203a0f4466; BYTE $0x01 // pinsrb xmm12, byte [rsi + rax + 13], 1 + QUAD $0x0d0e64203a0f4466; BYTE $0x02 // pinsrb xmm12, byte [rsi + rcx + 13], 2 + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + QUAD $0x0d0e64203a0f4466; BYTE $0x03 // pinsrb xmm12, byte [rsi + rcx + 13], 3 + QUAD $0x0d3664203a0f4666; BYTE $0x04 // pinsrb xmm12, byte [rsi + r14 + 13], 4 QUAD $0x0d1664203a0f4466; BYTE $0x05 // pinsrb xmm12, byte [rsi + rdx + 13], 5 - QUAD $0x0d0e64203a0f4466; BYTE $0x06 // pinsrb xmm12, byte [rsi + rcx + 13], 6 - QUAD $0x0d3e64203a0f4466; BYTE $0x07 // pinsrb xmm12, byte [rsi + rdi + 13], 7 - QUAD $0x0d0664203a0f4666; BYTE $0x08 // pinsrb xmm12, 
byte [rsi + r8 + 13], 8 - QUAD $0x0d0e64203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r9 + 13], 9 - QUAD $0x0d1664203a0f4666; BYTE $0x0a // pinsrb xmm12, byte [rsi + r10 + 13], 10 - QUAD $0x0d3e64203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r15 + 13], 11 - QUAD $0x0d2664203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r12 + 13], 12 - WORD $0x894c; BYTE $0xef // mov rdi, r13 - QUAD $0x0d2e64203a0f4666; BYTE $0x0d // pinsrb xmm12, byte [rsi + r13 + 13], 13 + QUAD $0x0d3e64203a0f4466; BYTE $0x06 // pinsrb xmm12, byte [rsi + rdi + 13], 6 + QUAD $0x0d0664203a0f4666; BYTE $0x07 // pinsrb xmm12, byte [rsi + r8 + 13], 7 + QUAD $0x0d0e64203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r9 + 13], 8 + QUAD $0x0d1664203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r10 + 13], 9 + QUAD $0x0d3e64203a0f4666; BYTE $0x0a // pinsrb xmm12, byte [rsi + r15 + 13], 10 + QUAD $0x0d2664203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r12 + 13], 11 + QUAD $0x0d2e64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r13 + 13], 12 + LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] + QUAD $0x0d3e64203a0f4466; BYTE $0x0d // pinsrb xmm12, byte [rsi + rdi + 13], 13 QUAD $0x0d1e64203a0f4466; BYTE $0x0e // pinsrb xmm12, byte [rsi + rbx + 13], 14 - QUAD $0x0d0664203a0f4466; BYTE $0x0f // pinsrb xmm12, byte [rsi + rax + 13], 15 + QUAD $0x0d1e64203a0f4666; BYTE $0x0f // pinsrb xmm12, byte [rsi + r11 + 13], 15 LONG $0x640f4566; BYTE $0xd9 // pcmpgtb xmm11, xmm9 QUAD $0x0000c09ddb0f4466; BYTE $0x00 // pand xmm11, oword 192[rbp] /* [rip + .LCPI7_12] */ LONG $0x640f4566; BYTE $0xe9 // pcmpgtb xmm13, xmm9 QUAD $0x0000d0addb0f4466; BYTE $0x00 // pand xmm13, oword 208[rbp] /* [rip + .LCPI7_13] */ LONG $0xeb0f4566; BYTE $0xeb // por xmm13, xmm11 - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] - LONG $0x065cb60f; BYTE $0x19 // movzx ebx, byte [rsi + rax + 25] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x3e5cb60f; BYTE $0x19 // movzx ebx, byte [rsi + rdi + 25] LONG $0xcb6e0f66 // movd xmm1, ebx LONG $0x640f4566; BYTE $0xe1 // pcmpgtb xmm12, xmm9 QUAD $0x0000e0a5db0f4466; BYTE $0x00 // pand xmm12, oword 224[rbp] /* [rip + .LCPI7_14] */ LONG $0xeb0f4566; BYTE $0xe5 // por xmm12, xmm13 - LONG $0x065cb60f; BYTE $0x1a // movzx ebx, byte [rsi + rax + 26] + LONG $0x3e5cb60f; BYTE $0x1a // movzx ebx, byte [rsi + rdi + 26] LONG $0x6e0f4466; BYTE $0xdb // movd xmm11, ebx - QUAD $0x00013024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 304] - LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] QUAD $0x010e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 14], 1 - WORD $0x894d; BYTE $0xdd // mov r13, r11 - QUAD $0x0e1e64203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r11 + 14], 2 - WORD $0x894d; BYTE $0xf3 // mov r11, r14 - QUAD $0x0e3664203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r14 + 14], 3 - LONG $0x24748b4c; BYTE $0x38 // mov r14, qword [rsp + 56] - QUAD $0x0e3664203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rsi + r14 + 14], 4 - WORD $0x8948; BYTE $0xd0 // mov rax, rdx + QUAD $0x000000d0249c8b48 // mov rbx, qword [rsp + 208] + QUAD $0x020e1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 14], 2 + WORD $0x8949; BYTE $0xcb // mov r11, rcx + QUAD $0x030e0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 14], 3 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x040e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 14], 4 + WORD $0x8948; BYTE $0xd1 // mov rcx, rdx QUAD 
$0x050e1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 14], 5 - QUAD $0x060e0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 14], 6 - LONG $0x24548b48; BYTE $0x48 // mov rdx, qword [rsp + 72] - QUAD $0x070e1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 14], 7 - QUAD $0x0e0664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r8 + 14], 8 - QUAD $0x0e0e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r9 + 14], 9 - WORD $0x894c; BYTE $0xd3 // mov rbx, r10 - QUAD $0x0e1664203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r10 + 14], 10 - QUAD $0x0e3e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 14], 11 - QUAD $0x0e2664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 14], 12 - QUAD $0x0d0e3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 14], 13 - LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] + LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] + QUAD $0x060e1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 14], 6 + QUAD $0x0e0664203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r8 + 14], 7 + QUAD $0x0e0e64203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r9 + 14], 8 + QUAD $0x0e1664203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r10 + 14], 9 + QUAD $0x0e3e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r15 + 14], 10 + QUAD $0x0e2664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r12 + 14], 11 + QUAD $0x0e2e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r13 + 14], 12 + LONG $0x24748b4c; BYTE $0x10 // mov r14, qword [rsp + 16] + QUAD $0x0e3664203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r14 + 14], 13 + LONG $0x247c8b48; BYTE $0x18 // mov rdi, qword [rsp + 24] QUAD $0x0e0e3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 14], 14 - LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] - QUAD $0x0e1664203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r10 + 14], 15 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + QUAD $0x0f0e3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 14], 15 + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x0f3e74203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rsi + rdi + 15], 1 - QUAD $0x0f2e74203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rsi + r13 + 15], 2 + QUAD $0x0f1e74203a0f4466; BYTE $0x02 // pinsrb xmm14, byte [rsi + rbx + 15], 2 QUAD $0x0f1e74203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rsi + r11 + 15], 3 - QUAD $0x0f3674203a0f4666; BYTE $0x04 // pinsrb xmm14, byte [rsi + r14 + 15], 4 - QUAD $0x0f0674203a0f4466; BYTE $0x05 // pinsrb xmm14, byte [rsi + rax + 15], 5 - QUAD $0x0f0e74203a0f4466; BYTE $0x06 // pinsrb xmm14, byte [rsi + rcx + 15], 6 - QUAD $0x0f1674203a0f4466; BYTE $0x07 // pinsrb xmm14, byte [rsi + rdx + 15], 7 - QUAD $0x0f0674203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r8 + 15], 8 - QUAD $0x0f0e74203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r9 + 15], 9 - QUAD $0x0f1e74203a0f4466; BYTE $0x0a // pinsrb xmm14, byte [rsi + rbx + 15], 10 - QUAD $0x0f3e74203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r15 + 15], 11 - QUAD $0x0f2674203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rsi + r12 + 15], 12 - LONG $0x247c8b48; BYTE $0x08 // mov rdi, qword [rsp + 8] - QUAD $0x0f3e74203a0f4466; BYTE $0x0d // pinsrb xmm14, byte [rsi + rdi + 15], 13 - LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] + QUAD $0x0f0674203a0f4466; BYTE $0x04 // pinsrb xmm14, byte [rsi + rax + 15], 4 + QUAD $0x0f0e74203a0f4466; BYTE $0x05 // pinsrb xmm14, byte [rsi + rcx + 15], 5 + QUAD $0x0f1674203a0f4466; BYTE $0x06 // 
pinsrb xmm14, byte [rsi + rdx + 15], 6 + QUAD $0x0f0674203a0f4666; BYTE $0x07 // pinsrb xmm14, byte [rsi + r8 + 15], 7 + QUAD $0x0f0e74203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r9 + 15], 8 + QUAD $0x0f1674203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r10 + 15], 9 + QUAD $0x0f3e74203a0f4666; BYTE $0x0a // pinsrb xmm14, byte [rsi + r15 + 15], 10 + QUAD $0x0f2674203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r12 + 15], 11 + QUAD $0x0f2e74203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rsi + r13 + 15], 12 + QUAD $0x0f3674203a0f4666; BYTE $0x0d // pinsrb xmm14, byte [rsi + r14 + 15], 13 + LONG $0x247c8b48; BYTE $0x18 // mov rdi, qword [rsp + 24] QUAD $0x0f3e74203a0f4466; BYTE $0x0e // pinsrb xmm14, byte [rsi + rdi + 15], 14 - QUAD $0x0f1674203a0f4666; BYTE $0x0f // pinsrb xmm14, byte [rsi + r10 + 15], 15 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] + QUAD $0x0f3674203a0f4666; BYTE $0x0f // pinsrb xmm14, byte [rsi + r14 + 15], 15 + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x103e7c203a0f4466; BYTE $0x01 // pinsrb xmm15, byte [rsi + rdi + 16], 1 - QUAD $0x102e7c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rsi + r13 + 16], 2 + QUAD $0x101e7c203a0f4466; BYTE $0x02 // pinsrb xmm15, byte [rsi + rbx + 16], 2 QUAD $0x101e7c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rsi + r11 + 16], 3 - QUAD $0x10367c203a0f4666; BYTE $0x04 // pinsrb xmm15, byte [rsi + r14 + 16], 4 - QUAD $0x10067c203a0f4466; BYTE $0x05 // pinsrb xmm15, byte [rsi + rax + 16], 5 - QUAD $0x100e7c203a0f4466; BYTE $0x06 // pinsrb xmm15, byte [rsi + rcx + 16], 6 - QUAD $0x10167c203a0f4466; BYTE $0x07 // pinsrb xmm15, byte [rsi + rdx + 16], 7 - QUAD $0x10067c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r8 + 16], 8 - QUAD $0x100e7c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r9 + 16], 9 - QUAD $0x101e7c203a0f4466; BYTE $0x0a // pinsrb xmm15, byte [rsi + rbx + 16], 10 - QUAD $0x103e7c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r15 + 16], 11 - QUAD $0x10267c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r12 + 16], 12 - LONG $0x247c8b48; BYTE $0x08 // mov rdi, qword [rsp + 8] + QUAD $0x10067c203a0f4466; BYTE $0x04 // pinsrb xmm15, byte [rsi + rax + 16], 4 + QUAD $0x100e7c203a0f4466; BYTE $0x05 // pinsrb xmm15, byte [rsi + rcx + 16], 5 + QUAD $0x10167c203a0f4466; BYTE $0x06 // pinsrb xmm15, byte [rsi + rdx + 16], 6 + QUAD $0x10067c203a0f4666; BYTE $0x07 // pinsrb xmm15, byte [rsi + r8 + 16], 7 + QUAD $0x100e7c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r9 + 16], 8 + QUAD $0x10167c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r10 + 16], 9 + QUAD $0x103e7c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r15 + 16], 10 + QUAD $0x10267c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r12 + 16], 11 + QUAD $0x102e7c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r13 + 16], 12 + LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] QUAD $0x103e7c203a0f4466; BYTE $0x0d // pinsrb xmm15, byte [rsi + rdi + 16], 13 - LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] - QUAD $0x10167c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r10 + 16], 14 - LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x24748b4c; BYTE $0x18 // mov r14, qword [rsp + 24] + QUAD $0x10367c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r14 + 16], 14 + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] QUAD $0x01113e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 17], 1 - QUAD 
$0x112e44203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r13 + 17], 2 + QUAD $0x02111e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 17], 2 QUAD $0x111e44203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r11 + 17], 3 - QUAD $0x113644203a0f4266; BYTE $0x04 // pinsrb xmm0, byte [rsi + r14 + 17], 4 - QUAD $0x05110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 5 - WORD $0x8949; BYTE $0xc5 // mov r13, rax - QUAD $0x06110e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 17], 6 - QUAD $0x07111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 7 - QUAD $0x110644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r8 + 17], 8 - QUAD $0x110e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r9 + 17], 9 - QUAD $0x0a111e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 17], 10 - QUAD $0x113e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r15 + 17], 11 - QUAD $0x112644203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r12 + 17], 12 - LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] - QUAD $0x0d110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 13 + QUAD $0x04110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 4 + QUAD $0x05110e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 17], 5 + QUAD $0x06111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 6 + QUAD $0x110644203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r8 + 17], 7 + QUAD $0x110e44203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r9 + 17], 8 + QUAD $0x111644203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r10 + 17], 9 + QUAD $0x113e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r15 + 17], 10 + QUAD $0x112644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r12 + 17], 11 + QUAD $0x112e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r13 + 17], 12 LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] - QUAD $0x0e113e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 17], 14 - QUAD $0x009024a4eb0f4466; WORD $0x0000 // por xmm12, oword [rsp + 144] - LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] - LONG $0x065cb60f; BYTE $0x1b // movzx ebx, byte [rsi + rax + 27] + QUAD $0x0d113e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 17], 13 + QUAD $0x113644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r14 + 17], 14 + QUAD $0x00b024a4eb0f4466; WORD $0x0000 // por xmm12, oword [rsp + 176] + LONG $0x247c8b48; BYTE $0x28 // mov rdi, qword [rsp + 40] + LONG $0x3e5cb60f; BYTE $0x1b // movzx ebx, byte [rsi + rdi + 27] LONG $0x6e0f4466; BYTE $0xcb // movd xmm9, ebx - QUAD $0x00a024ac6f0f4466; WORD $0x0000 // movdqa xmm13, oword [rsp + 160] + QUAD $0x009024ac6f0f4466; WORD $0x0000 // movdqa xmm13, oword [rsp + 144] LONG $0x640f4166; BYTE $0xe5 // pcmpgtb xmm4, xmm13 QUAD $0x000000f0a5db0f66 // pand xmm4, oword 240[rbp] /* [rip + .LCPI7_15] */ LONG $0x640f4566; BYTE $0xf5 // pcmpgtb xmm14, xmm13 LONG $0x710f4166; WORD $0x07f6 // psllw xmm14, 7 LONG $0xdb0f4466; WORD $0x6075 // pand xmm14, oword 96[rbp] /* [rip + .LCPI7_6] */ LONG $0xeb0f4466; BYTE $0xf4 // por xmm14, xmm4 - LONG $0x065cb60f; BYTE $0x1c // movzx ebx, byte [rsi + rax + 28] + LONG $0x3e5cb60f; BYTE $0x1c // movzx ebx, byte [rsi + rdi + 28] LONG $0xe36e0f66 // movd xmm4, ebx - LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] - QUAD $0x111644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r10 + 17], 15 + LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] + QUAD $0x113644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r14 + 17], 15 LONG $0xeb0f4566; BYTE $0xf4 // por xmm14, xmm12 LONG $0x640f4166; BYTE $0xc5 // pcmpgtb xmm0, xmm13 LONG 
$0x6f0f4466; BYTE $0xe8 // movdqa xmm13, xmm0 QUAD $0x0000a0a56f0f4466; BYTE $0x00 // movdqa xmm12, oword 160[rbp] /* [rip + .LCPI7_10] */ LONG $0xdb0f4566; BYTE $0xec // pand xmm13, xmm12 LONG $0xf80f4466; BYTE $0xe8 // psubb xmm13, xmm0 - QUAD $0x009024ac7f0f4466; WORD $0x0000 // movdqa oword [rsp + 144], xmm13 - LONG $0x065cb60f; BYTE $0x1d // movzx ebx, byte [rsi + rax + 29] + QUAD $0x00b024ac7f0f4466; WORD $0x0000 // movdqa oword [rsp + 176], xmm13 + LONG $0x3e5cb60f; BYTE $0x1d // movzx ebx, byte [rsi + rdi + 29] LONG $0x6e0f4466; BYTE $0xeb // movd xmm13, ebx - QUAD $0x10167c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r10 + 16], 15 - QUAD $0x0000a024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 160] + QUAD $0x10367c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r14 + 16], 15 + QUAD $0x00009024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 144] LONG $0x640f4466; BYTE $0xf8 // pcmpgtb xmm15, xmm0 - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] - QUAD $0x01121e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 18], 1 - LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x01123e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 18], 1 + QUAD $0x000000d0249c8b48 // mov rbx, qword [rsp + 208] QUAD $0x02121e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 18], 2 QUAD $0x121e6c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r11 + 18], 3 - QUAD $0x12366c203a0f4266; BYTE $0x04 // pinsrb xmm5, byte [rsi + r14 + 18], 4 - QUAD $0x122e6c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r13 + 18], 5 - QUAD $0x06120e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 18], 6 - QUAD $0x0712166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 18], 7 - QUAD $0x12066c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r8 + 18], 8 - QUAD $0x120e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r9 + 18], 9 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x0a12066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 18], 10 - QUAD $0x123e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r15 + 18], 11 - QUAD $0x12266c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r12 + 18], 12 - LONG $0x245c8b48; BYTE $0x08 // mov rbx, qword [rsp + 8] + QUAD $0x0412066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 18], 4 + QUAD $0x05120e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 18], 5 + QUAD $0x0612166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 18], 6 + QUAD $0x12066c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r8 + 18], 7 + QUAD $0x120e6c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r9 + 18], 8 + QUAD $0x12166c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r10 + 18], 9 + QUAD $0x123e6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r15 + 18], 10 + QUAD $0x12266c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r12 + 18], 11 + QUAD $0x122e6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r13 + 18], 12 + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] QUAD $0x0d121e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 18], 13 - QUAD $0x0e123e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 18], 14 + LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] + QUAD $0x0e121e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 18], 14 LONG $0xdb0f4566; BYTE $0xfc // pand xmm15, xmm12 - QUAD $0x12166c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r10 + 18], 15 + QUAD $0x12366c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r14 + 18], 15 LONG $0xe8640f66 // pcmpgtb xmm5, xmm0 QUAD $0x000000b0addb0f66 // pand 
xmm5, oword 176[rbp] /* [rip + .LCPI7_11] */ LONG $0xeb0f4166; BYTE $0xef // por xmm5, xmm15 - LONG $0x247c8b48; BYTE $0x18 // mov rdi, qword [rsp + 24] - LONG $0x3e5cb60f; BYTE $0x1e // movzx ebx, byte [rsi + rdi + 30] + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + LONG $0x065cb60f; BYTE $0x1e // movzx ebx, byte [rsi + rax + 30] LONG $0x6e0f4466; BYTE $0xe3 // movd xmm12, ebx - LONG $0x245c8b48; BYTE $0x28 // mov rbx, qword [rsp + 40] - QUAD $0x01131e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 19], 1 - QUAD $0x01141e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 20], 1 - QUAD $0x01151e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 21], 1 - QUAD $0x01161e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 22], 1 - QUAD $0x171e44203a0f4466; BYTE $0x01 // pinsrb xmm8, byte [rsi + rbx + 23], 1 - QUAD $0x181e54203a0f4466; BYTE $0x01 // pinsrb xmm10, byte [rsi + rbx + 24], 1 - QUAD $0x01191e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 25], 1 - QUAD $0x1a1e5c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rsi + rbx + 26], 1 - QUAD $0x1b1e4c203a0f4466; BYTE $0x01 // pinsrb xmm9, byte [rsi + rbx + 27], 1 - QUAD $0x011c1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 28], 1 - QUAD $0x1d1e6c203a0f4466; BYTE $0x01 // pinsrb xmm13, byte [rsi + rbx + 29], 1 - QUAD $0x1e1e64203a0f4466; BYTE $0x01 // pinsrb xmm12, byte [rsi + rbx + 30], 1 - LONG $0x3e7cb60f; BYTE $0x1f // movzx edi, byte [rsi + rdi + 31] + WORD $0x8948; BYTE $0xfb // mov rbx, rdi + QUAD $0x01133e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 19], 1 + QUAD $0x01143e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 20], 1 + QUAD $0x01153e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 21], 1 + QUAD $0x01163e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 22], 1 + QUAD $0x173e44203a0f4466; BYTE $0x01 // pinsrb xmm8, byte [rsi + rdi + 23], 1 + QUAD $0x183e54203a0f4466; BYTE $0x01 // pinsrb xmm10, byte [rsi + rdi + 24], 1 + QUAD $0x01193e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 25], 1 + QUAD $0x1a3e5c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rsi + rdi + 26], 1 + QUAD $0x1b3e4c203a0f4466; BYTE $0x01 // pinsrb xmm9, byte [rsi + rdi + 27], 1 + QUAD $0x011c3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 28], 1 + QUAD $0x1d3e6c203a0f4466; BYTE $0x01 // pinsrb xmm13, byte [rsi + rdi + 29], 1 + QUAD $0x1e3e64203a0f4466; BYTE $0x01 // pinsrb xmm12, byte [rsi + rdi + 30], 1 + LONG $0x067cb60f; BYTE $0x1f // movzx edi, byte [rsi + rax + 31] LONG $0xc76e0f66 // movd xmm0, edi QUAD $0x011f1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 31], 1 - LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x02133e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 19], 2 QUAD $0x02143e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 20], 2 QUAD $0x02153e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 21], 2 @@ -36232,84 +37617,85 @@ LBB7_85: QUAD $0x1e3e64203a0f4466; BYTE $0x02 // pinsrb xmm12, byte [rsi + rdi + 30], 2 QUAD $0x021f3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 31], 2 QUAD $0x131e7c203a0f4266; BYTE $0x03 // pinsrb xmm7, byte [rsi + r11 + 19], 3 - QUAD $0x13367c203a0f4266; BYTE $0x04 // pinsrb xmm7, byte [rsi + r14 + 19], 4 - QUAD $0x132e7c203a0f4266; BYTE $0x05 // pinsrb xmm7, byte [rsi + r13 + 19], 5 - QUAD $0x06130e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 19], 6 - QUAD $0x0713167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 19], 7 - QUAD $0x13067c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r8 + 19], 8 - QUAD $0x130e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r9 + 19], 9 - QUAD 
$0x0a13067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 19], 10 - QUAD $0x133e7c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r15 + 19], 11 - QUAD $0x13267c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r12 + 19], 12 - LONG $0x247c8b48; BYTE $0x08 // mov rdi, qword [rsp + 8] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x0413067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 19], 4 + QUAD $0x05130e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 19], 5 + QUAD $0x0613167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 19], 6 + QUAD $0x13067c203a0f4266; BYTE $0x07 // pinsrb xmm7, byte [rsi + r8 + 19], 7 + QUAD $0x130e7c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r9 + 19], 8 + QUAD $0x13167c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r10 + 19], 9 + QUAD $0x133e7c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r15 + 19], 10 + QUAD $0x13267c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r12 + 19], 11 + QUAD $0x132e7c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r13 + 19], 12 + LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] QUAD $0x0d133e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 19], 13 - LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] + LONG $0x245c8b48; BYTE $0x18 // mov rbx, qword [rsp + 24] QUAD $0x0e131e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 19], 14 - QUAD $0x13167c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r10 + 19], 15 + QUAD $0x13367c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r14 + 19], 15 QUAD $0x141e74203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r11 + 20], 3 - QUAD $0x143674203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r14 + 20], 4 - QUAD $0x142e74203a0f4266; BYTE $0x05 // pinsrb xmm6, byte [rsi + r13 + 20], 5 - QUAD $0x06140e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 20], 6 - QUAD $0x07141674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 20], 7 - QUAD $0x140674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r8 + 20], 8 - QUAD $0x140e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r9 + 20], 9 - QUAD $0x0a140674203a0f66 // pinsrb xmm6, byte [rsi + rax + 20], 10 - QUAD $0x143e74203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r15 + 20], 11 - QUAD $0x142674203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r12 + 20], 12 + QUAD $0x04140674203a0f66 // pinsrb xmm6, byte [rsi + rax + 20], 4 + QUAD $0x05140e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 20], 5 + QUAD $0x06141674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 20], 6 + QUAD $0x140674203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r8 + 20], 7 + QUAD $0x140e74203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r9 + 20], 8 + QUAD $0x141674203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r10 + 20], 9 + QUAD $0x143e74203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r15 + 20], 10 + QUAD $0x142674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r12 + 20], 11 + QUAD $0x142e74203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r13 + 20], 12 QUAD $0x0d143e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 20], 13 QUAD $0x0e141e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 20], 14 - QUAD $0x00009024aceb0f66; BYTE $0x00 // por xmm5, oword [rsp + 144] - QUAD $0x141674203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r10 + 20], 15 - QUAD $0x00a024bc6f0f4466; WORD $0x0000 // movdqa xmm15, oword [rsp + 160] + QUAD $0x0000b024aceb0f66; BYTE $0x00 // por xmm5, oword [rsp + 176] + QUAD $0x143674203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r14 + 20], 15 + QUAD $0x009024bc6f0f4466; WORD $0x0000 // movdqa xmm15, oword [rsp + 144] LONG 
$0x640f4166; BYTE $0xff // pcmpgtb xmm7, xmm15 QUAD $0x000000c0bddb0f66 // pand xmm7, oword 192[rbp] /* [rip + .LCPI7_12] */ LONG $0x640f4166; BYTE $0xf7 // pcmpgtb xmm6, xmm15 QUAD $0x000000d0b5db0f66 // pand xmm6, oword 208[rbp] /* [rip + .LCPI7_13] */ LONG $0xf7eb0f66 // por xmm6, xmm7 QUAD $0x151e54203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r11 + 21], 3 - QUAD $0x153654203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r14 + 21], 4 - QUAD $0x152e54203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r13 + 21], 5 - QUAD $0x06150e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 21], 6 - QUAD $0x07151654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 21], 7 - QUAD $0x150654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r8 + 21], 8 - QUAD $0x150e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r9 + 21], 9 - QUAD $0x0a150654203a0f66 // pinsrb xmm2, byte [rsi + rax + 21], 10 - QUAD $0x153e54203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r15 + 21], 11 - QUAD $0x152654203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r12 + 21], 12 + QUAD $0x04150654203a0f66 // pinsrb xmm2, byte [rsi + rax + 21], 4 + QUAD $0x05150e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 21], 5 + QUAD $0x06151654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 21], 6 + QUAD $0x150654203a0f4266; BYTE $0x07 // pinsrb xmm2, byte [rsi + r8 + 21], 7 + QUAD $0x150e54203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r9 + 21], 8 + QUAD $0x151654203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r10 + 21], 9 + QUAD $0x153e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r15 + 21], 10 + QUAD $0x152654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r12 + 21], 11 + QUAD $0x152e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r13 + 21], 12 QUAD $0x0d153e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 21], 13 QUAD $0x0e151e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 21], 14 - QUAD $0x151654203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r10 + 21], 15 + QUAD $0x153654203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r14 + 21], 15 LONG $0x640f4166; BYTE $0xd7 // pcmpgtb xmm2, xmm15 QUAD $0x000000e0bd6f0f66 // movdqa xmm7, oword 224[rbp] /* [rip + .LCPI7_14] */ LONG $0xd7db0f66 // pand xmm2, xmm7 LONG $0xd6eb0f66 // por xmm2, xmm6 LONG $0xd5eb0f66 // por xmm2, xmm5 QUAD $0x161e5c203a0f4266; BYTE $0x03 // pinsrb xmm3, byte [rsi + r11 + 22], 3 - QUAD $0x16365c203a0f4266; BYTE $0x04 // pinsrb xmm3, byte [rsi + r14 + 22], 4 - QUAD $0x162e5c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r13 + 22], 5 - QUAD $0x06160e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 22], 6 - QUAD $0x0716165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 22], 7 - QUAD $0x16065c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r8 + 22], 8 - QUAD $0x160e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r9 + 22], 9 - QUAD $0x0a16065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 22], 10 - QUAD $0x163e5c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r15 + 22], 11 - QUAD $0x16265c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r12 + 22], 12 + QUAD $0x0416065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 22], 4 + QUAD $0x05160e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 22], 5 + QUAD $0x0616165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 22], 6 + QUAD $0x16065c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r8 + 22], 7 + QUAD $0x160e5c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r9 + 22], 8 + QUAD $0x16165c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r10 + 22], 9 + QUAD $0x163e5c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r15 + 22], 
10 + QUAD $0x16265c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r12 + 22], 11 + QUAD $0x162e5c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r13 + 22], 12 QUAD $0x0d163e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 22], 13 QUAD $0x0e161e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 22], 14 - QUAD $0x16165c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r10 + 22], 15 + QUAD $0x16365c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r14 + 22], 15 QUAD $0x171e44203a0f4666; BYTE $0x03 // pinsrb xmm8, byte [rsi + r11 + 23], 3 - QUAD $0x173644203a0f4666; BYTE $0x04 // pinsrb xmm8, byte [rsi + r14 + 23], 4 - QUAD $0x172e44203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r13 + 23], 5 - QUAD $0x170e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rcx + 23], 6 - QUAD $0x171644203a0f4466; BYTE $0x07 // pinsrb xmm8, byte [rsi + rdx + 23], 7 - QUAD $0x170644203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r8 + 23], 8 - QUAD $0x170e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r9 + 23], 9 - QUAD $0x170644203a0f4466; BYTE $0x0a // pinsrb xmm8, byte [rsi + rax + 23], 10 - QUAD $0x173e44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r15 + 23], 11 - QUAD $0x172644203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r12 + 23], 12 + QUAD $0x170644203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rsi + rax + 23], 4 + QUAD $0x170e44203a0f4466; BYTE $0x05 // pinsrb xmm8, byte [rsi + rcx + 23], 5 + QUAD $0x171644203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rdx + 23], 6 + QUAD $0x170644203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r8 + 23], 7 + QUAD $0x170e44203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r9 + 23], 8 + QUAD $0x171644203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r10 + 23], 9 + QUAD $0x173e44203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r15 + 23], 10 + QUAD $0x172644203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r12 + 23], 11 + QUAD $0x172e44203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r13 + 23], 12 QUAD $0x173e44203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rsi + rdi + 23], 13 QUAD $0x171e44203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rbx + 23], 14 - QUAD $0x171644203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r10 + 23], 15 + QUAD $0x173644203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r14 + 23], 15 LONG $0x640f4166; BYTE $0xdf // pcmpgtb xmm3, xmm15 QUAD $0x000000f0ad6f0f66 // movdqa xmm5, oword 240[rbp] /* [rip + .LCPI7_15] */ LONG $0xdddb0f66 // pand xmm3, xmm5 @@ -36319,18 +37705,18 @@ LBB7_85: LONG $0xdb0f4466; BYTE $0xc6 // pand xmm8, xmm6 LONG $0xeb0f4466; BYTE $0xc3 // por xmm8, xmm3 QUAD $0x191e4c203a0f4266; BYTE $0x03 // pinsrb xmm1, byte [rsi + r11 + 25], 3 - QUAD $0x19364c203a0f4266; BYTE $0x04 // pinsrb xmm1, byte [rsi + r14 + 25], 4 - QUAD $0x192e4c203a0f4266; BYTE $0x05 // pinsrb xmm1, byte [rsi + r13 + 25], 5 - QUAD $0x06190e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 25], 6 - QUAD $0x0719164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 25], 7 - QUAD $0x19064c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r8 + 25], 8 - QUAD $0x190e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r9 + 25], 9 - QUAD $0x0a19064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 25], 10 - QUAD $0x193e4c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r15 + 25], 11 - QUAD $0x19264c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r12 + 25], 12 + QUAD $0x0419064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 25], 4 + QUAD $0x05190e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 25], 5 + QUAD $0x0619164c203a0f66 // pinsrb xmm1, byte [rsi + 
rdx + 25], 6 + QUAD $0x19064c203a0f4266; BYTE $0x07 // pinsrb xmm1, byte [rsi + r8 + 25], 7 + QUAD $0x190e4c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r9 + 25], 8 + QUAD $0x19164c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r10 + 25], 9 + QUAD $0x193e4c203a0f4266; BYTE $0x0a // pinsrb xmm1, byte [rsi + r15 + 25], 10 + QUAD $0x19264c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r12 + 25], 11 + QUAD $0x192e4c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r13 + 25], 12 QUAD $0x0d193e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 25], 13 QUAD $0x0e191e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 25], 14 - QUAD $0x19164c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r10 + 25], 15 + QUAD $0x19364c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r14 + 25], 15 LONG $0xeb0f4466; BYTE $0xc2 // por xmm8, xmm2 LONG $0x640f4166; BYTE $0xcf // pcmpgtb xmm1, xmm15 LONG $0xd16f0f66 // movdqa xmm2, xmm1 @@ -36338,73 +37724,73 @@ LBB7_85: LONG $0xd3db0f66 // pand xmm2, xmm3 LONG $0xd1f80f66 // psubb xmm2, xmm1 QUAD $0x181e54203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r11 + 24], 3 - QUAD $0x183654203a0f4666; BYTE $0x04 // pinsrb xmm10, byte [rsi + r14 + 24], 4 - QUAD $0x182e54203a0f4666; BYTE $0x05 // pinsrb xmm10, byte [rsi + r13 + 24], 5 - QUAD $0x180e54203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rcx + 24], 6 - QUAD $0x181654203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rdx + 24], 7 - QUAD $0x180654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r8 + 24], 8 - QUAD $0x180e54203a0f4666; BYTE $0x09 // pinsrb xmm10, byte [rsi + r9 + 24], 9 - QUAD $0x180654203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rax + 24], 10 - QUAD $0x183e54203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r15 + 24], 11 - QUAD $0x182654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r12 + 24], 12 + QUAD $0x180654203a0f4466; BYTE $0x04 // pinsrb xmm10, byte [rsi + rax + 24], 4 + QUAD $0x180e54203a0f4466; BYTE $0x05 // pinsrb xmm10, byte [rsi + rcx + 24], 5 + QUAD $0x181654203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rdx + 24], 6 + QUAD $0x180654203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r8 + 24], 7 + QUAD $0x180e54203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r9 + 24], 8 + QUAD $0x181654203a0f4666; BYTE $0x09 // pinsrb xmm10, byte [rsi + r10 + 24], 9 + QUAD $0x183e54203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r15 + 24], 10 + QUAD $0x182654203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r12 + 24], 11 + QUAD $0x182e54203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r13 + 24], 12 QUAD $0x183e54203a0f4466; BYTE $0x0d // pinsrb xmm10, byte [rsi + rdi + 24], 13 QUAD $0x181e54203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rbx + 24], 14 - QUAD $0x181654203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r10 + 24], 15 + QUAD $0x183654203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r14 + 24], 15 LONG $0x640f4566; BYTE $0xd7 // pcmpgtb xmm10, xmm15 LONG $0xdb0f4466; BYTE $0xd3 // pand xmm10, xmm3 QUAD $0x1a1e5c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r11 + 26], 3 - QUAD $0x1a365c203a0f4666; BYTE $0x04 // pinsrb xmm11, byte [rsi + r14 + 26], 4 - QUAD $0x1a2e5c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r13 + 26], 5 - QUAD $0x1a0e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rcx + 26], 6 - QUAD $0x1a165c203a0f4466; BYTE $0x07 // pinsrb xmm11, byte [rsi + rdx + 26], 7 - QUAD $0x1a065c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r8 + 26], 8 - QUAD $0x1a0e5c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r9 
+ 26], 9 - QUAD $0x1a065c203a0f4466; BYTE $0x0a // pinsrb xmm11, byte [rsi + rax + 26], 10 - QUAD $0x1a3e5c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r15 + 26], 11 - QUAD $0x1a265c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r12 + 26], 12 + QUAD $0x1a065c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rsi + rax + 26], 4 + QUAD $0x1a0e5c203a0f4466; BYTE $0x05 // pinsrb xmm11, byte [rsi + rcx + 26], 5 + QUAD $0x1a165c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rdx + 26], 6 + QUAD $0x1a065c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r8 + 26], 7 + QUAD $0x1a0e5c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r9 + 26], 8 + QUAD $0x1a165c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r10 + 26], 9 + QUAD $0x1a3e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r15 + 26], 10 + QUAD $0x1a265c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r12 + 26], 11 + QUAD $0x1a2e5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r13 + 26], 12 QUAD $0x1a3e5c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rdi + 26], 13 QUAD $0x1a1e5c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rbx + 26], 14 - QUAD $0x1a165c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r10 + 26], 15 + QUAD $0x1a365c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r14 + 26], 15 LONG $0x640f4566; BYTE $0xdf // pcmpgtb xmm11, xmm15 QUAD $0x0000b09ddb0f4466; BYTE $0x00 // pand xmm11, oword 176[rbp] /* [rip + .LCPI7_11] */ LONG $0xeb0f4566; BYTE $0xda // por xmm11, xmm10 LONG $0xeb0f4466; BYTE $0xda // por xmm11, xmm2 QUAD $0x1b1e4c203a0f4666; BYTE $0x03 // pinsrb xmm9, byte [rsi + r11 + 27], 3 - QUAD $0x1b364c203a0f4666; BYTE $0x04 // pinsrb xmm9, byte [rsi + r14 + 27], 4 - QUAD $0x1b2e4c203a0f4666; BYTE $0x05 // pinsrb xmm9, byte [rsi + r13 + 27], 5 - QUAD $0x1b0e4c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rcx + 27], 6 - QUAD $0x1b164c203a0f4466; BYTE $0x07 // pinsrb xmm9, byte [rsi + rdx + 27], 7 - QUAD $0x1b064c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r8 + 27], 8 - QUAD $0x1b0e4c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r9 + 27], 9 - QUAD $0x1b064c203a0f4466; BYTE $0x0a // pinsrb xmm9, byte [rsi + rax + 27], 10 - QUAD $0x1b3e4c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r15 + 27], 11 - QUAD $0x1b264c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r12 + 27], 12 + QUAD $0x1b064c203a0f4466; BYTE $0x04 // pinsrb xmm9, byte [rsi + rax + 27], 4 + QUAD $0x1b0e4c203a0f4466; BYTE $0x05 // pinsrb xmm9, byte [rsi + rcx + 27], 5 + QUAD $0x1b164c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rdx + 27], 6 + QUAD $0x1b064c203a0f4666; BYTE $0x07 // pinsrb xmm9, byte [rsi + r8 + 27], 7 + QUAD $0x1b0e4c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r9 + 27], 8 + QUAD $0x1b164c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r10 + 27], 9 + QUAD $0x1b3e4c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r15 + 27], 10 + QUAD $0x1b264c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r12 + 27], 11 + QUAD $0x1b2e4c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r13 + 27], 12 QUAD $0x1b3e4c203a0f4466; BYTE $0x0d // pinsrb xmm9, byte [rsi + rdi + 27], 13 QUAD $0x1b1e4c203a0f4466; BYTE $0x0e // pinsrb xmm9, byte [rsi + rbx + 27], 14 - QUAD $0x1b164c203a0f4666; BYTE $0x0f // pinsrb xmm9, byte [rsi + r10 + 27], 15 + QUAD $0x1b364c203a0f4666; BYTE $0x0f // pinsrb xmm9, byte [rsi + r14 + 27], 15 QUAD $0x1c1e64203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r11 + 28], 3 - QUAD $0x1c3664203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rsi + r14 + 28], 4 - QUAD 
$0x1c2e64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r13 + 28], 5 - QUAD $0x061c0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 28], 6 - QUAD $0x071c1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 28], 7 - QUAD $0x1c0664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r8 + 28], 8 - QUAD $0x1c0e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r9 + 28], 9 - QUAD $0x0a1c0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 28], 10 - QUAD $0x1c3e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 28], 11 - QUAD $0x1c2664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 28], 12 + QUAD $0x041c0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 28], 4 + QUAD $0x051c0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 28], 5 + QUAD $0x061c1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 28], 6 + QUAD $0x1c0664203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r8 + 28], 7 + QUAD $0x1c0e64203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r9 + 28], 8 + QUAD $0x1c1664203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r10 + 28], 9 + QUAD $0x1c3e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r15 + 28], 10 + QUAD $0x1c2664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r12 + 28], 11 + QUAD $0x1c2e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r13 + 28], 12 QUAD $0x0d1c3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 28], 13 QUAD $0x0e1c1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 28], 14 - QUAD $0x1c1664203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r10 + 28], 15 + QUAD $0x1c3664203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r14 + 28], 15 QUAD $0x1d1e6c203a0f4666; BYTE $0x03 // pinsrb xmm13, byte [rsi + r11 + 29], 3 - QUAD $0x1d366c203a0f4666; BYTE $0x04 // pinsrb xmm13, byte [rsi + r14 + 29], 4 - QUAD $0x1d2e6c203a0f4666; BYTE $0x05 // pinsrb xmm13, byte [rsi + r13 + 29], 5 - QUAD $0x1d0e6c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rcx + 29], 6 - QUAD $0x1d166c203a0f4466; BYTE $0x07 // pinsrb xmm13, byte [rsi + rdx + 29], 7 - QUAD $0x1d066c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r8 + 29], 8 - QUAD $0x1d0e6c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r9 + 29], 9 - QUAD $0x1d066c203a0f4466; BYTE $0x0a // pinsrb xmm13, byte [rsi + rax + 29], 10 - QUAD $0x1d3e6c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r15 + 29], 11 - QUAD $0x1d266c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r12 + 29], 12 + QUAD $0x1d066c203a0f4466; BYTE $0x04 // pinsrb xmm13, byte [rsi + rax + 29], 4 + QUAD $0x1d0e6c203a0f4466; BYTE $0x05 // pinsrb xmm13, byte [rsi + rcx + 29], 5 + QUAD $0x1d166c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rdx + 29], 6 + QUAD $0x1d066c203a0f4666; BYTE $0x07 // pinsrb xmm13, byte [rsi + r8 + 29], 7 + QUAD $0x1d0e6c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r9 + 29], 8 + QUAD $0x1d166c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r10 + 29], 9 + QUAD $0x1d3e6c203a0f4666; BYTE $0x0a // pinsrb xmm13, byte [rsi + r15 + 29], 10 + QUAD $0x1d266c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r12 + 29], 11 + QUAD $0x1d2e6c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r13 + 29], 12 QUAD $0x1d3e6c203a0f4466; BYTE $0x0d // pinsrb xmm13, byte [rsi + rdi + 29], 13 QUAD $0x1d1e6c203a0f4466; BYTE $0x0e // pinsrb xmm13, byte [rsi + rbx + 29], 14 LONG $0x6f0f4166; BYTE $0xcf // movdqa xmm1, xmm15 @@ -36413,37 +37799,36 @@ LBB7_85: LONG $0x640f4166; BYTE $0xe7 // pcmpgtb xmm4, xmm15 QUAD $0x000000d0a5db0f66 // pand xmm4, oword 208[rbp] /* [rip + .LCPI7_13] */ LONG $0xeb0f4166; BYTE $0xe1 // por xmm4, xmm9 - QUAD 
$0x1d166c203a0f4666; BYTE $0x0f // pinsrb xmm13, byte [rsi + r10 + 29], 15 + QUAD $0x1d366c203a0f4666; BYTE $0x0f // pinsrb xmm13, byte [rsi + r14 + 29], 15 LONG $0x640f4566; BYTE $0xef // pcmpgtb xmm13, xmm15 LONG $0xdb0f4466; BYTE $0xef // pand xmm13, xmm7 LONG $0xeb0f4466; BYTE $0xec // por xmm13, xmm4 QUAD $0x1e1e64203a0f4666; BYTE $0x03 // pinsrb xmm12, byte [rsi + r11 + 30], 3 QUAD $0x1f1e44203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r11 + 31], 3 - QUAD $0x1e3664203a0f4666; BYTE $0x04 // pinsrb xmm12, byte [rsi + r14 + 30], 4 - QUAD $0x1f3644203a0f4266; BYTE $0x04 // pinsrb xmm0, byte [rsi + r14 + 31], 4 - QUAD $0x1e2e64203a0f4666; BYTE $0x05 // pinsrb xmm12, byte [rsi + r13 + 30], 5 - QUAD $0x1f2e44203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r13 + 31], 5 - QUAD $0x1e0e64203a0f4466; BYTE $0x06 // pinsrb xmm12, byte [rsi + rcx + 30], 6 - QUAD $0x061f0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 31], 6 - QUAD $0x1e1664203a0f4466; BYTE $0x07 // pinsrb xmm12, byte [rsi + rdx + 30], 7 - QUAD $0x071f1644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 31], 7 - QUAD $0x1e0664203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r8 + 30], 8 - QUAD $0x1f0644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r8 + 31], 8 - QUAD $0x1e0e64203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r9 + 30], 9 - QUAD $0x1f0e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r9 + 31], 9 - QUAD $0x1e0664203a0f4466; BYTE $0x0a // pinsrb xmm12, byte [rsi + rax + 30], 10 - QUAD $0x0a1f0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 31], 10 - QUAD $0x1e3e64203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r15 + 30], 11 - QUAD $0x1f3e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r15 + 31], 11 - QUAD $0x1e2664203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r12 + 30], 12 - QUAD $0x1f2644203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r12 + 31], 12 + QUAD $0x1e0664203a0f4466; BYTE $0x04 // pinsrb xmm12, byte [rsi + rax + 30], 4 + QUAD $0x041f0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 31], 4 + QUAD $0x1e0e64203a0f4466; BYTE $0x05 // pinsrb xmm12, byte [rsi + rcx + 30], 5 + QUAD $0x051f0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 31], 5 + QUAD $0x1e1664203a0f4466; BYTE $0x06 // pinsrb xmm12, byte [rsi + rdx + 30], 6 + QUAD $0x061f1644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 31], 6 + QUAD $0x1e0664203a0f4666; BYTE $0x07 // pinsrb xmm12, byte [rsi + r8 + 30], 7 + QUAD $0x1f0644203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r8 + 31], 7 + QUAD $0x1e0e64203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r9 + 30], 8 + QUAD $0x1f0e44203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r9 + 31], 8 + QUAD $0x1e1664203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r10 + 30], 9 + QUAD $0x1f1644203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r10 + 31], 9 + QUAD $0x1e3e64203a0f4666; BYTE $0x0a // pinsrb xmm12, byte [rsi + r15 + 30], 10 + QUAD $0x1f3e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r15 + 31], 10 + QUAD $0x1e2664203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r12 + 30], 11 + QUAD $0x1f2644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r12 + 31], 11 + QUAD $0x1e2e64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r13 + 30], 12 + QUAD $0x1f2e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r13 + 31], 12 QUAD $0x1e3e64203a0f4466; BYTE $0x0d // pinsrb xmm12, byte [rsi + rdi + 30], 13 QUAD $0x0d1f3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 31], 13 QUAD $0x1e1e64203a0f4466; BYTE $0x0e // pinsrb xmm12, byte [rsi + rbx + 30], 14 QUAD $0x0e1f1e44203a0f66 // 
pinsrb xmm0, byte [rsi + rbx + 31], 14 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x1e1664203a0f4666; BYTE $0x0f // pinsrb xmm12, byte [rsi + r10 + 30], 15 - QUAD $0x1f1644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r10 + 31], 15 + QUAD $0x1e3664203a0f4666; BYTE $0x0f // pinsrb xmm12, byte [rsi + r14 + 30], 15 + QUAD $0x1f3644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r14 + 31], 15 LONG $0xeb0f4566; BYTE $0xeb // por xmm13, xmm11 LONG $0x640f4566; BYTE $0xe7 // pcmpgtb xmm12, xmm15 LONG $0xdb0f4466; BYTE $0xe5 // pand xmm12, xmm5 @@ -36454,7 +37839,7 @@ LBB7_85: LONG $0xeb0f4166; BYTE $0xc5 // por xmm0, xmm13 LONG $0x6f0f4166; BYTE $0xc8 // movdqa xmm1, xmm8 LONG $0xc8600f66 // punpcklbw xmm1, xmm0 - QUAD $0x0000d024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 208] + QUAD $0x0000a024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 160] LONG $0xd46f0f66 // movdqa xmm2, xmm4 LONG $0x600f4166; BYTE $0xd6 // punpcklbw xmm2, xmm14 LONG $0xda6f0f66 // movdqa xmm3, xmm2 @@ -36466,6 +37851,7 @@ LBB7_85: LONG $0x610f4166; BYTE $0xc0 // punpcklwd xmm0, xmm8 LONG $0x690f4166; BYTE $0xe0 // punpckhwd xmm4, xmm8 QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] + LONG $0x24048b48 // mov rax, qword [rsp] LONG $0x647f0ff3; WORD $0x3088 // movdqu oword [rax + 4*rcx + 48], xmm4 LONG $0x447f0ff3; WORD $0x2088 // movdqu oword [rax + 4*rcx + 32], xmm0 LONG $0x547f0ff3; WORD $0x1088 // movdqu oword [rax + 4*rcx + 16], xmm2 @@ -36476,44 +37862,44 @@ LBB7_85: JNE LBB7_85 QUAD $0x0000012024948b4c // mov r10, qword [rsp + 288] QUAD $0x000000e824943b4c // cmp r10, qword [rsp + 232] - LONG $0x24348a44 // mov r14b, byte [rsp] + LONG $0x24648a44; BYTE $0x08 // mov r12b, byte [rsp + 8] QUAD $0x0000010824b48b48 // mov rsi, qword [rsp + 264] QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] JNE LBB7_87 JMP LBB7_90 LBB7_66: - LONG $0xf0e28349 // and r10, -16 - WORD $0x894c; BYTE $0xd0 // mov rax, r10 + LONG $0xf0e78349 // and r15, -16 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi QUAD $0x0000014024848948 // mov qword [rsp + 320], rax - QUAD $0x000000e82494894c // mov qword [rsp + 232], r10 - LONG $0x94048d4b // lea rax, [r12 + 4*r10] - LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax - LONG $0x2444b60f; BYTE $0x28 // movzx eax, byte [rsp + 40] + QUAD $0x000000e824bc894c // mov qword [rsp + 232], r15 + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0xb8048d4a // lea rax, [rax + 4*r15] + LONG $0x24448948; BYTE $0x78 // mov qword [rsp + 120], rax + LONG $0xc2b60f41 // movzx eax, r10b LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 QUAD $0x000120248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 288], xmm1 WORD $0xc031 // xor eax, eax - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 LBB7_67: - QUAD $0x0000009024848948 // mov qword [rsp + 144], rax - QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x000000b024848948 // mov qword [rsp + 176], rax + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] LONG $0x05e0c148 // shl rax, 5 + LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax + WORD $0x8949; BYTE $0xc6 // mov r14, rax WORD $0x8948; BYTE $0xc2 // mov rdx, rax - WORD $0x8949; BYTE $0xc4 // mov r12, rax - WORD $0x8949; BYTE $0xc3 // mov r11, rax WORD $0x8948; BYTE $0xc7 // mov rdi, rax - LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax + WORD $0x8949; BYTE $0xc5 // mov r13, rax WORD 
$0x8949; BYTE $0xc1 // mov r9, rax - WORD $0x8949; BYTE $0xc7 // mov r15, rax WORD $0x8949; BYTE $0xc2 // mov r10, rax - WORD $0x8949; BYTE $0xc6 // mov r14, rax + WORD $0x8949; BYTE $0xc3 // mov r11, rax + WORD $0x8949; BYTE $0xc7 // mov r15, rax WORD $0x8949; BYTE $0xc0 // mov r8, rax - LONG $0x24448948; BYTE $0x68 // mov qword [rsp + 104], rax + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax LONG $0x060cb60f // movzx ecx, byte [rsi + rax] LONG $0x6e0f4466; BYTE $0xd1 // movd xmm10, ecx LONG $0x064cb60f; BYTE $0x01 // movzx ecx, byte [rsi + rax + 1] @@ -36530,12 +37916,12 @@ LBB7_67: LONG $0xc16e0f66 // movd xmm0, ecx LONG $0x064cb60f; BYTE $0x07 // movzx ecx, byte [rsi + rax + 7] LONG $0xc96e0f66 // movd xmm1, ecx - QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 + QUAD $0x0000c0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm1 LONG $0x064cb60f; BYTE $0x08 // movzx ecx, byte [rsi + rax + 8] LONG $0x6e0f4466; BYTE $0xf1 // movd xmm14, ecx LONG $0x064cb60f; BYTE $0x09 // movzx ecx, byte [rsi + rax + 9] LONG $0xc96e0f66 // movd xmm1, ecx - QUAD $0x0000c0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm1 + QUAD $0x0000d0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm1 LONG $0x064cb60f; BYTE $0x0a // movzx ecx, byte [rsi + rax + 10] LONG $0xd16e0f66 // movd xmm2, ecx LONG $0x064cb60f; BYTE $0x0b // movzx ecx, byte [rsi + rax + 11] @@ -36545,171 +37931,172 @@ LBB7_67: QUAD $0x000130248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 304], xmm1 LONG $0x064cb60f; BYTE $0x10 // movzx ecx, byte [rsi + rax + 16] LONG $0x6e0f4466; BYTE $0xe9 // movd xmm13, ecx - LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax + QUAD $0x000000a024848948 // mov qword [rsp + 160], rax LONG $0x064cb60f; BYTE $0x18 // movzx ecx, byte [rsi + rax + 24] LONG $0x6e0f4466; BYTE $0xf9 // movd xmm15, ecx - WORD $0x8949; BYTE $0xc5 // mov r13, rax - LONG $0x20cd8349 // or r13, 32 - LONG $0x246c894c; BYTE $0x38 // mov qword [rsp + 56], r13 - LONG $0x40ca8348 // or rdx, 64 - LONG $0x24548948; BYTE $0x58 // mov qword [rsp + 88], rdx - LONG $0x60cc8349 // or r12, 96 - LONG $0x2464894c; BYTE $0x10 // mov qword [rsp + 16], r12 - LONG $0x80cb8149; WORD $0x0000; BYTE $0x00 // or r11, 128 + WORD $0x8949; BYTE $0xc4 // mov r12, rax + LONG $0x20cc8349 // or r12, 32 + LONG $0x2464894c; BYTE $0x60 // mov qword [rsp + 96], r12 + LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] + LONG $0x40c98348 // or rcx, 64 + LONG $0x244c8948; BYTE $0x20 // mov qword [rsp + 32], rcx + LONG $0x60ce8349 // or r14, 96 + LONG $0x80ca8148; WORD $0x0000; BYTE $0x00 // or rdx, 128 LONG $0xa0cf8148; WORD $0x0000; BYTE $0x00 // or rdi, 160 - LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] - LONG $0xc0c98148; WORD $0x0000; BYTE $0x00 // or rcx, 192 - LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx + LONG $0xc0cd8149; WORD $0x0000; BYTE $0x00 // or r13, 192 + LONG $0x246c894c; BYTE $0x68 // mov qword [rsp + 104], r13 LONG $0xe0c98149; WORD $0x0000; BYTE $0x00 // or r9, 224 - LONG $0x00cf8149; WORD $0x0001; BYTE $0x00 // or r15, 256 - LONG $0x247c894c; BYTE $0x70 // mov qword [rsp + 112], r15 - LONG $0x20ca8149; WORD $0x0001; BYTE $0x00 // or r10, 288 - LONG $0x2454894c; BYTE $0x78 // mov qword [rsp + 120], r10 - LONG $0x40ce8149; WORD $0x0001; BYTE $0x00 // or r14, 320 + LONG $0x00ca8149; WORD $0x0001; BYTE $0x00 // or r10, 256 + LONG $0x2454894c; BYTE $0x70 // mov qword [rsp + 112], r10 + LONG $0x20cb8149; WORD $0x0001; BYTE $0x00 // or r11, 288 + LONG $0x40cf8149; WORD $0x0001; 
BYTE $0x00 // or r15, 320 LONG $0x60c88149; WORD $0x0001; BYTE $0x00 // or r8, 352 - QUAD $0x000000d02484894c // mov qword [rsp + 208], r8 - LONG $0x24448b4c; BYTE $0x68 // mov r8, qword [rsp + 104] + LONG $0x2444894c; BYTE $0x28 // mov qword [rsp + 40], r8 + LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] LONG $0x80c88149; WORD $0x0001; BYTE $0x00 // or r8, 384 WORD $0x8948; BYTE $0xc3 // mov rbx, rax LONG $0xa0cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 416 - LONG $0x241c8948 // mov qword [rsp], rbx + LONG $0x245c8948; BYTE $0x10 // mov qword [rsp + 16], rbx WORD $0x8948; BYTE $0xc3 // mov rbx, rax LONG $0xc0cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 448 LONG $0x245c8948; BYTE $0x18 // mov qword [rsp + 24], rbx - WORD $0x8948; BYTE $0xc3 // mov rbx, rax - LONG $0xe0cb8148; WORD $0x0001; BYTE $0x00 // or rbx, 480 - LONG $0x245c8948; BYTE $0x20 // mov qword [rsp + 32], rbx - QUAD $0x012e14203a0f4666 // pinsrb xmm10, byte [rsi + r13], 1 - QUAD $0x021614203a0f4466 // pinsrb xmm10, byte [rsi + rdx], 2 - QUAD $0x032614203a0f4666 // pinsrb xmm10, byte [rsi + r12], 3 - WORD $0x894d; BYTE $0xdc // mov r12, r11 - LONG $0x245c894c; BYTE $0x30 // mov qword [rsp + 48], r11 - QUAD $0x041e14203a0f4666 // pinsrb xmm10, byte [rsi + r11], 4 + LONG $0x01e00d48; WORD $0x0000 // or rax, 480 + LONG $0x24448948; BYTE $0x48 // mov qword [rsp + 72], rax + QUAD $0x012614203a0f4666 // pinsrb xmm10, byte [rsi + r12], 1 + QUAD $0x020e14203a0f4466 // pinsrb xmm10, byte [rsi + rcx], 2 + QUAD $0x033614203a0f4666 // pinsrb xmm10, byte [rsi + r14], 3 + LONG $0x2474894c; BYTE $0x40 // mov qword [rsp + 64], r14 + QUAD $0x041614203a0f4466 // pinsrb xmm10, byte [rsi + rdx], 4 QUAD $0x053e14203a0f4466 // pinsrb xmm10, byte [rsi + rdi], 5 - LONG $0x247c8948; BYTE $0x40 // mov qword [rsp + 64], rdi - QUAD $0x060e14203a0f4466 // pinsrb xmm10, byte [rsi + rcx], 6 + QUAD $0x062e14203a0f4666 // pinsrb xmm10, byte [rsi + r13], 6 QUAD $0x070e14203a0f4666 // pinsrb xmm10, byte [rsi + r9], 7 - WORD $0x894d; BYTE $0xcb // mov r11, r9 - QUAD $0x083e14203a0f4666 // pinsrb xmm10, byte [rsi + r15], 8 - QUAD $0x091614203a0f4666 // pinsrb xmm10, byte [rsi + r10], 9 - QUAD $0x0a3614203a0f4666 // pinsrb xmm10, byte [rsi + r14], 10 - QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] - QUAD $0x0b2e14203a0f4666 // pinsrb xmm10, byte [rsi + r13], 11 + QUAD $0x081614203a0f4666 // pinsrb xmm10, byte [rsi + r10], 8 + QUAD $0x091e14203a0f4666 // pinsrb xmm10, byte [rsi + r11], 9 + QUAD $0x0a3e14203a0f4666 // pinsrb xmm10, byte [rsi + r15], 10 + LONG $0x24648b4c; BYTE $0x28 // mov r12, qword [rsp + 40] + QUAD $0x0b2614203a0f4666 // pinsrb xmm10, byte [rsi + r12], 11 QUAD $0x0c0614203a0f4666 // pinsrb xmm10, byte [rsi + r8], 12 - LONG $0x24048b48 // mov rax, qword [rsp] - QUAD $0x0d0614203a0f4466 // pinsrb xmm10, byte [rsi + rax], 13 - WORD $0x8949; BYTE $0xc7 // mov r15, rax + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] + QUAD $0x0d1e14203a0f4466 // pinsrb xmm10, byte [rsi + rbx], 13 LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0e0614203a0f4466 // pinsrb xmm10, byte [rsi + rax], 14 - QUAD $0x0f1e14203a0f4466 // pinsrb xmm10, byte [rsi + rbx], 15 + LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] + QUAD $0x0f1614203a0f4666 // pinsrb xmm10, byte [rsi + r10], 15 LONG $0x6f0f4566; BYTE $0xc2 // movdqa xmm8, xmm10 QUAD $0x012024a46f0f4466; WORD $0x0000 // movdqa xmm12, oword [rsp + 288] LONG $0xda0f4566; BYTE $0xc4 // pminub xmm8, xmm12 LONG $0x740f4566; BYTE $0xc2 // pcmpeqb xmm8, xmm10 - LONG 
$0x244c8b4c; BYTE $0x38 // mov r9, qword [rsp + 56] - QUAD $0x010e64203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rsi + r9 + 1], 1 - QUAD $0x02011664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 1], 2 - LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] - QUAD $0x03011e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 1], 3 - QUAD $0x012664203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rsi + r12 + 1], 4 + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x01010664203a0f66 // pinsrb xmm4, byte [rsi + rax + 1], 1 + QUAD $0x02010e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 1], 2 + QUAD $0x013664203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r14 + 1], 3 + QUAD $0x04011664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 1], 4 QUAD $0x05013e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 1], 5 - QUAD $0x06010e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 1], 6 - QUAD $0x011e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r11 + 1], 7 - LONG $0x24548b4c; BYTE $0x70 // mov r10, qword [rsp + 112] - QUAD $0x011664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r10 + 1], 8 - LONG $0x245c8b48; BYTE $0x78 // mov rbx, qword [rsp + 120] - QUAD $0x09011e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 1], 9 - QUAD $0x013664203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r14 + 1], 10 - QUAD $0x012e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r13 + 1], 11 + QUAD $0x012e64203a0f4266; BYTE $0x06 // pinsrb xmm4, byte [rsi + r13 + 1], 6 + WORD $0x894d; BYTE $0xce // mov r14, r9 + QUAD $0x010e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r9 + 1], 7 + LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] + QUAD $0x08011e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 1], 8 + QUAD $0x011e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r11 + 1], 9 + QUAD $0x013e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r15 + 1], 10 + QUAD $0x012664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r12 + 1], 11 QUAD $0x010664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r8 + 1], 12 - QUAD $0x013e64203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r15 + 1], 13 + LONG $0x244c8b4c; BYTE $0x10 // mov r9, qword [rsp + 16] + QUAD $0x010e64203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r9 + 1], 13 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x0e010664203a0f66 // pinsrb xmm4, byte [rsi + rax + 1], 14 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0f010664203a0f66 // pinsrb xmm4, byte [rsi + rax + 1], 15 + QUAD $0x011664203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r10 + 1], 15 + LONG $0x244c8b4c; BYTE $0x60 // mov r9, qword [rsp + 96] QUAD $0x020e74203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r9 + 2], 1 - QUAD $0x02021674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 2], 2 - LONG $0x247c8b4c; BYTE $0x10 // mov r15, qword [rsp + 16] - QUAD $0x023e74203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r15 + 2], 3 - QUAD $0x022674203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r12 + 2], 4 + QUAD $0x02020e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 2], 2 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x03020674203a0f66 // pinsrb xmm6, byte [rsi + rax + 2], 3 + QUAD $0x04021674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 2], 4 QUAD $0x05023e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 2], 5 - QUAD $0x06020e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 2], 6 - QUAD $0x021e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r11 + 2], 7 - QUAD $0x021674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r10 + 2], 8 - QUAD 
$0x09021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 9 - QUAD $0x023674203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r14 + 2], 10 - QUAD $0x022e74203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r13 + 2], 11 + QUAD $0x022e74203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r13 + 2], 6 + QUAD $0x023674203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r14 + 2], 7 + QUAD $0x08021e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 2], 8 + QUAD $0x021e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r11 + 2], 9 + QUAD $0x023e74203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r15 + 2], 10 + QUAD $0x022674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r12 + 2], 11 QUAD $0x020674203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r8 + 2], 12 - LONG $0x243c8b48 // mov rdi, qword [rsp] - QUAD $0x0d023e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 2], 13 - LONG $0x247c8b4c; BYTE $0x18 // mov r15, qword [rsp + 24] - QUAD $0x023e74203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r15 + 2], 14 - QUAD $0x0f020674203a0f66 // pinsrb xmm6, byte [rsi + rax + 2], 15 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + QUAD $0x0d020674203a0f66 // pinsrb xmm6, byte [rsi + rax + 2], 13 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0e020674203a0f66 // pinsrb xmm6, byte [rsi + rax + 2], 14 + QUAD $0x021674203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r10 + 2], 15 QUAD $0x080e74203a0f4666; BYTE $0x01 // pinsrb xmm14, byte [rsi + r9 + 8], 1 - QUAD $0x081674203a0f4466; BYTE $0x02 // pinsrb xmm14, byte [rsi + rdx + 8], 2 - LONG $0x247c8b4c; BYTE $0x10 // mov r15, qword [rsp + 16] - QUAD $0x083e74203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rsi + r15 + 8], 3 - QUAD $0x082674203a0f4666; BYTE $0x04 // pinsrb xmm14, byte [rsi + r12 + 8], 4 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, qword [rsp + 64] + QUAD $0x080e74203a0f4466; BYTE $0x02 // pinsrb xmm14, byte [rsi + rcx + 8], 2 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x080674203a0f4466; BYTE $0x03 // pinsrb xmm14, byte [rsi + rax + 8], 3 + QUAD $0x081674203a0f4466; BYTE $0x04 // pinsrb xmm14, byte [rsi + rdx + 8], 4 QUAD $0x083e74203a0f4466; BYTE $0x05 // pinsrb xmm14, byte [rsi + rdi + 8], 5 - QUAD $0x080e74203a0f4466; BYTE $0x06 // pinsrb xmm14, byte [rsi + rcx + 8], 6 - QUAD $0x081e74203a0f4666; BYTE $0x07 // pinsrb xmm14, byte [rsi + r11 + 8], 7 - QUAD $0x081674203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r10 + 8], 8 - QUAD $0x081e74203a0f4466; BYTE $0x09 // pinsrb xmm14, byte [rsi + rbx + 8], 9 - QUAD $0x083674203a0f4666; BYTE $0x0a // pinsrb xmm14, byte [rsi + r14 + 8], 10 - QUAD $0x082e74203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r13 + 8], 11 + QUAD $0x082e74203a0f4666; BYTE $0x06 // pinsrb xmm14, byte [rsi + r13 + 8], 6 + QUAD $0x083674203a0f4666; BYTE $0x07 // pinsrb xmm14, byte [rsi + r14 + 8], 7 + QUAD $0x081e74203a0f4466; BYTE $0x08 // pinsrb xmm14, byte [rsi + rbx + 8], 8 + QUAD $0x081e74203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r11 + 8], 9 + QUAD $0x083e74203a0f4666; BYTE $0x0a // pinsrb xmm14, byte [rsi + r15 + 8], 10 + QUAD $0x082674203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r12 + 8], 11 QUAD $0x080674203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rsi + r8 + 8], 12 - LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x080674203a0f4466; BYTE $0x0d // pinsrb xmm14, byte [rsi + rax + 8], 13 LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] QUAD $0x080674203a0f4466; BYTE $0x0e // 
pinsrb xmm14, byte [rsi + rax + 8], 14 - LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] QUAD $0x081674203a0f4666; BYTE $0x0f // pinsrb xmm14, byte [rsi + r10 + 8], 15 LONG $0x6f0f4566; BYTE $0xd6 // movdqa xmm10, xmm14 LONG $0xda0f4566; BYTE $0xd4 // pminub xmm10, xmm12 LONG $0x740f4566; BYTE $0xd6 // pcmpeqb xmm10, xmm14 QUAD $0x100e6c203a0f4666; BYTE $0x01 // pinsrb xmm13, byte [rsi + r9 + 16], 1 - QUAD $0x10166c203a0f4466; BYTE $0x02 // pinsrb xmm13, byte [rsi + rdx + 16], 2 - WORD $0x894d; BYTE $0xf9 // mov r9, r15 - QUAD $0x103e6c203a0f4666; BYTE $0x03 // pinsrb xmm13, byte [rsi + r15 + 16], 3 - QUAD $0x10266c203a0f4666; BYTE $0x04 // pinsrb xmm13, byte [rsi + r12 + 16], 4 + QUAD $0x100e6c203a0f4466; BYTE $0x02 // pinsrb xmm13, byte [rsi + rcx + 16], 2 + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] + QUAD $0x10066c203a0f4466; BYTE $0x03 // pinsrb xmm13, byte [rsi + rax + 16], 3 + QUAD $0x10166c203a0f4466; BYTE $0x04 // pinsrb xmm13, byte [rsi + rdx + 16], 4 + WORD $0x8948; BYTE $0xd0 // mov rax, rdx + LONG $0x24548948; BYTE $0x58 // mov qword [rsp + 88], rdx QUAD $0x103e6c203a0f4466; BYTE $0x05 // pinsrb xmm13, byte [rsi + rdi + 16], 5 - QUAD $0x100e6c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rcx + 16], 6 - QUAD $0x101e6c203a0f4666; BYTE $0x07 // pinsrb xmm13, byte [rsi + r11 + 16], 7 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] - QUAD $0x103e6c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r15 + 16], 8 - QUAD $0x101e6c203a0f4466; BYTE $0x09 // pinsrb xmm13, byte [rsi + rbx + 16], 9 - QUAD $0x10366c203a0f4666; BYTE $0x0a // pinsrb xmm13, byte [rsi + r14 + 16], 10 - QUAD $0x102e6c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r13 + 16], 11 + LONG $0x247c8948; BYTE $0x50 // mov qword [rsp + 80], rdi + QUAD $0x102e6c203a0f4666; BYTE $0x06 // pinsrb xmm13, byte [rsi + r13 + 16], 6 + QUAD $0x10366c203a0f4666; BYTE $0x07 // pinsrb xmm13, byte [rsi + r14 + 16], 7 + QUAD $0x101e6c203a0f4466; BYTE $0x08 // pinsrb xmm13, byte [rsi + rbx + 16], 8 + WORD $0x8949; BYTE $0xdd // mov r13, rbx + QUAD $0x101e6c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r11 + 16], 9 + QUAD $0x00000080249c894c // mov qword [rsp + 128], r11 + QUAD $0x103e6c203a0f4666; BYTE $0x0a // pinsrb xmm13, byte [rsi + r15 + 16], 10 + LONG $0x247c894c; BYTE $0x30 // mov qword [rsp + 48], r15 + QUAD $0x10266c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r12 + 16], 11 QUAD $0x10066c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r8 + 16], 12 - LONG $0x24248b4c // mov r12, qword [rsp] - QUAD $0x10266c203a0f4666; BYTE $0x0d // pinsrb xmm13, byte [rsi + r12 + 16], 13 - LONG $0x244c8b48; BYTE $0x18 // mov rcx, qword [rsp + 24] - QUAD $0x100e6c203a0f4466; BYTE $0x0e // pinsrb xmm13, byte [rsi + rcx + 16], 14 - QUAD $0x10166c203a0f4666; BYTE $0x0f // pinsrb xmm13, byte [rsi + r10 + 16], 15 + WORD $0x894c; BYTE $0xc3 // mov rbx, r8 + LONG $0x2444894c; BYTE $0x38 // mov qword [rsp + 56], r8 + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + QUAD $0x10166c203a0f4466; BYTE $0x0d // pinsrb xmm13, byte [rsi + rdx + 16], 13 + LONG $0x24548b4c; BYTE $0x18 // mov r10, qword [rsp + 24] + QUAD $0x10166c203a0f4666; BYTE $0x0e // pinsrb xmm13, byte [rsi + r10 + 16], 14 + LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] + QUAD $0x10066c203a0f4666; BYTE $0x0f // pinsrb xmm13, byte [rsi + r8 + 16], 15 LONG $0x6f0f4166; BYTE $0xdd // movdqa xmm3, xmm13 LONG $0xda0f4166; BYTE $0xdc // pminub xmm3, xmm12 LONG $0x740f4166; BYTE $0xdd // pcmpeqb xmm3, xmm13 QUAD 
$0x000110249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm3 - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] - QUAD $0x18067c203a0f4466; BYTE $0x01 // pinsrb xmm15, byte [rsi + rax + 24], 1 - QUAD $0x18167c203a0f4466; BYTE $0x02 // pinsrb xmm15, byte [rsi + rdx + 24], 2 + QUAD $0x180e7c203a0f4666; BYTE $0x01 // pinsrb xmm15, byte [rsi + r9 + 24], 1 + QUAD $0x180e7c203a0f4466; BYTE $0x02 // pinsrb xmm15, byte [rsi + rcx + 24], 2 + LONG $0x244c8b4c; BYTE $0x40 // mov r9, qword [rsp + 64] QUAD $0x180e7c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rsi + r9 + 24], 3 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] - QUAD $0x180e7c203a0f4666; BYTE $0x04 // pinsrb xmm15, byte [rsi + r9 + 24], 4 + QUAD $0x18067c203a0f4466; BYTE $0x04 // pinsrb xmm15, byte [rsi + rax + 24], 4 QUAD $0x183e7c203a0f4466; BYTE $0x05 // pinsrb xmm15, byte [rsi + rdi + 24], 5 - LONG $0x24448b48; BYTE $0x08 // mov rax, qword [rsp + 8] + LONG $0x24448b48; BYTE $0x68 // mov rax, qword [rsp + 104] QUAD $0x18067c203a0f4466; BYTE $0x06 // pinsrb xmm15, byte [rsi + rax + 24], 6 - QUAD $0x181e7c203a0f4666; BYTE $0x07 // pinsrb xmm15, byte [rsi + r11 + 24], 7 - QUAD $0x183e7c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r15 + 24], 8 - QUAD $0x181e7c203a0f4466; BYTE $0x09 // pinsrb xmm15, byte [rsi + rbx + 24], 9 - QUAD $0x18367c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r14 + 24], 10 - QUAD $0x182e7c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r13 + 24], 11 - QUAD $0x18067c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r8 + 24], 12 - QUAD $0x18267c203a0f4666; BYTE $0x0d // pinsrb xmm15, byte [rsi + r12 + 24], 13 - WORD $0x894d; BYTE $0xe7 // mov r15, r12 - QUAD $0x180e7c203a0f4466; BYTE $0x0e // pinsrb xmm15, byte [rsi + rcx + 24], 14 - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - QUAD $0x18167c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r10 + 24], 15 + QUAD $0x18367c203a0f4666; BYTE $0x07 // pinsrb xmm15, byte [rsi + r14 + 24], 7 + QUAD $0x182e7c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r13 + 24], 8 + WORD $0x894c; BYTE $0xe8 // mov rax, r13 + QUAD $0x181e7c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r11 + 24], 9 + QUAD $0x183e7c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r15 + 24], 10 + QUAD $0x18267c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r12 + 24], 11 + WORD $0x894c; BYTE $0xe1 // mov rcx, r12 + QUAD $0x181e7c203a0f4466; BYTE $0x0c // pinsrb xmm15, byte [rsi + rbx + 24], 12 + QUAD $0x18167c203a0f4466; BYTE $0x0d // pinsrb xmm15, byte [rsi + rdx + 24], 13 + QUAD $0x18167c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r10 + 24], 14 + WORD $0x894d; BYTE $0xd5 // mov r13, r10 + QUAD $0x18067c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r8 + 24], 15 LONG $0x6f0f4166; BYTE $0xdf // movdqa xmm3, xmm15 LONG $0xda0f4166; BYTE $0xdc // pminub xmm3, xmm12 LONG $0x740f4166; BYTE $0xdf // pcmpeqb xmm3, xmm15 @@ -36724,104 +38111,106 @@ LBB7_67: LONG $0x6f0f4466; BYTE $0xf6 // movdqa xmm14, xmm6 LONG $0xda0f4566; BYTE $0xf4 // pminub xmm14, xmm12 LONG $0x740f4466; BYTE $0xf6 // pcmpeqb xmm14, xmm6 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] - LONG $0x0e54b60f; BYTE $0x0d // movzx edx, byte [rsi + rcx + 13] + QUAD $0x000000a024948b48 // mov rdx, qword [rsp + 160] + LONG $0x1654b60f; BYTE $0x0d // movzx edx, byte [rsi + rdx + 13] LONG $0xf26e0f66 // movd xmm6, edx - LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] - QUAD $0x03266c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rsi + r12 + 3], 1 - LONG 
$0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] - QUAD $0x02030e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 3], 2 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] - QUAD $0x0303166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 3], 3 - QUAD $0x030e6c203a0f4266; BYTE $0x04 // pinsrb xmm5, byte [rsi + r9 + 3], 4 - LONG $0x24548b4c; BYTE $0x40 // mov r10, qword [rsp + 64] - QUAD $0x03166c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r10 + 3], 5 - LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + QUAD $0x01033e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 3], 1 + LONG $0x245c8b4c; BYTE $0x20 // mov r11, qword [rsp + 32] + QUAD $0x031e6c203a0f4266; BYTE $0x02 // pinsrb xmm5, byte [rsi + r11 + 3], 2 + QUAD $0x030e6c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r9 + 3], 3 + LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] + QUAD $0x04031e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 3], 4 + LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] + QUAD $0x0503166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 3], 5 + LONG $0x24548b48; BYTE $0x68 // mov rdx, qword [rsp + 104] QUAD $0x0603166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 3], 6 - LONG $0x245c894c; BYTE $0x50 // mov qword [rsp + 80], r11 - QUAD $0x031e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r11 + 3], 7 - LONG $0x244c8b4c; BYTE $0x70 // mov r9, qword [rsp + 112] - QUAD $0x030e6c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r9 + 3], 8 - QUAD $0x09031e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 3], 9 - QUAD $0x000000a024b4894c // mov qword [rsp + 160], r14 - QUAD $0x03366c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r14 + 3], 10 - QUAD $0x032e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r13 + 3], 11 - QUAD $0x03066c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r8 + 3], 12 - QUAD $0x033e6c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r15 + 3], 13 - QUAD $0x0e03066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 3], 14 - LONG $0x247c8b4c; BYTE $0x20 // mov r15, qword [rsp + 32] - QUAD $0x033e6c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r15 + 3], 15 - QUAD $0x04264c203a0f4666; BYTE $0x01 // pinsrb xmm9, byte [rsi + r12 + 4], 1 - QUAD $0x040e4c203a0f4466; BYTE $0x02 // pinsrb xmm9, byte [rsi + rcx + 4], 2 - LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] - QUAD $0x043e4c203a0f4466; BYTE $0x03 // pinsrb xmm9, byte [rsi + rdi + 4], 3 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] - QUAD $0x043e4c203a0f4466; BYTE $0x04 // pinsrb xmm9, byte [rsi + rdi + 4], 4 - QUAD $0x04164c203a0f4666; BYTE $0x05 // pinsrb xmm9, byte [rsi + r10 + 4], 5 + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 + QUAD $0x03366c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r14 + 3], 7 + QUAD $0x0803066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 3], 8 + QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] + QUAD $0x033e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r15 + 3], 9 + LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] + QUAD $0x03266c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r12 + 3], 10 + QUAD $0x0b030e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 3], 11 + LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + QUAD $0x0c03066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 3], 12 + LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] + QUAD $0x03166c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r10 + 3], 13 + WORD $0x894d; BYTE $0xee // mov r14, r13 + QUAD 
$0x032e6c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rsi + r13 + 3], 14 + LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] + QUAD $0x032e6c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r13 + 3], 15 + QUAD $0x043e4c203a0f4466; BYTE $0x01 // pinsrb xmm9, byte [rsi + rdi + 4], 1 + QUAD $0x041e4c203a0f4666; BYTE $0x02 // pinsrb xmm9, byte [rsi + r11 + 4], 2 + QUAD $0x040e4c203a0f4666; BYTE $0x03 // pinsrb xmm9, byte [rsi + r9 + 4], 3 + QUAD $0x041e4c203a0f4466; BYTE $0x04 // pinsrb xmm9, byte [rsi + rbx + 4], 4 + LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] + QUAD $0x04064c203a0f4666; BYTE $0x05 // pinsrb xmm9, byte [rsi + r8 + 4], 5 QUAD $0x04164c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rdx + 4], 6 - QUAD $0x041e4c203a0f4666; BYTE $0x07 // pinsrb xmm9, byte [rsi + r11 + 4], 7 - QUAD $0x040e4c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r9 + 4], 8 - QUAD $0x041e4c203a0f4466; BYTE $0x09 // pinsrb xmm9, byte [rsi + rbx + 4], 9 - QUAD $0x04364c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r14 + 4], 10 - QUAD $0x042e4c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r13 + 4], 11 - QUAD $0x04064c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r8 + 4], 12 - LONG $0x243c8b48 // mov rdi, qword [rsp] - QUAD $0x043e4c203a0f4466; BYTE $0x0d // pinsrb xmm9, byte [rsi + rdi + 4], 13 - QUAD $0x04064c203a0f4466; BYTE $0x0e // pinsrb xmm9, byte [rsi + rax + 4], 14 - QUAD $0x043e4c203a0f4666; BYTE $0x0f // pinsrb xmm9, byte [rsi + r15 + 4], 15 - QUAD $0x05267c203a0f4266; BYTE $0x01 // pinsrb xmm7, byte [rsi + r12 + 5], 1 - QUAD $0x02050e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 5], 2 - LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] - QUAD $0x03053e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 5], 3 - LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] - QUAD $0x04053e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 5], 4 - QUAD $0x05167c203a0f4266; BYTE $0x05 // pinsrb xmm7, byte [rsi + r10 + 5], 5 + QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] + QUAD $0x040e4c203a0f4466; BYTE $0x07 // pinsrb xmm9, byte [rsi + rcx + 4], 7 + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + QUAD $0x040e4c203a0f4466; BYTE $0x08 // pinsrb xmm9, byte [rsi + rcx + 4], 8 + QUAD $0x043e4c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r15 + 4], 9 + QUAD $0x04264c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r12 + 4], 10 + LONG $0x244c8b48; BYTE $0x28 // mov rcx, qword [rsp + 40] + QUAD $0x040e4c203a0f4466; BYTE $0x0b // pinsrb xmm9, byte [rsi + rcx + 4], 11 + WORD $0x8949; BYTE $0xc0 // mov r8, rax + QUAD $0x04064c203a0f4466; BYTE $0x0c // pinsrb xmm9, byte [rsi + rax + 4], 12 + QUAD $0x04164c203a0f4666; BYTE $0x0d // pinsrb xmm9, byte [rsi + r10 + 4], 13 + QUAD $0x04364c203a0f4666; BYTE $0x0e // pinsrb xmm9, byte [rsi + r14 + 4], 14 + QUAD $0x042e4c203a0f4666; BYTE $0x0f // pinsrb xmm9, byte [rsi + r13 + 4], 15 + QUAD $0x01053e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 5], 1 + QUAD $0x051e7c203a0f4266; BYTE $0x02 // pinsrb xmm7, byte [rsi + r11 + 5], 2 + QUAD $0x050e7c203a0f4266; BYTE $0x03 // pinsrb xmm7, byte [rsi + r9 + 5], 3 + QUAD $0x04051e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 5], 4 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0505067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 5], 5 QUAD $0x0605167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 5], 6 - QUAD $0x051e7c203a0f4266; BYTE $0x07 // pinsrb xmm7, byte [rsi + r11 + 5], 7 - QUAD $0x050e7c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r9 + 5], 
8 - QUAD $0x09051e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 5], 9 - QUAD $0x05367c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r14 + 5], 10 - QUAD $0x052e7c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r13 + 5], 11 + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + QUAD $0x0705067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 5], 7 + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + QUAD $0x08050e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 5], 8 + QUAD $0x053e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r15 + 5], 9 + QUAD $0x05267c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r12 + 5], 10 + LONG $0x24448b48; BYTE $0x28 // mov rax, qword [rsp + 40] + QUAD $0x0b05067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 5], 11 QUAD $0x05067c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r8 + 5], 12 - LONG $0x243c8b48 // mov rdi, qword [rsp] - QUAD $0x0d053e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 5], 13 - QUAD $0x0e05067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 5], 14 - QUAD $0x053e7c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r15 + 5], 15 - QUAD $0x062644203a0f4266; BYTE $0x01 // pinsrb xmm0, byte [rsi + r12 + 6], 1 - QUAD $0x02060e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 6], 2 - WORD $0x8949; BYTE $0xcf // mov r15, rcx - LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] - QUAD $0x062644203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r12 + 6], 3 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - QUAD $0x04060e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 6], 4 - QUAD $0x061644203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r10 + 6], 5 + QUAD $0x05167c203a0f4266; BYTE $0x0d // pinsrb xmm7, byte [rsi + r10 + 5], 13 + QUAD $0x05367c203a0f4266; BYTE $0x0e // pinsrb xmm7, byte [rsi + r14 + 5], 14 + QUAD $0x052e7c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r13 + 5], 15 + QUAD $0x01063e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 6], 1 + QUAD $0x061e44203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r11 + 6], 2 + QUAD $0x060e44203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r9 + 6], 3 + QUAD $0x04061e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 6], 4 + LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] + QUAD $0x05063e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 6], 5 QUAD $0x06061644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 6], 6 - QUAD $0x061e44203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r11 + 6], 7 - QUAD $0x060e44203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r9 + 6], 8 - WORD $0x894d; BYTE $0xcb // mov r11, r9 - QUAD $0x09061e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 6], 9 - QUAD $0x063644203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r14 + 6], 10 - QUAD $0x062e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r13 + 6], 11 - WORD $0x894d; BYTE $0xee // mov r14, r13 + QUAD $0x00000090248c8b4c // mov r9, qword [rsp + 144] + QUAD $0x060e44203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r9 + 6], 7 + QUAD $0x08060e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 6], 8 + QUAD $0x063e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r15 + 6], 9 + QUAD $0x062644203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r12 + 6], 10 + QUAD $0x0b060644203a0f66 // pinsrb xmm0, byte [rsi + rax + 6], 11 QUAD $0x060644203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r8 + 6], 12 - WORD $0x894d; BYTE $0xc5 // mov r13, r8 - LONG $0x24048b4c // mov r8, qword [rsp] - QUAD $0x060644203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r8 + 6], 13 + QUAD $0x061644203a0f4266; BYTE $0x0d // pinsrb xmm0, 
byte [rsi + r10 + 6], 13 LONG $0xdf0f4466; BYTE $0xc4 // pandn xmm8, xmm4 - QUAD $0x0e060644203a0f66 // pinsrb xmm0, byte [rsi + rax + 6], 14 + QUAD $0x063644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r14 + 6], 14 + WORD $0x894d; BYTE $0xf4 // mov r12, r14 QUAD $0x000000b0a56f0f66 // movdqa xmm4, oword 176[rbp] /* [rip + .LCPI7_11] */ LONG $0xdf0f4466; BYTE $0xf4 // pandn xmm14, xmm4 LONG $0xeb0f4566; BYTE $0xf0 // por xmm14, xmm8 LONG $0x6f0f4466; BYTE $0xfd // movdqa xmm15, xmm5 LONG $0xda0f4566; BYTE $0xfc // pminub xmm15, xmm12 LONG $0x740f4466; BYTE $0xfd // pcmpeqb xmm15, xmm5 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - LONG $0x0654b60f; BYTE $0x0e // movzx edx, byte [rsi + rax + 14] + QUAD $0x000000a0249c8b48 // mov rbx, qword [rsp + 160] + LONG $0x1e54b60f; BYTE $0x0e // movzx edx, byte [rsi + rbx + 14] LONG $0xea6e0f66 // movd xmm5, edx QUAD $0x000000c0a56f0f66 // movdqa xmm4, oword 192[rbp] /* [rip + .LCPI7_12] */ LONG $0xdf0f4466; BYTE $0xfc // pandn xmm15, xmm4 LONG $0xeb0f4566; BYTE $0xfe // por xmm15, xmm14 - LONG $0x0654b60f; BYTE $0x0f // movzx edx, byte [rsi + rax + 15] + LONG $0x1e54b60f; BYTE $0x0f // movzx edx, byte [rsi + rbx + 15] LONG $0x6e0f4466; BYTE $0xc2 // movd xmm8, edx LONG $0xdb760f66 // pcmpeqd xmm3, xmm3 LONG $0xf80f4466; BYTE $0xeb // psubb xmm13, xmm3 @@ -36833,10 +38222,9 @@ LBB7_67: LONG $0x6f0f4466; BYTE $0xcf // movdqa xmm9, xmm7 LONG $0xda0f4566; BYTE $0xcc // pminub xmm9, xmm12 LONG $0x740f4466; BYTE $0xcf // pcmpeqb xmm9, xmm7 - LONG $0x0654b60f; BYTE $0x11 // movzx edx, byte [rsi + rax + 17] + LONG $0x1e54b60f; BYTE $0x11 // movzx edx, byte [rsi + rbx + 17] LONG $0xfa6e0f66 // movd xmm7, edx - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] - QUAD $0x0f063e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 6], 15 + QUAD $0x062e44203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r13 + 6], 15 QUAD $0x000000d08d6f0f66 // movdqa xmm1, oword 208[rbp] /* [rip + .LCPI7_13] */ LONG $0xe1df0f66 // pandn xmm4, xmm1 QUAD $0x000000e08d6f0f66 // movdqa xmm1, oword 224[rbp] /* [rip + .LCPI7_14] */ @@ -36845,39 +38233,42 @@ LBB7_67: LONG $0xe06f0f66 // movdqa xmm4, xmm0 LONG $0xda0f4166; BYTE $0xe4 // pminub xmm4, xmm12 LONG $0xe0740f66 // pcmpeqb xmm4, xmm0 - LONG $0x0654b60f; BYTE $0x12 // movzx edx, byte [rsi + rax + 18] + LONG $0x1e54b60f; BYTE $0x12 // movzx edx, byte [rsi + rbx + 18] LONG $0xca6e0f66 // movd xmm1, edx - QUAD $0x0000b024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 176] - LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] - QUAD $0x01070e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 7], 1 - QUAD $0x073e44203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r15 + 7], 2 - WORD $0x894c; BYTE $0xe3 // mov rbx, r12 - QUAD $0x072644203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r12 + 7], 3 - LONG $0x244c8b4c; BYTE $0x30 // mov r9, qword [rsp + 48] - QUAD $0x070e44203a0f4266; BYTE $0x04 // pinsrb xmm0, byte [rsi + r9 + 7], 4 - QUAD $0x071644203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r10 + 7], 5 - LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] + QUAD $0x0000c024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 192] + LONG $0x24548b4c; BYTE $0x60 // mov r10, qword [rsp + 96] + QUAD $0x071644203a0f4266; BYTE $0x01 // pinsrb xmm0, byte [rsi + r10 + 7], 1 + LONG $0x245c8b4c; BYTE $0x20 // mov r11, qword [rsp + 32] + QUAD $0x071e44203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r11 + 7], 2 + LONG $0x24748b4c; BYTE $0x40 // mov r14, qword [rsp + 64] + QUAD $0x073644203a0f4266; BYTE $0x03 // 
pinsrb xmm0, byte [rsi + r14 + 7], 3 + LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] + QUAD $0x04070e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 7], 4 + QUAD $0x05073e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 7], 5 + LONG $0x247c8b4c; BYTE $0x68 // mov r15, qword [rsp + 104] QUAD $0x073e44203a0f4266; BYTE $0x06 // pinsrb xmm0, byte [rsi + r15 + 7], 6 - LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] - QUAD $0x07071644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 7], 7 - QUAD $0x071e44203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r11 + 7], 8 - LONG $0x24648b4c; BYTE $0x78 // mov r12, qword [rsp + 120] - QUAD $0x072644203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r12 + 7], 9 - QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] - QUAD $0x071e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r11 + 7], 10 - QUAD $0x073644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r14 + 7], 11 - QUAD $0x072e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r13 + 7], 12 - QUAD $0x070644203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r8 + 7], 13 - LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] - QUAD $0x0e071644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 7], 14 - QUAD $0x0f073e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 7], 15 + QUAD $0x070e44203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r9 + 7], 7 + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + QUAD $0x08070e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 7], 8 + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] + QUAD $0x09070e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 7], 9 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x0a070e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 7], 10 + LONG $0x24448b4c; BYTE $0x28 // mov r8, qword [rsp + 40] + QUAD $0x070644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r8 + 7], 11 + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + QUAD $0x0c070e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 7], 12 + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + QUAD $0x0d071644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 7], 13 + QUAD $0x072644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r12 + 7], 14 + QUAD $0x072e44203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r13 + 7], 15 QUAD $0x000000f09d6f0f66 // movdqa xmm3, oword 240[rbp] /* [rip + .LCPI7_15] */ LONG $0xe3df0f66 // pandn xmm4, xmm3 LONG $0xeb0f4166; BYTE $0xe1 // por xmm4, xmm9 LONG $0x6f0f4466; BYTE $0xc8 // movdqa xmm9, xmm0 LONG $0xda0f4566; BYTE $0xcc // pminub xmm9, xmm12 LONG $0x740f4466; BYTE $0xc8 // pcmpeqb xmm9, xmm0 - LONG $0x0654b60f; BYTE $0x13 // movzx edx, byte [rsi + rax + 19] + LONG $0x1e54b60f; BYTE $0x13 // movzx edx, byte [rsi + rbx + 19] LONG $0xda6e0f66 // movd xmm3, edx LONG $0xef0f4566; BYTE $0xce // pxor xmm9, xmm14 LONG $0x710f4166; WORD $0x07f1 // psllw xmm9, 7 @@ -36885,55 +38276,56 @@ LBB7_67: LONG $0xdb0f4466; BYTE $0xc8 // pand xmm9, xmm0 LONG $0xeb0f4466; BYTE $0xcc // por xmm9, xmm4 LONG $0x6f0f4166; BYTE $0xe1 // movdqa xmm4, xmm9 - LONG $0x0654b60f; BYTE $0x14 // movzx edx, byte [rsi + rax + 20] + LONG $0x1e54b60f; BYTE $0x14 // movzx edx, byte [rsi + rbx + 20] LONG $0x6e0f4466; BYTE $0xca // movd xmm9, edx - QUAD $0x0000c024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 192] - QUAD $0x01090e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 9], 1 - WORD $0x8948; BYTE $0xc8 // mov rax, rcx - LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] - QUAD $0x02090e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 9], 2 - 
QUAD $0x03091e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 9], 3 - QUAD $0x090e44203a0f4266; BYTE $0x04 // pinsrb xmm0, byte [rsi + r9 + 9], 4 - QUAD $0x091644203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r10 + 9], 5 + QUAD $0x0000d024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 208] + WORD $0x894d; BYTE $0xd4 // mov r12, r10 + QUAD $0x091644203a0f4266; BYTE $0x01 // pinsrb xmm0, byte [rsi + r10 + 9], 1 + QUAD $0x091e44203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r11 + 9], 2 + WORD $0x894d; BYTE $0xf5 // mov r13, r14 + QUAD $0x093644203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r14 + 9], 3 + LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + QUAD $0x04090644203a0f66 // pinsrb xmm0, byte [rsi + rax + 9], 4 + QUAD $0x05093e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 9], 5 + WORD $0x894d; BYTE $0xf9 // mov r9, r15 QUAD $0x093e44203a0f4266; BYTE $0x06 // pinsrb xmm0, byte [rsi + r15 + 9], 6 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 - LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] - QUAD $0x07093e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 9], 7 - LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] - QUAD $0x093e44203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r15 + 9], 8 - QUAD $0x092644203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r12 + 9], 9 - WORD $0x894d; BYTE $0xe2 // mov r10, r12 - QUAD $0x091e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r11 + 9], 10 - QUAD $0x093644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r14 + 9], 11 - QUAD $0x092e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r13 + 9], 12 - LONG $0x246c894c; BYTE $0x68 // mov qword [rsp + 104], r13 - LONG $0x24248b4c // mov r12, qword [rsp] - QUAD $0x092644203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r12 + 9], 13 - LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] - QUAD $0x090e44203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r9 + 9], 14 - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] - QUAD $0x0f091644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 9], 15 - QUAD $0x010a0654203a0f66 // pinsrb xmm2, byte [rsi + rax + 10], 1 - QUAD $0x020a0e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 10], 2 - QUAD $0x030a1e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 10], 3 - LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] - QUAD $0x040a1e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 10], 4 - LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] - QUAD $0x050a0654203a0f66 // pinsrb xmm2, byte [rsi + rax + 10], 5 - QUAD $0x0a0654203a0f4266; BYTE $0x06 // pinsrb xmm2, byte [rsi + r8 + 10], 6 - QUAD $0x070a3e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 10], 7 - QUAD $0x0a3e54203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r15 + 10], 8 - QUAD $0x0a1654203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r10 + 10], 9 - QUAD $0x0a1e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r11 + 10], 10 - QUAD $0x0a3654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r14 + 10], 11 - QUAD $0x0a2e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r13 + 10], 12 - QUAD $0x0a2654203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r12 + 10], 13 - QUAD $0x0a0e54203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r9 + 10], 14 - WORD $0x894d; BYTE $0xcb // mov r11, r9 - QUAD $0x0f0a1654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 10], 15 - WORD $0x8949; BYTE $0xd4 // mov r12, rdx + QUAD $0x0000009024948b48 // mov rdx, qword [rsp + 144] + QUAD $0x07091644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 9], 7 + LONG $0x24748b4c; BYTE $0x70 // mov r14, qword [rsp 
+ 112] + QUAD $0x093644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r14 + 9], 8 + QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] + QUAD $0x091e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r11 + 9], 9 + LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] + QUAD $0x093e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r15 + 9], 10 + QUAD $0x090644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r8 + 9], 11 + QUAD $0x0c090e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 9], 12 + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] + QUAD $0x0d091e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 9], 13 + LONG $0x24548b4c; BYTE $0x18 // mov r10, qword [rsp + 24] + QUAD $0x091644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r10 + 9], 14 + LONG $0x24448b48; BYTE $0x48 // mov rax, qword [rsp + 72] + QUAD $0x0f090644203a0f66 // pinsrb xmm0, byte [rsi + rax + 9], 15 + QUAD $0x0a2654203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r12 + 10], 1 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x020a0654203a0f66 // pinsrb xmm2, byte [rsi + rax + 10], 2 + QUAD $0x0a2e54203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r13 + 10], 3 + LONG $0x24648b4c; BYTE $0x58 // mov r12, qword [rsp + 88] + QUAD $0x0a2654203a0f4266; BYTE $0x04 // pinsrb xmm2, byte [rsi + r12 + 10], 4 + QUAD $0x050a3e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 10], 5 + QUAD $0x0a0e54203a0f4266; BYTE $0x06 // pinsrb xmm2, byte [rsi + r9 + 10], 6 + QUAD $0x070a1654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 10], 7 + WORD $0x8948; BYTE $0xd7 // mov rdi, rdx + QUAD $0x0a3654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r14 + 10], 8 + QUAD $0x0a1e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r11 + 10], 9 + QUAD $0x0a3e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r15 + 10], 10 + QUAD $0x0a0654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r8 + 10], 11 + QUAD $0x0c0a0e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 10], 12 + QUAD $0x0d0a1e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 10], 13 + QUAD $0x0a1654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r10 + 10], 14 + LONG $0x246c8b4c; BYTE $0x48 // mov r13, qword [rsp + 72] + QUAD $0x0a2e54203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r13 + 10], 15 LONG $0xeb0f4166; BYTE $0xe7 // por xmm4, xmm15 - QUAD $0x0000c024a47f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm4 + QUAD $0x0000d024a47f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm4 LONG $0xe06f0f66 // movdqa xmm4, xmm0 LONG $0xda0f4166; BYTE $0xe4 // pminub xmm4, xmm12 LONG $0xe0740f66 // pcmpeqb xmm4, xmm0 @@ -36944,35 +38336,39 @@ LBB7_67: LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0xda0f4166; BYTE $0xc4 // pminub xmm0, xmm12 LONG $0xc2740f66 // pcmpeqb xmm0, xmm2 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] LONG $0x0654b60f; BYTE $0x15 // movzx edx, byte [rsi + rax + 21] LONG $0xe26e0f66 // movd xmm4, edx LONG $0xdf0f4566; BYTE $0xd6 // pandn xmm10, xmm14 - LONG $0x24448b4c; BYTE $0x38 // mov r8, qword [rsp + 56] - QUAD $0x0b065c203a0f4666; BYTE $0x01 // pinsrb xmm11, byte [rsi + r8 + 11], 1 - WORD $0x8949; BYTE $0xcd // mov r13, rcx - QUAD $0x0b0e5c203a0f4466; BYTE $0x02 // pinsrb xmm11, byte [rsi + rcx + 11], 2 - LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] - QUAD $0x0b0e5c203a0f4466; BYTE $0x03 // pinsrb xmm11, byte [rsi + rcx + 11], 3 - QUAD $0x0b1e5c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rsi + rbx + 11], 4 - LONG $0x247c8b48; BYTE $0x40 // mov rdi, 
qword [rsp + 64] - QUAD $0x0b3e5c203a0f4466; BYTE $0x05 // pinsrb xmm11, byte [rsi + rdi + 11], 5 - LONG $0x24548b48; BYTE $0x08 // mov rdx, qword [rsp + 8] - QUAD $0x0b165c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rdx + 11], 6 - LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] - QUAD $0x0b165c203a0f4466; BYTE $0x07 // pinsrb xmm11, byte [rsi + rdx + 11], 7 + LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] + QUAD $0x0b1e5c203a0f4666; BYTE $0x01 // pinsrb xmm11, byte [rsi + r11 + 11], 1 + LONG $0x24548b4c; BYTE $0x20 // mov r10, qword [rsp + 32] + QUAD $0x0b165c203a0f4666; BYTE $0x02 // pinsrb xmm11, byte [rsi + r10 + 11], 2 + LONG $0x24748b4c; BYTE $0x40 // mov r14, qword [rsp + 64] + QUAD $0x0b365c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r14 + 11], 3 + WORD $0x894d; BYTE $0xe0 // mov r8, r12 + QUAD $0x0b265c203a0f4666; BYTE $0x04 // pinsrb xmm11, byte [rsi + r12 + 11], 4 + LONG $0x244c8b4c; BYTE $0x50 // mov r9, qword [rsp + 80] + QUAD $0x0b0e5c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r9 + 11], 5 + LONG $0x244c8b48; BYTE $0x68 // mov rcx, qword [rsp + 104] + QUAD $0x0b0e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rcx + 11], 6 + QUAD $0x0b3e5c203a0f4466; BYTE $0x07 // pinsrb xmm11, byte [rsi + rdi + 11], 7 + WORD $0x8949; BYTE $0xfc // mov r12, rdi + LONG $0x247c8b4c; BYTE $0x70 // mov r15, qword [rsp + 112] QUAD $0x0b3e5c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r15 + 11], 8 - WORD $0x894d; BYTE $0xd1 // mov r9, r10 - QUAD $0x0b165c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r10 + 11], 9 - QUAD $0x000000a024948b4c // mov r10, qword [rsp + 160] - QUAD $0x0b165c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r10 + 11], 10 - QUAD $0x0b365c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r14 + 11], 11 - LONG $0x24748b4c; BYTE $0x68 // mov r14, qword [rsp + 104] - QUAD $0x0b365c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r14 + 11], 12 - LONG $0x241c8b48 // mov rbx, qword [rsp] - QUAD $0x0b1e5c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rbx + 11], 13 - QUAD $0x0b1e5c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r11 + 11], 14 - QUAD $0x0b265c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r12 + 11], 15 + QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] + QUAD $0x0b1e5c203a0f4466; BYTE $0x09 // pinsrb xmm11, byte [rsi + rbx + 11], 9 + LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] + QUAD $0x0b165c203a0f4466; BYTE $0x0a // pinsrb xmm11, byte [rsi + rdx + 11], 10 + LONG $0x24548b48; BYTE $0x28 // mov rdx, qword [rsp + 40] + QUAD $0x0b165c203a0f4466; BYTE $0x0b // pinsrb xmm11, byte [rsi + rdx + 11], 11 + LONG $0x247c8b48; BYTE $0x38 // mov rdi, qword [rsp + 56] + QUAD $0x0b3e5c203a0f4466; BYTE $0x0c // pinsrb xmm11, byte [rsi + rdi + 11], 12 + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + QUAD $0x0b165c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rdx + 11], 13 + LONG $0x24548b48; BYTE $0x18 // mov rdx, qword [rsp + 24] + QUAD $0x0b165c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rdx + 11], 14 + QUAD $0x0b2e5c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r13 + 11], 15 QUAD $0x000000b085df0f66 // pandn xmm0, oword 176[rbp] /* [rip + .LCPI7_11] */ LONG $0xeb0f4166; BYTE $0xc2 // por xmm0, xmm10 LONG $0x6f0f4566; BYTE $0xd3 // movdqa xmm10, xmm11 @@ -36984,49 +38380,50 @@ LBB7_67: LONG $0xeb0f4466; BYTE $0xd0 // por xmm10, xmm0 LONG $0x0654b60f; BYTE $0x17 // movzx edx, byte [rsi + rax + 23] LONG $0x6e0f4466; BYTE $0xda // movd xmm11, edx + 
WORD $0x894c; BYTE $0xd8 // mov rax, r11 QUAD $0x00013024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 304] - QUAD $0x0c0644203a0f4266; BYTE $0x01 // pinsrb xmm0, byte [rsi + r8 + 12], 1 - WORD $0x894c; BYTE $0xe8 // mov rax, r13 - QUAD $0x0c2e44203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r13 + 12], 2 - WORD $0x8948; BYTE $0xca // mov rdx, rcx - QUAD $0x030c0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 12], 3 - LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] - QUAD $0x0c3e44203a0f4266; BYTE $0x04 // pinsrb xmm0, byte [rsi + r15 + 12], 4 - QUAD $0x050c3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 12], 5 - LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] + QUAD $0x0c1e44203a0f4266; BYTE $0x01 // pinsrb xmm0, byte [rsi + r11 + 12], 1 + QUAD $0x0c1644203a0f4266; BYTE $0x02 // pinsrb xmm0, byte [rsi + r10 + 12], 2 + WORD $0x894d; BYTE $0xd3 // mov r11, r10 + WORD $0x894c; BYTE $0xf2 // mov rdx, r14 + QUAD $0x0c3644203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r14 + 12], 3 + QUAD $0x0c0644203a0f4266; BYTE $0x04 // pinsrb xmm0, byte [rsi + r8 + 12], 4 + QUAD $0x0c0e44203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r9 + 12], 5 QUAD $0x060c0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 12], 6 - LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] - QUAD $0x070c3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 12], 7 - LONG $0x245c8b4c; BYTE $0x70 // mov r11, qword [rsp + 112] - QUAD $0x0c1e44203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r11 + 12], 8 - QUAD $0x0c0e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r9 + 12], 9 - QUAD $0x0c1644203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r10 + 12], 10 - QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] - QUAD $0x0c2e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r13 + 12], 11 - QUAD $0x0c3644203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r14 + 12], 12 + QUAD $0x0c2644203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r12 + 12], 7 + QUAD $0x0c3e44203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r15 + 12], 8 + WORD $0x8949; BYTE $0xdc // mov r12, rbx + QUAD $0x090c1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 12], 9 + LONG $0x245c8b48; BYTE $0x30 // mov rbx, qword [rsp + 48] + QUAD $0x0a0c1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 12], 10 + LONG $0x24748b4c; BYTE $0x28 // mov r14, qword [rsp + 40] + QUAD $0x0c3644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r14 + 12], 11 + QUAD $0x0c0c3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 12], 12 + LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] QUAD $0x0d0c1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 12], 13 - LONG $0x24548b4c; BYTE $0x18 // mov r10, qword [rsp + 24] - QUAD $0x0c1644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r10 + 12], 14 - QUAD $0x0c2644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r12 + 12], 15 - QUAD $0x0d0674203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r8 + 13], 1 - QUAD $0x020d0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 13], 2 + LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] + QUAD $0x0c0e44203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r9 + 12], 14 + WORD $0x894d; BYTE $0xea // mov r10, r13 + QUAD $0x0c2e44203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r13 + 12], 15 + QUAD $0x010d0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 13], 1 + QUAD $0x0d1e74203a0f4266; BYTE $0x02 // pinsrb xmm6, byte [rsi + r11 + 13], 2 QUAD $0x030d1674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 13], 3 - WORD $0x894c; BYTE $0xf8 // mov rax, r15 - QUAD $0x0d3e74203a0f4266; BYTE $0x04 
// pinsrb xmm6, byte [rsi + r15 + 13], 4 - LONG $0x247c8b4c; BYTE $0x40 // mov r15, qword [rsp + 64] - QUAD $0x0d3e74203a0f4266; BYTE $0x05 // pinsrb xmm6, byte [rsi + r15 + 13], 5 + QUAD $0x0d0674203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r8 + 13], 4 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x050d0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 13], 5 QUAD $0x060d0e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 13], 6 - QUAD $0x070d3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 13], 7 - QUAD $0x0d1e74203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r11 + 13], 8 - QUAD $0x0d0e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r9 + 13], 9 - QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] - QUAD $0x0a0d3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 13], 10 - QUAD $0x0d2e74203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r13 + 13], 11 - QUAD $0x0d3674203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r14 + 13], 12 + QUAD $0x00000090249c8b4c // mov r11, qword [rsp + 144] + QUAD $0x0d1e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r11 + 13], 7 + QUAD $0x0d3e74203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r15 + 13], 8 + QUAD $0x0d2674203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r12 + 13], 9 + LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] + QUAD $0x0a0d1674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 13], 10 + QUAD $0x0d3674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r14 + 13], 11 + QUAD $0x0c0d3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 13], 12 QUAD $0x0d0d1e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 13], 13 - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - QUAD $0x0d1674203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r10 + 13], 14 - QUAD $0x0d2674203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r12 + 13], 15 + QUAD $0x0d0e74203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r9 + 13], 14 + QUAD $0x0d2e74203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r13 + 13], 15 + WORD $0x894d; BYTE $0xe9 // mov r9, r13 QUAD $0x000100adf80f4466; BYTE $0x00 // psubb xmm13, oword 256[rbp] /* [rip + .LCPI7_16] */ LONG $0xeb0f4566; BYTE $0xd5 // por xmm10, xmm13 LONG $0xd06f0f66 // movdqa xmm2, xmm0 @@ -37035,103 +38432,100 @@ LBB7_67: LONG $0xd0740f66 // pcmpeqb xmm2, xmm0 LONG $0xc66f0f66 // movdqa xmm0, xmm6 LONG $0xda0f4166; BYTE $0xc4 // pminub xmm0, xmm12 - LONG $0xc6740f66 // pcmpeqb xmm0, xmm6 - LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] - LONG $0x0e54b60f; BYTE $0x19 // movzx edx, byte [rsi + rcx + 25] - LONG $0x6e0f4466; BYTE $0xe2 // movd xmm12, edx - LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] - QUAD $0x010e1e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 14], 1 - LONG $0x244c8b48; BYTE $0x58 // mov rcx, qword [rsp + 88] - QUAD $0x020e0e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 14], 2 - LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] - QUAD $0x0e266c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r12 + 14], 3 - WORD $0x8948; BYTE $0xc2 // mov rdx, rax - QUAD $0x040e066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 14], 4 - WORD $0x894d; BYTE $0xf9 // mov r9, r15 - QUAD $0x0e3e6c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r15 + 14], 5 - LONG $0x244c8b48; BYTE $0x08 // mov rcx, qword [rsp + 8] + LONG $0xc6740f66 // pcmpeqb xmm0, xmm6 + QUAD $0x000000a024948b48 // mov rdx, qword [rsp + 160] + LONG $0x1654b60f; BYTE $0x19 // movzx edx, byte [rsi + rdx + 25] + LONG $0x6e0f4466; BYTE $0xe2 // movd xmm12, edx + LONG $0x245c8b48; BYTE $0x60 // mov rbx, qword [rsp + 96] + QUAD 
$0x010e1e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 14], 1 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x0e2e6c203a0f4266; BYTE $0x02 // pinsrb xmm5, byte [rsi + r13 + 14], 2 + LONG $0x24548b48; BYTE $0x40 // mov rdx, qword [rsp + 64] + QUAD $0x030e166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 14], 3 + QUAD $0x0e066c203a0f4266; BYTE $0x04 // pinsrb xmm5, byte [rsi + r8 + 14], 4 + WORD $0x8949; BYTE $0xc0 // mov r8, rax + QUAD $0x050e066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 14], 5 QUAD $0x060e0e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 14], 6 - LONG $0x24548b4c; BYTE $0x50 // mov r10, qword [rsp + 80] - QUAD $0x0e166c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r10 + 14], 7 - WORD $0x894d; BYTE $0xdf // mov r15, r11 - QUAD $0x0e1e6c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r11 + 14], 8 - LONG $0x245c8b4c; BYTE $0x78 // mov r11, qword [rsp + 120] - QUAD $0x0e1e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r11 + 14], 9 - QUAD $0x0a0e3e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 14], 10 - WORD $0x894d; BYTE $0xee // mov r14, r13 - QUAD $0x0e2e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r13 + 14], 11 - LONG $0x246c8b4c; BYTE $0x68 // mov r13, qword [rsp + 104] - QUAD $0x0e2e6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r13 + 14], 12 - LONG $0x24048b48 // mov rax, qword [rsp] + WORD $0x894d; BYTE $0xda // mov r10, r11 + QUAD $0x0e1e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r11 + 14], 7 + WORD $0x894d; BYTE $0xfe // mov r14, r15 + QUAD $0x0e3e6c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r15 + 14], 8 + WORD $0x894c; BYTE $0xe7 // mov rdi, r12 + QUAD $0x0e266c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r12 + 14], 9 + LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] + QUAD $0x0e1e6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r11 + 14], 10 + LONG $0x247c8b4c; BYTE $0x28 // mov r15, qword [rsp + 40] + QUAD $0x0e3e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r15 + 14], 11 + LONG $0x24648b4c; BYTE $0x38 // mov r12, qword [rsp + 56] + QUAD $0x0e266c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r12 + 14], 12 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x0d0e066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 14], 13 - QUAD $0x0e066c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rsi + r8 + 14], 14 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0f0e066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 14], 15 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0e0e066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 14], 14 + QUAD $0x0e0e6c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r9 + 14], 15 QUAD $0x0f1e44203a0f4466; BYTE $0x01 // pinsrb xmm8, byte [rsi + rbx + 15], 1 - LONG $0x24448b4c; BYTE $0x58 // mov r8, qword [rsp + 88] - QUAD $0x0f0644203a0f4666; BYTE $0x02 // pinsrb xmm8, byte [rsi + r8 + 15], 2 - QUAD $0x0f2644203a0f4666; BYTE $0x03 // pinsrb xmm8, byte [rsi + r12 + 15], 3 - QUAD $0x0f1644203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rsi + rdx + 15], 4 - QUAD $0x0f0e44203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r9 + 15], 5 + QUAD $0x0f2e44203a0f4666; BYTE $0x02 // pinsrb xmm8, byte [rsi + r13 + 15], 2 + QUAD $0x0f1644203a0f4466; BYTE $0x03 // pinsrb xmm8, byte [rsi + rdx + 15], 3 + LONG $0x246c8b4c; BYTE $0x58 // mov r13, qword [rsp + 88] + QUAD $0x0f2e44203a0f4666; BYTE $0x04 // pinsrb xmm8, byte [rsi + r13 + 15], 4 + QUAD $0x0f0644203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r8 + 15], 5 QUAD 
$0x0f0e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rcx + 15], 6 QUAD $0x0f1644203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r10 + 15], 7 - QUAD $0x0f3e44203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r15 + 15], 8 - QUAD $0x0f1e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r11 + 15], 9 - QUAD $0x0f3e44203a0f4466; BYTE $0x0a // pinsrb xmm8, byte [rsi + rdi + 15], 10 - QUAD $0x0f3644203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r14 + 15], 11 - QUAD $0x0f2e44203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r13 + 15], 12 - LONG $0x24048b48 // mov rax, qword [rsp] + QUAD $0x0f3644203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r14 + 15], 8 + QUAD $0x0f3e44203a0f4466; BYTE $0x09 // pinsrb xmm8, byte [rsi + rdi + 15], 9 + QUAD $0x0f1e44203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r11 + 15], 10 + QUAD $0x0f3e44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r15 + 15], 11 + QUAD $0x0f2644203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r12 + 15], 12 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] QUAD $0x0f0644203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rsi + rax + 15], 13 - LONG $0x24448b4c; BYTE $0x18 // mov r8, qword [rsp + 24] - QUAD $0x0f0644203a0f4666; BYTE $0x0e // pinsrb xmm8, byte [rsi + r8 + 15], 14 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0f0644203a0f4466; BYTE $0x0f // pinsrb xmm8, byte [rsi + rax + 15], 15 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0f0644203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rax + 15], 14 + QUAD $0x0f0e44203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r9 + 15], 15 QUAD $0x01111e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 17], 1 - LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x0211067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 17], 2 - QUAD $0x11267c203a0f4266; BYTE $0x03 // pinsrb xmm7, byte [rsi + r12 + 17], 3 - QUAD $0x0411167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 17], 4 - QUAD $0x110e7c203a0f4266; BYTE $0x05 // pinsrb xmm7, byte [rsi + r9 + 17], 5 + QUAD $0x0311167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 17], 3 + QUAD $0x112e7c203a0f4266; BYTE $0x04 // pinsrb xmm7, byte [rsi + r13 + 17], 4 + QUAD $0x11067c203a0f4266; BYTE $0x05 // pinsrb xmm7, byte [rsi + r8 + 17], 5 QUAD $0x06110e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 17], 6 QUAD $0x11167c203a0f4266; BYTE $0x07 // pinsrb xmm7, byte [rsi + r10 + 17], 7 - QUAD $0x113e7c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r15 + 17], 8 - QUAD $0x111e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r11 + 17], 9 - QUAD $0x0a113e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 17], 10 - QUAD $0x11367c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r14 + 17], 11 - QUAD $0x112e7c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r13 + 17], 12 - LONG $0x24048b48 // mov rax, qword [rsp] - QUAD $0x0d11067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 17], 13 - QUAD $0x11067c203a0f4266; BYTE $0x0e // pinsrb xmm7, byte [rsi + r8 + 17], 14 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0f11067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 17], 15 + QUAD $0x11367c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r14 + 17], 8 + QUAD $0x09113e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 17], 9 + QUAD $0x111e7c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r11 + 17], 10 + QUAD $0x113e7c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r15 + 17], 11 + QUAD $0x11267c203a0f4266; BYTE $0x0c // 
pinsrb xmm7, byte [rsi + r12 + 17], 12 + LONG $0x246c8b4c; BYTE $0x10 // mov r13, qword [rsp + 16] + QUAD $0x112e7c203a0f4266; BYTE $0x0d // pinsrb xmm7, byte [rsi + r13 + 17], 13 + LONG $0x24448b48; BYTE $0x18 // mov rax, qword [rsp + 24] + QUAD $0x0e11067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 17], 14 + QUAD $0x110e7c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r9 + 17], 15 QUAD $0x01121e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 18], 1 - LONG $0x245c8b48; BYTE $0x58 // mov rbx, qword [rsp + 88] + LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] QUAD $0x02121e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 18], 2 - QUAD $0x12264c203a0f4266; BYTE $0x03 // pinsrb xmm1, byte [rsi + r12 + 18], 3 - QUAD $0x0412164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 18], 4 - QUAD $0x120e4c203a0f4266; BYTE $0x05 // pinsrb xmm1, byte [rsi + r9 + 18], 5 + QUAD $0x0312164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 18], 3 + LONG $0x24448b48; BYTE $0x58 // mov rax, qword [rsp + 88] + QUAD $0x0412064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 18], 4 + QUAD $0x12064c203a0f4266; BYTE $0x05 // pinsrb xmm1, byte [rsi + r8 + 18], 5 QUAD $0x06120e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 18], 6 QUAD $0x12164c203a0f4266; BYTE $0x07 // pinsrb xmm1, byte [rsi + r10 + 18], 7 - QUAD $0x123e4c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r15 + 18], 8 - WORD $0x894d; BYTE $0xfc // mov r12, r15 - QUAD $0x121e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r11 + 18], 9 - QUAD $0x0a123e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 18], 10 - WORD $0x8949; BYTE $0xfa // mov r10, rdi - QUAD $0x12364c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r14 + 18], 11 - WORD $0x894d; BYTE $0xf7 // mov r15, r14 - QUAD $0x122e4c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r13 + 18], 12 - LONG $0x240c8b4c // mov r9, qword [rsp] - QUAD $0x120e4c203a0f4266; BYTE $0x0d // pinsrb xmm1, byte [rsi + r9 + 18], 13 + QUAD $0x12364c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r14 + 18], 8 + WORD $0x894d; BYTE $0xf2 // mov r10, r14 + QUAD $0x09123e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 18], 9 + QUAD $0x121e4c203a0f4266; BYTE $0x0a // pinsrb xmm1, byte [rsi + r11 + 18], 10 + QUAD $0x123e4c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r15 + 18], 11 + QUAD $0x12264c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r12 + 18], 12 + QUAD $0x122e4c203a0f4266; BYTE $0x0d // pinsrb xmm1, byte [rsi + r13 + 18], 13 QUAD $0x000000d095df0f66 // pandn xmm2, oword 208[rbp] /* [rip + .LCPI7_13] */ QUAD $0x000000e085df0f66 // pandn xmm0, oword 224[rbp] /* [rip + .LCPI7_14] */ LONG $0xc2eb0f66 // por xmm0, xmm2 LONG $0xd56f0f66 // movdqa xmm2, xmm5 LONG $0xda0f4166; BYTE $0xd5 // pminub xmm2, xmm13 LONG $0xd5740f66 // pcmpeqb xmm2, xmm5 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + QUAD $0x000000a024848b48 // mov rax, qword [rsp + 160] LONG $0x0654b60f; BYTE $0x1a // movzx edx, byte [rsi + rax + 26] LONG $0xea6e0f66 // movd xmm5, edx - QUAD $0x12064c203a0f4266; BYTE $0x0e // pinsrb xmm1, byte [rsi + r8 + 18], 14 + LONG $0x244c8b4c; BYTE $0x18 // mov r9, qword [rsp + 24] + QUAD $0x120e4c203a0f4266; BYTE $0x0e // pinsrb xmm1, byte [rsi + r9 + 18], 14 QUAD $0x000000f095df0f66 // pandn xmm2, oword 240[rbp] /* [rip + .LCPI7_15] */ LONG $0xd0eb0f66 // por xmm2, xmm0 LONG $0x6f0f4166; BYTE $0xf0 // movdqa xmm6, xmm8 @@ -37139,8 +38533,8 @@ LBB7_67: LONG $0x740f4166; BYTE $0xf0 // pcmpeqb xmm6, xmm8 LONG $0x0654b60f; BYTE $0x1b // movzx edx, byte [rsi + rax + 27] LONG $0xc26e0f66 // movd xmm0, edx - 
LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] - QUAD $0x0f120e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 18], 15 + LONG $0x24448b4c; BYTE $0x48 // mov r8, qword [rsp + 72] + QUAD $0x12064c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r8 + 18], 15 QUAD $0x00000100b5ef0f66 // pxor xmm6, oword 256[rbp] /* [rip + .LCPI7_16] */ LONG $0xf6710f66; BYTE $0x07 // psllw xmm6, 7 LONG $0x75db0f66; BYTE $0x60 // pand xmm6, oword 96[rbp] /* [rip + .LCPI7_6] */ @@ -37148,7 +38542,7 @@ LBB7_67: LONG $0x0654b60f; BYTE $0x1c // movzx edx, byte [rsi + rax + 28] LONG $0x6e0f4466; BYTE $0xc2 // movd xmm8, edx LONG $0xeb0f4166; BYTE $0xf2 // por xmm6, xmm10 - QUAD $0x0000b024b47f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm6 + QUAD $0x0000c024b47f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm6 LONG $0xd76f0f66 // movdqa xmm2, xmm7 LONG $0xda0f4166; BYTE $0xd5 // pminub xmm2, xmm13 LONG $0xd7740f66 // pcmpeqb xmm2, xmm7 @@ -37161,8 +38555,8 @@ LBB7_67: LONG $0x0654b60f; BYTE $0x1d // movzx edx, byte [rsi + rax + 29] LONG $0xf26e0f66 // movd xmm6, edx LONG $0x0654b60f; BYTE $0x1e // movzx edx, byte [rsi + rax + 30] - LONG $0x067cb60f; BYTE $0x1f // movzx edi, byte [rsi + rax + 31] - LONG $0x24448b48; BYTE $0x38 // mov rax, qword [rsp + 56] + LONG $0x064cb60f; BYTE $0x1f // movzx ecx, byte [rsi + rax + 31] + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x0113065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 19], 1 QUAD $0x14064c203a0f4466; BYTE $0x01 // pinsrb xmm9, byte [rsi + rax + 20], 1 QUAD $0x01150664203a0f66 // pinsrb xmm4, byte [rsi + rax + 21], 1 @@ -37175,7 +38569,7 @@ LBB7_67: QUAD $0x011d0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 29], 1 LONG $0xca6e0f66 // movd xmm1, edx QUAD $0x011e064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 30], 1 - LONG $0xff6e0f66 // movd xmm7, edi + LONG $0xf96e0f66 // movd xmm7, ecx QUAD $0x011f067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 31], 1 WORD $0x8948; BYTE $0xd8 // mov rax, rbx QUAD $0x02131e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 19], 2 @@ -37190,73 +38584,79 @@ LBB7_67: QUAD $0x021d1e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 29], 2 QUAD $0x021e1e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 30], 2 QUAD $0x021f1e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 31], 2 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + LONG $0x24448b48; BYTE $0x40 // mov rax, qword [rsp + 64] QUAD $0x0313065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 19], 3 - LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] + LONG $0x24548b48; BYTE $0x58 // mov rdx, qword [rsp + 88] QUAD $0x0413165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 19], 4 - LONG $0x246c8b4c; BYTE $0x40 // mov r13, qword [rsp + 64] - QUAD $0x132e5c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r13 + 19], 5 - LONG $0x247c8b48; BYTE $0x08 // mov rdi, qword [rsp + 8] + LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] + QUAD $0x05130e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 19], 5 + LONG $0x247c8b48; BYTE $0x68 // mov rdi, qword [rsp + 104] QUAD $0x06133e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 19], 6 - LONG $0x245c8b4c; BYTE $0x50 // mov r11, qword [rsp + 80] - QUAD $0x131e5c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r11 + 19], 7 - QUAD $0x13265c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r12 + 19], 8 - LONG $0x24748b4c; BYTE $0x78 // mov r14, qword [rsp + 120] - QUAD $0x13365c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r14 + 19], 9 - QUAD $0x13165c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r10 + 19], 10 - QUAD 
$0x133e5c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r15 + 19], 11 - LONG $0x245c8b48; BYTE $0x68 // mov rbx, qword [rsp + 104] + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] + QUAD $0x13365c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r14 + 19], 7 + WORD $0x894d; BYTE $0xd4 // mov r12, r10 + QUAD $0x13165c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r10 + 19], 8 + QUAD $0x00000080249c8b4c // mov r11, qword [rsp + 128] + QUAD $0x131e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r11 + 19], 9 + LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] + QUAD $0x133e5c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r15 + 19], 10 + LONG $0x24548b4c; BYTE $0x28 // mov r10, qword [rsp + 40] + QUAD $0x13165c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r10 + 19], 11 + LONG $0x245c8b48; BYTE $0x38 // mov rbx, qword [rsp + 56] QUAD $0x0c131e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 19], 12 - QUAD $0x130e5c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r9 + 19], 13 - QUAD $0x13065c203a0f4266; BYTE $0x0e // pinsrb xmm3, byte [rsi + r8 + 19], 14 - QUAD $0x0f130e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 19], 15 + QUAD $0x132e5c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r13 + 19], 13 + QUAD $0x130e5c203a0f4266; BYTE $0x0e // pinsrb xmm3, byte [rsi + r9 + 19], 14 + QUAD $0x13065c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r8 + 19], 15 QUAD $0x14064c203a0f4466; BYTE $0x03 // pinsrb xmm9, byte [rsi + rax + 20], 3 QUAD $0x14164c203a0f4466; BYTE $0x04 // pinsrb xmm9, byte [rsi + rdx + 20], 4 - QUAD $0x142e4c203a0f4666; BYTE $0x05 // pinsrb xmm9, byte [rsi + r13 + 20], 5 + QUAD $0x140e4c203a0f4466; BYTE $0x05 // pinsrb xmm9, byte [rsi + rcx + 20], 5 QUAD $0x143e4c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rdi + 20], 6 - QUAD $0x141e4c203a0f4666; BYTE $0x07 // pinsrb xmm9, byte [rsi + r11 + 20], 7 + QUAD $0x14364c203a0f4666; BYTE $0x07 // pinsrb xmm9, byte [rsi + r14 + 20], 7 QUAD $0x14264c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r12 + 20], 8 - QUAD $0x14364c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r14 + 20], 9 - QUAD $0x14164c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r10 + 20], 10 - QUAD $0x143e4c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r15 + 20], 11 + QUAD $0x141e4c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r11 + 20], 9 + QUAD $0x143e4c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r15 + 20], 10 + QUAD $0x14164c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r10 + 20], 11 QUAD $0x141e4c203a0f4466; BYTE $0x0c // pinsrb xmm9, byte [rsi + rbx + 20], 12 - QUAD $0x140e4c203a0f4666; BYTE $0x0d // pinsrb xmm9, byte [rsi + r9 + 20], 13 - QUAD $0x14064c203a0f4666; BYTE $0x0e // pinsrb xmm9, byte [rsi + r8 + 20], 14 - QUAD $0x140e4c203a0f4466; BYTE $0x0f // pinsrb xmm9, byte [rsi + rcx + 20], 15 + QUAD $0x142e4c203a0f4666; BYTE $0x0d // pinsrb xmm9, byte [rsi + r13 + 20], 13 + QUAD $0x140e4c203a0f4666; BYTE $0x0e // pinsrb xmm9, byte [rsi + r9 + 20], 14 + QUAD $0x14064c203a0f4666; BYTE $0x0f // pinsrb xmm9, byte [rsi + r8 + 20], 15 QUAD $0x03150664203a0f66 // pinsrb xmm4, byte [rsi + rax + 21], 3 QUAD $0x04151664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 21], 4 - QUAD $0x152e64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r13 + 21], 5 + QUAD $0x05150e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 21], 5 QUAD $0x06153e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 21], 6 - QUAD $0x151e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r11 + 21], 7 + QUAD $0x153664203a0f4266; BYTE $0x07 
// pinsrb xmm4, byte [rsi + r14 + 21], 7 QUAD $0x152664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r12 + 21], 8 - QUAD $0x153664203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r14 + 21], 9 - QUAD $0x151664203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r10 + 21], 10 - QUAD $0x153e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 21], 11 + QUAD $0x151e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r11 + 21], 9 + QUAD $0x153e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r15 + 21], 10 + QUAD $0x151664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r10 + 21], 11 QUAD $0x0c151e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 21], 12 - QUAD $0x150e64203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r9 + 21], 13 - QUAD $0x150664203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r8 + 21], 14 - QUAD $0x0f150e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 21], 15 + WORD $0x8949; BYTE $0xd8 // mov r8, rbx + QUAD $0x152e64203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r13 + 21], 13 + QUAD $0x150e64203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r9 + 21], 14 + LONG $0x245c8b48; BYTE $0x48 // mov rbx, qword [rsp + 72] + QUAD $0x0f151e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 21], 15 QUAD $0x16067c203a0f4466; BYTE $0x03 // pinsrb xmm15, byte [rsi + rax + 22], 3 QUAD $0x16167c203a0f4466; BYTE $0x04 // pinsrb xmm15, byte [rsi + rdx + 22], 4 - QUAD $0x162e7c203a0f4666; BYTE $0x05 // pinsrb xmm15, byte [rsi + r13 + 22], 5 + QUAD $0x160e7c203a0f4466; BYTE $0x05 // pinsrb xmm15, byte [rsi + rcx + 22], 5 QUAD $0x163e7c203a0f4466; BYTE $0x06 // pinsrb xmm15, byte [rsi + rdi + 22], 6 - QUAD $0x161e7c203a0f4666; BYTE $0x07 // pinsrb xmm15, byte [rsi + r11 + 22], 7 + QUAD $0x16367c203a0f4666; BYTE $0x07 // pinsrb xmm15, byte [rsi + r14 + 22], 7 QUAD $0x16267c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r12 + 22], 8 - QUAD $0x16367c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r14 + 22], 9 - QUAD $0x16167c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r10 + 22], 10 + QUAD $0x161e7c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r11 + 22], 9 + QUAD $0x163e7c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r15 + 22], 10 QUAD $0x00011024946f0f66; BYTE $0x00 // movdqa xmm2, oword [rsp + 272] QUAD $0x000000a095df0f66 // pandn xmm2, oword 160[rbp] /* [rip + .LCPI7_10] */ - QUAD $0x163e7c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r15 + 22], 11 + QUAD $0x16167c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r10 + 22], 11 QUAD $0x0000b095df0f4466; BYTE $0x00 // pandn xmm10, oword 176[rbp] /* [rip + .LCPI7_11] */ LONG $0xeb0f4466; BYTE $0xd2 // por xmm10, xmm2 - QUAD $0x161e7c203a0f4466; BYTE $0x0c // pinsrb xmm15, byte [rsi + rbx + 22], 12 + QUAD $0x16067c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r8 + 22], 12 LONG $0xd36f0f66 // movdqa xmm2, xmm3 LONG $0xda0f4166; BYTE $0xd5 // pminub xmm2, xmm13 LONG $0xd3740f66 // pcmpeqb xmm2, xmm3 - QUAD $0x160e7c203a0f4666; BYTE $0x0d // pinsrb xmm15, byte [rsi + r9 + 22], 13 + QUAD $0x162e7c203a0f4666; BYTE $0x0d // pinsrb xmm15, byte [rsi + r13 + 22], 13 QUAD $0x000000c095df0f66 // pandn xmm2, oword 192[rbp] /* [rip + .LCPI7_12] */ LONG $0xeb0f4166; BYTE $0xd2 // por xmm2, xmm10 - QUAD $0x16067c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r8 + 22], 14 + QUAD $0x160e7c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r9 + 22], 14 + WORD $0x894c; BYTE $0xcf // mov rdi, r9 QUAD $0x000100b5f80f4466; BYTE $0x00 // psubb xmm14, oword 256[rbp] /* [rip + .LCPI7_16] */ LONG $0xeb0f4166; BYTE 
$0xd6 // por xmm2, xmm14 LONG $0x6f0f4566; BYTE $0xd1 // movdqa xmm10, xmm9 @@ -37266,7 +38666,7 @@ LBB7_67: LONG $0x6f0f4566; BYTE $0xcd // movdqa xmm9, xmm13 LONG $0xda0f4166; BYTE $0xdd // pminub xmm3, xmm13 LONG $0xdc740f66 // pcmpeqb xmm3, xmm4 - QUAD $0x160e7c203a0f4466; BYTE $0x0f // pinsrb xmm15, byte [rsi + rcx + 22], 15 + QUAD $0x161e7c203a0f4466; BYTE $0x0f // pinsrb xmm15, byte [rsi + rbx + 22], 15 QUAD $0x0000d0ad6f0f4466; BYTE $0x00 // movdqa xmm13, oword 208[rbp] /* [rip + .LCPI7_13] */ LONG $0xdf0f4566; BYTE $0xd5 // pandn xmm10, xmm13 QUAD $0x000000e0a56f0f66 // movdqa xmm4, oword 224[rbp] /* [rip + .LCPI7_14] */ @@ -37278,17 +38678,20 @@ LBB7_67: LONG $0x740f4166; BYTE $0xe7 // pcmpeqb xmm4, xmm15 QUAD $0x17065c203a0f4466; BYTE $0x03 // pinsrb xmm11, byte [rsi + rax + 23], 3 QUAD $0x17165c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rsi + rdx + 23], 4 - QUAD $0x172e5c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r13 + 23], 5 - QUAD $0x173e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rdi + 23], 6 - QUAD $0x171e5c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r11 + 23], 7 + QUAD $0x170e5c203a0f4466; BYTE $0x05 // pinsrb xmm11, byte [rsi + rcx + 23], 5 + WORD $0x8949; BYTE $0xc8 // mov r8, rcx + LONG $0x244c8b4c; BYTE $0x68 // mov r9, qword [rsp + 104] + QUAD $0x170e5c203a0f4666; BYTE $0x06 // pinsrb xmm11, byte [rsi + r9 + 23], 6 + QUAD $0x17365c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r14 + 23], 7 QUAD $0x17265c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r12 + 23], 8 - QUAD $0x17365c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r14 + 23], 9 - QUAD $0x17165c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r10 + 23], 10 - QUAD $0x173e5c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r15 + 23], 11 - QUAD $0x171e5c203a0f4466; BYTE $0x0c // pinsrb xmm11, byte [rsi + rbx + 23], 12 - QUAD $0x170e5c203a0f4666; BYTE $0x0d // pinsrb xmm11, byte [rsi + r9 + 23], 13 - QUAD $0x17065c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r8 + 23], 14 - QUAD $0x170e5c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rsi + rcx + 23], 15 + QUAD $0x171e5c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r11 + 23], 9 + QUAD $0x173e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r15 + 23], 10 + QUAD $0x17165c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r10 + 23], 11 + LONG $0x244c8b48; BYTE $0x38 // mov rcx, qword [rsp + 56] + QUAD $0x170e5c203a0f4466; BYTE $0x0c // pinsrb xmm11, byte [rsi + rcx + 23], 12 + QUAD $0x172e5c203a0f4666; BYTE $0x0d // pinsrb xmm11, byte [rsi + r13 + 23], 13 + QUAD $0x173e5c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rdi + 23], 14 + QUAD $0x171e5c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rsi + rbx + 23], 15 QUAD $0x0000f0bd6f0f4466; BYTE $0x00 // movdqa xmm15, oword 240[rbp] /* [rip + .LCPI7_15] */ LONG $0xdf0f4166; BYTE $0xe7 // pandn xmm4, xmm15 LONG $0xe3eb0f66 // por xmm4, xmm3 @@ -37302,30 +38705,30 @@ LBB7_67: LONG $0xdceb0f66 // por xmm3, xmm4 QUAD $0x190664203a0f4466; BYTE $0x03 // pinsrb xmm12, byte [rsi + rax + 25], 3 QUAD $0x191664203a0f4466; BYTE $0x04 // pinsrb xmm12, byte [rsi + rdx + 25], 4 - QUAD $0x192e64203a0f4666; BYTE $0x05 // pinsrb xmm12, byte [rsi + r13 + 25], 5 - QUAD $0x193e64203a0f4466; BYTE $0x06 // pinsrb xmm12, byte [rsi + rdi + 25], 6 - QUAD $0x191e64203a0f4666; BYTE $0x07 // pinsrb xmm12, byte [rsi + r11 + 25], 7 + QUAD $0x190664203a0f4666; BYTE $0x05 // pinsrb xmm12, byte [rsi + r8 + 25], 5 + QUAD $0x190e64203a0f4666; BYTE $0x06 // pinsrb xmm12, byte [rsi + r9 + 
25], 6 + QUAD $0x193664203a0f4666; BYTE $0x07 // pinsrb xmm12, byte [rsi + r14 + 25], 7 QUAD $0x192664203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r12 + 25], 8 - QUAD $0x193664203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r14 + 25], 9 - QUAD $0x191664203a0f4666; BYTE $0x0a // pinsrb xmm12, byte [rsi + r10 + 25], 10 - QUAD $0x193e64203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r15 + 25], 11 - QUAD $0x191e64203a0f4466; BYTE $0x0c // pinsrb xmm12, byte [rsi + rbx + 25], 12 - QUAD $0x190e64203a0f4666; BYTE $0x0d // pinsrb xmm12, byte [rsi + r9 + 25], 13 - QUAD $0x190664203a0f4666; BYTE $0x0e // pinsrb xmm12, byte [rsi + r8 + 25], 14 - QUAD $0x190e64203a0f4466; BYTE $0x0f // pinsrb xmm12, byte [rsi + rcx + 25], 15 + QUAD $0x191e64203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r11 + 25], 9 + QUAD $0x193e64203a0f4666; BYTE $0x0a // pinsrb xmm12, byte [rsi + r15 + 25], 10 + QUAD $0x191664203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r10 + 25], 11 + QUAD $0x190e64203a0f4466; BYTE $0x0c // pinsrb xmm12, byte [rsi + rcx + 25], 12 + QUAD $0x192e64203a0f4666; BYTE $0x0d // pinsrb xmm12, byte [rsi + r13 + 25], 13 + QUAD $0x193e64203a0f4466; BYTE $0x0e // pinsrb xmm12, byte [rsi + rdi + 25], 14 + QUAD $0x191e64203a0f4466; BYTE $0x0f // pinsrb xmm12, byte [rsi + rbx + 25], 15 QUAD $0x031a066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 26], 3 QUAD $0x041a166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 26], 4 - QUAD $0x1a2e6c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r13 + 26], 5 - QUAD $0x061a3e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 26], 6 - QUAD $0x1a1e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r11 + 26], 7 + QUAD $0x1a066c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r8 + 26], 5 + QUAD $0x1a0e6c203a0f4266; BYTE $0x06 // pinsrb xmm5, byte [rsi + r9 + 26], 6 + QUAD $0x1a366c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r14 + 26], 7 QUAD $0x1a266c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r12 + 26], 8 - QUAD $0x1a366c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r14 + 26], 9 - QUAD $0x1a166c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r10 + 26], 10 - QUAD $0x1a3e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r15 + 26], 11 - QUAD $0x0c1a1e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 26], 12 - QUAD $0x1a0e6c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r9 + 26], 13 - QUAD $0x1a066c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rsi + r8 + 26], 14 - QUAD $0x0f1a0e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 26], 15 + QUAD $0x1a1e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r11 + 26], 9 + QUAD $0x1a3e6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r15 + 26], 10 + QUAD $0x1a166c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r10 + 26], 11 + QUAD $0x0c1a0e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 26], 12 + QUAD $0x1a2e6c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r13 + 26], 13 + QUAD $0x0e1a3e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 26], 14 + QUAD $0x0f1a1e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 26], 15 LONG $0xdaeb0f66 // por xmm3, xmm2 LONG $0x6f0f4166; BYTE $0xd4 // movdqa xmm2, xmm12 LONG $0xda0f4166; BYTE $0xd1 // pminub xmm2, xmm9 @@ -37341,17 +38744,17 @@ LBB7_67: LONG $0xdf0f4166; BYTE $0xd2 // pandn xmm2, xmm10 QUAD $0x031b0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 27], 3 QUAD $0x041b1644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 27], 4 - QUAD $0x1b2e44203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r13 + 27], 5 - QUAD $0x061b3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 
27], 6 - QUAD $0x1b1e44203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r11 + 27], 7 + QUAD $0x1b0644203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r8 + 27], 5 + QUAD $0x1b0e44203a0f4266; BYTE $0x06 // pinsrb xmm0, byte [rsi + r9 + 27], 6 + QUAD $0x1b3644203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r14 + 27], 7 QUAD $0x1b2644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r12 + 27], 8 - QUAD $0x1b3644203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r14 + 27], 9 - QUAD $0x1b1644203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r10 + 27], 10 - QUAD $0x1b3e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r15 + 27], 11 - QUAD $0x0c1b1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 27], 12 - QUAD $0x1b0e44203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r9 + 27], 13 - QUAD $0x1b0644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r8 + 27], 14 - QUAD $0x0f1b0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 27], 15 + QUAD $0x1b1e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r11 + 27], 9 + QUAD $0x1b3e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r15 + 27], 10 + QUAD $0x1b1644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r10 + 27], 11 + QUAD $0x0c1b0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 27], 12 + QUAD $0x1b2e44203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r13 + 27], 13 + QUAD $0x0e1b3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 27], 14 + QUAD $0x0f1b1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 27], 15 QUAD $0x000000b0a5df0f66 // pandn xmm4, oword 176[rbp] /* [rip + .LCPI7_11] */ LONG $0xe2eb0f66 // por xmm4, xmm2 LONG $0xd06f0f66 // movdqa xmm2, xmm0 @@ -37361,30 +38764,30 @@ LBB7_67: LONG $0xd4eb0f66 // por xmm2, xmm4 QUAD $0x1c0644203a0f4466; BYTE $0x03 // pinsrb xmm8, byte [rsi + rax + 28], 3 QUAD $0x1c1644203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rsi + rdx + 28], 4 - QUAD $0x1c2e44203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r13 + 28], 5 - QUAD $0x1c3e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rdi + 28], 6 - QUAD $0x1c1e44203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r11 + 28], 7 + QUAD $0x1c0644203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r8 + 28], 5 + QUAD $0x1c0e44203a0f4666; BYTE $0x06 // pinsrb xmm8, byte [rsi + r9 + 28], 6 + QUAD $0x1c3644203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r14 + 28], 7 QUAD $0x1c2644203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r12 + 28], 8 - QUAD $0x1c3644203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r14 + 28], 9 - QUAD $0x1c1644203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r10 + 28], 10 - QUAD $0x1c3e44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r15 + 28], 11 - QUAD $0x1c1e44203a0f4466; BYTE $0x0c // pinsrb xmm8, byte [rsi + rbx + 28], 12 - QUAD $0x1c0e44203a0f4666; BYTE $0x0d // pinsrb xmm8, byte [rsi + r9 + 28], 13 - QUAD $0x1c0644203a0f4666; BYTE $0x0e // pinsrb xmm8, byte [rsi + r8 + 28], 14 - QUAD $0x1c0e44203a0f4466; BYTE $0x0f // pinsrb xmm8, byte [rsi + rcx + 28], 15 + QUAD $0x1c1e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r11 + 28], 9 + QUAD $0x1c3e44203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r15 + 28], 10 + QUAD $0x1c1644203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r10 + 28], 11 + QUAD $0x1c0e44203a0f4466; BYTE $0x0c // pinsrb xmm8, byte [rsi + rcx + 28], 12 + QUAD $0x1c2e44203a0f4666; BYTE $0x0d // pinsrb xmm8, byte [rsi + r13 + 28], 13 + QUAD $0x1c3e44203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rdi + 28], 14 + QUAD $0x1c1e44203a0f4466; BYTE $0x0f // pinsrb xmm8, byte [rsi + rbx + 28], 15 QUAD 
$0x031d0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 29], 3 QUAD $0x041d1674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 29], 4 - QUAD $0x1d2e74203a0f4266; BYTE $0x05 // pinsrb xmm6, byte [rsi + r13 + 29], 5 - QUAD $0x061d3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 29], 6 - QUAD $0x1d1e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r11 + 29], 7 + QUAD $0x1d0674203a0f4266; BYTE $0x05 // pinsrb xmm6, byte [rsi + r8 + 29], 5 + QUAD $0x1d0e74203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r9 + 29], 6 + QUAD $0x1d3674203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r14 + 29], 7 QUAD $0x1d2674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r12 + 29], 8 - QUAD $0x1d3674203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r14 + 29], 9 - QUAD $0x1d1674203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r10 + 29], 10 - QUAD $0x1d3e74203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r15 + 29], 11 - QUAD $0x0c1d1e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 29], 12 - QUAD $0x1d0e74203a0f4266; BYTE $0x0d // pinsrb xmm6, byte [rsi + r9 + 29], 13 - QUAD $0x1d0674203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r8 + 29], 14 - QUAD $0x0f1d0e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 29], 15 + QUAD $0x1d1e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r11 + 29], 9 + QUAD $0x1d3e74203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r15 + 29], 10 + QUAD $0x1d1674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r10 + 29], 11 + QUAD $0x0c1d0e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 29], 12 + QUAD $0x1d2e74203a0f4266; BYTE $0x0d // pinsrb xmm6, byte [rsi + r13 + 29], 13 + QUAD $0x0e1d3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 29], 14 + QUAD $0x0f1d1e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 29], 15 LONG $0x760f4566; BYTE $0xd2 // pcmpeqd xmm10, xmm10 LONG $0xf80f4566; BYTE $0xca // psubb xmm9, xmm10 LONG $0xeb0f4166; BYTE $0xd1 // por xmm2, xmm9 @@ -37398,29 +38801,29 @@ LBB7_67: QUAD $0x031f067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 31], 3 QUAD $0x041e164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 30], 4 QUAD $0x041f167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 31], 4 - QUAD $0x1e2e4c203a0f4266; BYTE $0x05 // pinsrb xmm1, byte [rsi + r13 + 30], 5 - QUAD $0x1f2e7c203a0f4266; BYTE $0x05 // pinsrb xmm7, byte [rsi + r13 + 31], 5 - QUAD $0x061e3e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 30], 6 - QUAD $0x061f3e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 31], 6 - QUAD $0x1e1e4c203a0f4266; BYTE $0x07 // pinsrb xmm1, byte [rsi + r11 + 30], 7 - QUAD $0x1f1e7c203a0f4266; BYTE $0x07 // pinsrb xmm7, byte [rsi + r11 + 31], 7 + QUAD $0x1e064c203a0f4266; BYTE $0x05 // pinsrb xmm1, byte [rsi + r8 + 30], 5 + QUAD $0x1f067c203a0f4266; BYTE $0x05 // pinsrb xmm7, byte [rsi + r8 + 31], 5 + QUAD $0x1e0e4c203a0f4266; BYTE $0x06 // pinsrb xmm1, byte [rsi + r9 + 30], 6 + QUAD $0x1f0e7c203a0f4266; BYTE $0x06 // pinsrb xmm7, byte [rsi + r9 + 31], 6 + QUAD $0x1e364c203a0f4266; BYTE $0x07 // pinsrb xmm1, byte [rsi + r14 + 30], 7 + QUAD $0x1f367c203a0f4266; BYTE $0x07 // pinsrb xmm7, byte [rsi + r14 + 31], 7 QUAD $0x1e264c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r12 + 30], 8 QUAD $0x1f267c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r12 + 31], 8 - QUAD $0x1e364c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r14 + 30], 9 - QUAD $0x1f367c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r14 + 31], 9 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] - QUAD $0x1e164c203a0f4266; BYTE $0x0a // pinsrb xmm1, byte [rsi + r10 + 30], 10 - QUAD $0x1f167c203a0f4266; BYTE $0x0a // 
pinsrb xmm7, byte [rsi + r10 + 31], 10 - QUAD $0x1e3e4c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r15 + 30], 11 - QUAD $0x1f3e7c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r15 + 31], 11 - QUAD $0x0c1e1e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 30], 12 - QUAD $0x0c1f1e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 31], 12 - QUAD $0x1e0e4c203a0f4266; BYTE $0x0d // pinsrb xmm1, byte [rsi + r9 + 30], 13 - QUAD $0x1f0e7c203a0f4266; BYTE $0x0d // pinsrb xmm7, byte [rsi + r9 + 31], 13 - QUAD $0x1e064c203a0f4266; BYTE $0x0e // pinsrb xmm1, byte [rsi + r8 + 30], 14 - QUAD $0x1f067c203a0f4266; BYTE $0x0e // pinsrb xmm7, byte [rsi + r8 + 31], 14 - QUAD $0x0f1e0e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 30], 15 - QUAD $0x0f1f0e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 31], 15 + QUAD $0x1e1e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r11 + 30], 9 + QUAD $0x1f1e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r11 + 31], 9 + QUAD $0x1e3e4c203a0f4266; BYTE $0x0a // pinsrb xmm1, byte [rsi + r15 + 30], 10 + QUAD $0x1f3e7c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r15 + 31], 10 + LONG $0x24048b48 // mov rax, qword [rsp] + QUAD $0x1e164c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r10 + 30], 11 + QUAD $0x1f167c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r10 + 31], 11 + QUAD $0x0c1e0e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 30], 12 + QUAD $0x0c1f0e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 31], 12 + QUAD $0x1e2e4c203a0f4266; BYTE $0x0d // pinsrb xmm1, byte [rsi + r13 + 30], 13 + QUAD $0x1f2e7c203a0f4266; BYTE $0x0d // pinsrb xmm7, byte [rsi + r13 + 31], 13 + QUAD $0x0e1e3e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 30], 14 + QUAD $0x0e1f3e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 31], 14 + QUAD $0x0f1e1e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 30], 15 + QUAD $0x0f1f1e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 31], 15 QUAD $0x000000d085df0f66 // pandn xmm0, oword 208[rbp] /* [rip + .LCPI7_13] */ QUAD $0x000000e0a5df0f66 // pandn xmm4, oword 224[rbp] /* [rip + .LCPI7_14] */ LONG $0xe0eb0f66 // por xmm4, xmm0 @@ -37439,9 +38842,9 @@ LBB7_67: LONG $0xcaeb0f66 // por xmm1, xmm2 LONG $0xc36f0f66 // movdqa xmm0, xmm3 LONG $0xc1600f66 // punpcklbw xmm0, xmm1 - QUAD $0x0000c024ac6f0f66; BYTE $0x00 // movdqa xmm5, oword [rsp + 192] + QUAD $0x0000d024ac6f0f66; BYTE $0x00 // movdqa xmm5, oword [rsp + 208] LONG $0xd56f0f66 // movdqa xmm2, xmm5 - QUAD $0x0000b024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 176] + QUAD $0x0000c024b46f0f66; BYTE $0x00 // movdqa xmm6, oword [rsp + 192] LONG $0xd6600f66 // punpcklbw xmm2, xmm6 LONG $0xe26f0f66 // movdqa xmm4, xmm2 LONG $0xe0610f66 // punpcklwd xmm4, xmm0 @@ -37451,437 +38854,34 @@ LBB7_67: LONG $0xc56f0f66 // movdqa xmm0, xmm5 LONG $0xc3610f66 // punpcklwd xmm0, xmm3 LONG $0xeb690f66 // punpckhwd xmm5, xmm3 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] + QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] LONG $0x6c7f0ff3; WORD $0x3088 // movdqu oword [rax + 4*rcx + 48], xmm5 LONG $0x447f0ff3; WORD $0x2088 // movdqu oword [rax + 4*rcx + 32], xmm0 LONG $0x547f0ff3; WORD $0x1088 // movdqu oword [rax + 4*rcx + 16], xmm2 + LONG $0x24048948 // mov qword [rsp], rax LONG $0x247f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm4 LONG $0x10c18348 // add rcx, 16 WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x000000e8248c3b48 // cmp rcx, qword [rsp + 232] JNE LBB7_67 - QUAD $0x0000010824948b4c // mov r10, qword [rsp + 264] - QUAD $0x000000e824943b4c // cmp r10, qword [rsp + 232] + QUAD 
$0x0000010824bc8b4c // mov r15, qword [rsp + 264] + QUAD $0x000000e824bc3b4c // cmp r15, qword [rsp + 232] QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] - QUAD $0x0000014024b48b4c // mov r14, qword [rsp + 320] + LONG $0x24548a44; BYTE $0x08 // mov r10b, byte [rsp + 8] + QUAD $0x0000014024a48b4c // mov r12, qword [rsp + 320] JNE LBB7_69 JMP LBB7_72 -LBB7_180: - WORD $0x894d; BYTE $0xd0 // mov r8, r10 - LONG $0xfce08349 // and r8, -4 - WORD $0x894c; BYTE $0xc3 // mov rbx, r8 - LONG $0x07e3c148 // shl rbx, 7 - WORD $0x0148; BYTE $0xf3 // add rbx, rsi - LONG $0x84348d4f // lea r14, [r12 + 4*r8] - LONG $0xeb280f45 // movaps xmm13, xmm11 - LONG $0xebc60f45; BYTE $0x00 // shufps xmm13, xmm11, 0 - LONG $0xfcc68148; WORD $0x0001; BYTE $0x00 // add rsi, 508 - WORD $0xc931 // xor ecx, ecx - LONG $0x6f0f4466; WORD $0x007d // movdqa xmm15, oword 0[rbp] /* [rip + .LCPI7_0] */ - -LBB7_181: - QUAD $0xfffffe049e100ff3 // movss xmm3, dword [rsi - 508] - QUAD $0xfffe0896100f44f3; BYTE $0xff // movss xmm10, dword [rsi - 504] - QUAD $0xfffe0c8e100f44f3; BYTE $0xff // movss xmm9, dword [rsi - 500] - QUAD $0xfffffe108e100ff3 // movss xmm1, dword [rsi - 496] - QUAD $0xfffe849e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 380], 16 - QUAD $0xffff049e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 252], 32 - LONG $0x213a0f66; WORD $0x845e; BYTE $0x30 // insertps xmm3, dword [rsi - 124], 48 - QUAD $0xfe8896213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm10, dword [rsi - 376], 16 - QUAD $0xff0896213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm10, dword [rsi - 248], 32 - QUAD $0x308856213a0f4466 // insertps xmm10, dword [rsi - 120], 48 - QUAD $0xfe8c8e213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm9, dword [rsi - 372], 16 - QUAD $0xff0c8e213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm9, dword [rsi - 244], 32 - QUAD $0x308c4e213a0f4466 // insertps xmm9, dword [rsi - 116], 48 - QUAD $0xfffe908e213a0f66; WORD $0x10ff // insertps xmm1, dword [rsi - 368], 16 - QUAD $0xffff108e213a0f66; WORD $0x20ff // insertps xmm1, dword [rsi - 240], 32 - LONG $0x213a0f66; WORD $0x904e; BYTE $0x30 // insertps xmm1, dword [rsi - 112], 48 - QUAD $0xfffe1486100f44f3; BYTE $0xff // movss xmm8, dword [rsi - 492] - QUAD $0xfe9486213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm8, dword [rsi - 364], 16 - QUAD $0xff1486213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm8, dword [rsi - 236], 32 - LONG $0xe5280f45 // movaps xmm12, xmm13 - QUAD $0x309446213a0f4466 // insertps xmm8, dword [rsi - 108], 48 - QUAD $0xfffffe1896100ff3 // movss xmm2, dword [rsi - 488] - QUAD $0xfffe9896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 360], 16 - QUAD $0xffff1896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 232], 32 - LONG $0xe3c20f44; BYTE $0x01 // cmpltps xmm12, xmm3 - LONG $0x213a0f66; WORD $0x9856; BYTE $0x30 // insertps xmm2, dword [rsi - 104], 48 - QUAD $0xfffffe1c9e100ff3 // movss xmm3, dword [rsi - 484] - QUAD $0xfffe9c9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 356], 16 - QUAD $0xffff1c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 228], 32 - LONG $0x6b0f4566; BYTE $0xe4 // packssdw xmm12, xmm12 - LONG $0x213a0f66; WORD $0x9c5e; BYTE $0x30 // insertps xmm3, dword [rsi - 100], 48 - QUAD $0xfffffe24a6100ff3 // movss xmm4, dword [rsi - 476] - QUAD $0xfffea4a6213a0f66; WORD $0x10ff // insertps xmm4, dword [rsi - 348], 16 - QUAD $0xffff24a6213a0f66; WORD $0x20ff // insertps xmm4, dword [rsi - 220], 32 - LONG $0x630f4566; BYTE $0xe4 // packsswb xmm12, xmm12 - LONG $0x213a0f66; 
WORD $0xa466; BYTE $0x30 // insertps xmm4, dword [rsi - 92], 48 - LONG $0xfd280f41 // movaps xmm7, xmm13 - QUAD $0xfffffe44ae100ff3 // movss xmm5, dword [rsi - 444] - QUAD $0xfffec4ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 316], 16 - QUAD $0xffff44ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 188], 32 - LONG $0x01fcc20f // cmpltps xmm7, xmm4 - LONG $0x213a0f66; WORD $0xc46e; BYTE $0x30 // insertps xmm5, dword [rsi - 60], 48 - LONG $0xf5280f41 // movaps xmm6, xmm13 - QUAD $0xfffffe6486100ff3 // movss xmm0, dword [rsi - 412] - QUAD $0xfffee486213a0f66; WORD $0x10ff // insertps xmm0, dword [rsi - 284], 16 - QUAD $0xffff6486213a0f66; WORD $0x20ff // insertps xmm0, dword [rsi - 156], 32 - LONG $0x01f5c20f // cmpltps xmm6, xmm5 - LONG $0x213a0f66; WORD $0xe446; BYTE $0x30 // insertps xmm0, dword [rsi - 28], 48 - LONG $0xe5280f41 // movaps xmm4, xmm13 - LONG $0x01e0c20f // cmpltps xmm4, xmm0 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0xc2c20f41; BYTE $0x01 // cmpltps xmm0, xmm10 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0x6f0f4466; BYTE $0xf0 // movdqa xmm14, xmm0 - LONG $0xdb0f4566; BYTE $0xf7 // pand xmm14, xmm15 - LONG $0xf80f4466; BYTE $0xf0 // psubb xmm14, xmm0 - QUAD $0xfffe2096100f44f3; BYTE $0xff // movss xmm10, dword [rsi - 480] - QUAD $0xfea096213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm10, dword [rsi - 352], 16 - LONG $0xdb0f4566; BYTE $0xe7 // pand xmm12, xmm15 - QUAD $0xff2096213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm10, dword [rsi - 224], 32 - LONG $0xeb0f4566; BYTE $0xf4 // por xmm14, xmm12 - LONG $0xed280f41 // movaps xmm5, xmm13 - LONG $0xe9c20f41; BYTE $0x01 // cmpltps xmm5, xmm9 - QUAD $0x30a056213a0f4466 // insertps xmm10, dword [rsi - 96], 48 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xf5710f66; BYTE $0x02 // psllw xmm5, 2 - LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI7_1] */ - LONG $0xe8db0f66 // pand xmm5, xmm0 - LONG $0xeb0f4166; BYTE $0xee // por xmm5, xmm14 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0x01c1c20f // cmpltps xmm0, xmm1 - LONG $0xcd280f41 // movaps xmm1, xmm13 - LONG $0xc8c20f41; BYTE $0x01 // cmpltps xmm1, xmm8 - QUAD $0xfffe288e100f44f3; BYTE $0xff // movss xmm9, dword [rsi - 472] - QUAD $0xfea88e213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm9, dword [rsi - 344], 16 - QUAD $0xff288e213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm9, dword [rsi - 216], 32 - QUAD $0x30a84e213a0f4466 // insertps xmm9, dword [rsi - 88], 48 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 - LONG $0xf0710f66; BYTE $0x03 // psllw xmm0, 3 - LONG $0x6f0f4466; WORD $0x2075 // movdqa xmm14, oword 32[rbp] /* [rip + .LCPI7_2] */ - LONG $0xdb0f4166; BYTE $0xc6 // pand xmm0, xmm14 - LONG $0xc96b0f66 // packssdw xmm1, xmm1 - LONG $0xc9630f66 // packsswb xmm1, xmm1 - LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 - LONG $0xf1710f66; BYTE $0x04 // psllw xmm1, 4 - LONG $0x6f0f4466; WORD $0x3075 // movdqa xmm14, oword 48[rbp] /* [rip + .LCPI7_3] */ - LONG $0xdb0f4166; BYTE $0xce // pand xmm1, xmm14 - LONG $0xc8eb0f66 // por xmm1, xmm0 - QUAD $0xfffe2ca6100f44f3; BYTE $0xff // movss xmm12, dword [rsi - 468] - QUAD $0xfeaca6213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm12, dword [rsi - 340], 16 - QUAD $0xff2ca6213a0f4466; WORD $0xffff; BYTE $0x20 // insertps 
xmm12, dword [rsi - 212], 32 - QUAD $0x30ac66213a0f4466 // insertps xmm12, dword [rsi - 84], 48 - LONG $0xcdeb0f66 // por xmm1, xmm5 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0x01c2c20f // cmpltps xmm0, xmm2 - LONG $0xed280f41 // movaps xmm5, xmm13 - LONG $0x01ebc20f // cmpltps xmm5, xmm3 - QUAD $0xfffffe3096100ff3 // movss xmm2, dword [rsi - 464] - QUAD $0xfffeb096213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 336], 16 - QUAD $0xffff3096213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 208], 32 - LONG $0xff6b0f66 // packssdw xmm7, xmm7 - LONG $0x213a0f66; WORD $0xb056; BYTE $0x30 // insertps xmm2, dword [rsi - 80], 48 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 - LONG $0xf0710f66; BYTE $0x05 // psllw xmm0, 5 - LONG $0x6f0f4466; WORD $0x4075 // movdqa xmm14, oword 64[rbp] /* [rip + .LCPI7_4] */ - LONG $0xdb0f4166; BYTE $0xc6 // pand xmm0, xmm14 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xf5710f66; BYTE $0x06 // psllw xmm5, 6 - LONG $0x5d6f0f66; BYTE $0x50 // movdqa xmm3, oword 80[rbp] /* [rip + .LCPI7_5] */ - LONG $0xebdb0f66 // pand xmm5, xmm3 - LONG $0xe8eb0f66 // por xmm5, xmm0 - LONG $0xc5280f45 // movaps xmm8, xmm13 - LONG $0xc2c20f45; BYTE $0x01 // cmpltps xmm8, xmm10 - QUAD $0xfffffe349e100ff3 // movss xmm3, dword [rsi - 460] - QUAD $0xfffeb49e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 332], 16 - QUAD $0xffff349e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 204], 32 - LONG $0x213a0f66; WORD $0xb45e; BYTE $0x30 // insertps xmm3, dword [rsi - 76], 48 - LONG $0x6b0f4566; BYTE $0xc0 // packssdw xmm8, xmm8 - LONG $0x630f4566; BYTE $0xc0 // packsswb xmm8, xmm8 - LONG $0x710f4166; WORD $0x07f0 // psllw xmm8, 7 - LONG $0x456f0f66; BYTE $0x60 // movdqa xmm0, oword 96[rbp] /* [rip + .LCPI7_6] */ - LONG $0xdb0f4466; BYTE $0xc0 // pand xmm8, xmm0 - LONG $0xeb0f4466; BYTE $0xc5 // por xmm8, xmm5 - QUAD $0xfffe3896100f44f3; BYTE $0xff // movss xmm10, dword [rsi - 456] - QUAD $0xfeb896213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm10, dword [rsi - 328], 16 - QUAD $0xff3896213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm10, dword [rsi - 200], 32 - LONG $0xff630f66 // packsswb xmm7, xmm7 - QUAD $0x30b856213a0f4466 // insertps xmm10, dword [rsi - 72], 48 - LONG $0xeb0f4466; BYTE $0xc1 // por xmm8, xmm1 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0xc1c20f41; BYTE $0x01 // cmpltps xmm0, xmm9 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xc86f0f66 // movdqa xmm1, xmm0 - LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 - LONG $0xc8f80f66 // psubb xmm1, xmm0 - QUAD $0xfffe3c8e100f44f3; BYTE $0xff // movss xmm9, dword [rsi - 452] - QUAD $0xfebc8e213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm9, dword [rsi - 324], 16 - LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 - QUAD $0xff3c8e213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm9, dword [rsi - 196], 32 - LONG $0xcfeb0f66 // por xmm1, xmm7 - LONG $0xed280f41 // movaps xmm5, xmm13 - LONG $0xecc20f41; BYTE $0x01 // cmpltps xmm5, xmm12 - QUAD $0x30bc4e213a0f4466 // insertps xmm9, dword [rsi - 68], 48 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xf5710f66; BYTE $0x02 // psllw xmm5, 2 - LONG $0x6ddb0f66; BYTE $0x10 // pand xmm5, oword 16[rbp] /* [rip + .LCPI7_1] */ - 
LONG $0xe9eb0f66 // por xmm5, xmm1 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0x01c2c20f // cmpltps xmm0, xmm2 - LONG $0xcd280f41 // movaps xmm1, xmm13 - LONG $0x01cbc20f // cmpltps xmm1, xmm3 - QUAD $0xfffffe409e100ff3 // movss xmm3, dword [rsi - 448] - QUAD $0xfffec09e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 320], 16 - QUAD $0xffff409e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 192], 32 - LONG $0x213a0f66; WORD $0xc05e; BYTE $0x30 // insertps xmm3, dword [rsi - 64], 48 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 - LONG $0xf0710f66; BYTE $0x03 // psllw xmm0, 3 - LONG $0x6f0f4466; WORD $0x2065 // movdqa xmm12, oword 32[rbp] /* [rip + .LCPI7_2] */ - LONG $0xdb0f4166; BYTE $0xc4 // pand xmm0, xmm12 - LONG $0xc96b0f66 // packssdw xmm1, xmm1 - LONG $0xc9630f66 // packsswb xmm1, xmm1 - LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 - LONG $0xf1710f66; BYTE $0x04 // psllw xmm1, 4 - LONG $0x4ddb0f66; BYTE $0x30 // pand xmm1, oword 48[rbp] /* [rip + .LCPI7_3] */ - LONG $0xc8eb0f66 // por xmm1, xmm0 - QUAD $0xfffffe4896100ff3 // movss xmm2, dword [rsi - 440] - QUAD $0xfffec896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 312], 16 - QUAD $0xffff4896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 184], 32 - LONG $0x213a0f66; WORD $0xc856; BYTE $0x30 // insertps xmm2, dword [rsi - 56], 48 - LONG $0xcdeb0f66 // por xmm1, xmm5 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0xc2c20f41; BYTE $0x01 // cmpltps xmm0, xmm10 - LONG $0xed280f41 // movaps xmm5, xmm13 - LONG $0xe9c20f41; BYTE $0x01 // cmpltps xmm5, xmm9 - QUAD $0xfffffe4cbe100ff3 // movss xmm7, dword [rsi - 436] - QUAD $0xfffeccbe213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 308], 16 - QUAD $0xffff4cbe213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 180], 32 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0x213a0f66; WORD $0xcc7e; BYTE $0x30 // insertps xmm7, dword [rsi - 52], 48 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 - LONG $0xf0710f66; BYTE $0x05 // psllw xmm0, 5 - LONG $0xdb0f4166; BYTE $0xc6 // pand xmm0, xmm14 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xf5710f66; BYTE $0x06 // psllw xmm5, 6 - LONG $0x6ddb0f66; BYTE $0x50 // pand xmm5, oword 80[rbp] /* [rip + .LCPI7_5] */ - LONG $0xe8eb0f66 // por xmm5, xmm0 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0x01c3c20f // cmpltps xmm0, xmm3 - QUAD $0xfffffe509e100ff3 // movss xmm3, dword [rsi - 432] - QUAD $0xfffed09e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 304], 16 - QUAD $0xffff509e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 176], 32 - LONG $0x213a0f66; WORD $0xd05e; BYTE $0x30 // insertps xmm3, dword [rsi - 48], 48 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xf0710f66; BYTE $0x07 // psllw xmm0, 7 - LONG $0x6f0f4466; WORD $0x6055 // movdqa xmm10, oword 96[rbp] /* [rip + .LCPI7_6] */ - LONG $0xdb0f4166; BYTE $0xc2 // pand xmm0, xmm10 - LONG $0xc5eb0f66 // por xmm0, xmm5 - QUAD $0xfffffe54ae100ff3 // movss xmm5, dword [rsi - 428] - QUAD $0xfffed4ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 300], 16 - QUAD $0xffff54ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 172], 32 - LONG $0x213a0f66; WORD $0xd46e; BYTE $0x30 // insertps xmm5, dword [rsi - 44], 48 - LONG 
$0xc1eb0f66 // por xmm0, xmm1 - QUAD $0xfffe588e100f44f3; BYTE $0xff // movss xmm9, dword [rsi - 424] - QUAD $0xfed88e213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm9, dword [rsi - 296], 16 - QUAD $0xff588e213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm9, dword [rsi - 168], 32 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - QUAD $0x30d84e213a0f4466 // insertps xmm9, dword [rsi - 40], 48 - LONG $0x620f4466; BYTE $0xc0 // punpckldq xmm8, xmm0 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0x01c2c20f // cmpltps xmm0, xmm2 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xc86f0f66 // movdqa xmm1, xmm0 - LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 - LONG $0xc8f80f66 // psubb xmm1, xmm0 - QUAD $0xfffffe5c96100ff3 // movss xmm2, dword [rsi - 420] - QUAD $0xfffedc96213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 292], 16 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - QUAD $0xffff5c96213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 164], 32 - LONG $0xceeb0f66 // por xmm1, xmm6 - LONG $0xf5280f41 // movaps xmm6, xmm13 - LONG $0x01f7c20f // cmpltps xmm6, xmm7 - LONG $0x213a0f66; WORD $0xdc56; BYTE $0x30 // insertps xmm2, dword [rsi - 36], 48 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0xf6710f66; BYTE $0x02 // psllw xmm6, 2 - LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI7_1] */ - LONG $0xf0db0f66 // pand xmm6, xmm0 - LONG $0xf1eb0f66 // por xmm6, xmm1 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0x01c3c20f // cmpltps xmm0, xmm3 - LONG $0xcd280f41 // movaps xmm1, xmm13 - LONG $0x01cdc20f // cmpltps xmm1, xmm5 - QUAD $0xfffffe609e100ff3 // movss xmm3, dword [rsi - 416] - QUAD $0xfffee09e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 288], 16 - QUAD $0xffff609e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 160], 32 - LONG $0x213a0f66; WORD $0xe05e; BYTE $0x30 // insertps xmm3, dword [rsi - 32], 48 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 - LONG $0xf0710f66; BYTE $0x03 // psllw xmm0, 3 - LONG $0xdb0f4166; BYTE $0xc4 // pand xmm0, xmm12 - LONG $0xc96b0f66 // packssdw xmm1, xmm1 - LONG $0xc9630f66 // packsswb xmm1, xmm1 - LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 - LONG $0xf1710f66; BYTE $0x04 // psllw xmm1, 4 - LONG $0x6f0f4466; WORD $0x3065 // movdqa xmm12, oword 48[rbp] /* [rip + .LCPI7_3] */ - LONG $0xdb0f4166; BYTE $0xcc // pand xmm1, xmm12 - LONG $0xc8eb0f66 // por xmm1, xmm0 - QUAD $0xfffffe68ae100ff3 // movss xmm5, dword [rsi - 408] - QUAD $0xfffee8ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 280], 16 - QUAD $0xffff68ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 152], 32 - LONG $0x213a0f66; WORD $0xe86e; BYTE $0x30 // insertps xmm5, dword [rsi - 24], 48 - LONG $0xceeb0f66 // por xmm1, xmm6 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0xc1c20f41; BYTE $0x01 // cmpltps xmm0, xmm9 - LONG $0xf5280f41 // movaps xmm6, xmm13 - LONG $0x01f2c20f // cmpltps xmm6, xmm2 - QUAD $0xfffffe6cbe100ff3 // movss xmm7, dword [rsi - 404] - QUAD $0xfffeecbe213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 276], 16 - QUAD $0xffff6cbe213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 148], 32 - LONG $0xe46b0f66 // packssdw xmm4, xmm4 - LONG $0x213a0f66; WORD $0xec7e; BYTE $0x30 // insertps xmm7, dword [rsi - 20], 48 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 
// packsswb xmm0, xmm0 - LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 - LONG $0xf0710f66; BYTE $0x05 // psllw xmm0, 5 - LONG $0xdb0f4166; BYTE $0xc6 // pand xmm0, xmm14 - LONG $0xf66b0f66 // packssdw xmm6, xmm6 - LONG $0xf6630f66 // packsswb xmm6, xmm6 - LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 - LONG $0xf6710f66; BYTE $0x06 // psllw xmm6, 6 - LONG $0x6f0f4466; WORD $0x504d // movdqa xmm9, oword 80[rbp] /* [rip + .LCPI7_5] */ - LONG $0xdb0f4166; BYTE $0xf1 // pand xmm6, xmm9 - LONG $0xf0eb0f66 // por xmm6, xmm0 - LONG $0xd5280f41 // movaps xmm2, xmm13 - LONG $0x01d3c20f // cmpltps xmm2, xmm3 - QUAD $0xfffffe7086100ff3 // movss xmm0, dword [rsi - 400] - QUAD $0xfffef086213a0f66; WORD $0x10ff // insertps xmm0, dword [rsi - 272], 16 - QUAD $0xffff7086213a0f66; WORD $0x20ff // insertps xmm0, dword [rsi - 144], 32 - LONG $0x213a0f66; WORD $0xf046; BYTE $0x30 // insertps xmm0, dword [rsi - 16], 48 - LONG $0xd26b0f66 // packssdw xmm2, xmm2 - LONG $0xd2630f66 // packsswb xmm2, xmm2 - LONG $0xf2710f66; BYTE $0x07 // psllw xmm2, 7 - LONG $0xdb0f4166; BYTE $0xd2 // pand xmm2, xmm10 - LONG $0xd6eb0f66 // por xmm2, xmm6 - QUAD $0xfffffe74b6100ff3 // movss xmm6, dword [rsi - 396] - QUAD $0xfffef4b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 268], 16 - QUAD $0xffff74b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 140], 32 - LONG $0xe4630f66 // packsswb xmm4, xmm4 - LONG $0x213a0f66; WORD $0xf476; BYTE $0x30 // insertps xmm6, dword [rsi - 12], 48 - LONG $0xd1eb0f66 // por xmm2, xmm1 - LONG $0xcd280f41 // movaps xmm1, xmm13 - LONG $0x01cdc20f // cmpltps xmm1, xmm5 - LONG $0xc96b0f66 // packssdw xmm1, xmm1 - LONG $0xc9630f66 // packsswb xmm1, xmm1 - LONG $0xe96f0f66 // movdqa xmm5, xmm1 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xe9f80f66 // psubb xmm5, xmm1 - QUAD $0xfffffe789e100ff3 // movss xmm3, dword [rsi - 392] - QUAD $0xfffef89e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 264], 16 - LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 - QUAD $0xffff789e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 136], 32 - LONG $0xeceb0f66 // por xmm5, xmm4 - LONG $0xe5280f41 // movaps xmm4, xmm13 - LONG $0x01e7c20f // cmpltps xmm4, xmm7 - LONG $0x213a0f66; WORD $0xf85e; BYTE $0x30 // insertps xmm3, dword [rsi - 8], 48 - LONG $0xe46b0f66 // packssdw xmm4, xmm4 - LONG $0xe4630f66 // packsswb xmm4, xmm4 - LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 - LONG $0xf4710f66; BYTE $0x02 // psllw xmm4, 2 - LONG $0x65db0f66; BYTE $0x10 // pand xmm4, oword 16[rbp] /* [rip + .LCPI7_1] */ - LONG $0xe5eb0f66 // por xmm4, xmm5 - LONG $0xed280f41 // movaps xmm5, xmm13 - LONG $0x01e8c20f // cmpltps xmm5, xmm0 - LONG $0xcd280f41 // movaps xmm1, xmm13 - LONG $0x01cec20f // cmpltps xmm1, xmm6 - QUAD $0xfffffe7c86100ff3 // movss xmm0, dword [rsi - 388] - QUAD $0xfffefc86213a0f66; WORD $0x10ff // insertps xmm0, dword [rsi - 260], 16 - QUAD $0xffff7c86213a0f66; WORD $0x20ff // insertps xmm0, dword [rsi - 132], 32 - LONG $0x213a0f66; WORD $0xfc46; BYTE $0x30 // insertps xmm0, dword [rsi - 4], 48 - LONG $0xed6b0f66 // packssdw xmm5, xmm5 - LONG $0xed630f66 // packsswb xmm5, xmm5 - LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 - LONG $0xf5710f66; BYTE $0x03 // psllw xmm5, 3 - LONG $0x6ddb0f66; BYTE $0x20 // pand xmm5, oword 32[rbp] /* [rip + .LCPI7_2] */ - LONG $0xc96b0f66 // packssdw xmm1, xmm1 - LONG $0xc9630f66 // packsswb xmm1, xmm1 - LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 - LONG $0xf1710f66; BYTE $0x04 // psllw xmm1, 4 - LONG $0xdb0f4166; BYTE $0xcc // pand 
xmm1, xmm12 - LONG $0xcdeb0f66 // por xmm1, xmm5 - QUAD $0xfffffe80ae100ff3 // movss xmm5, dword [rsi - 384] - QUAD $0xffff00ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 256], 16 - LONG $0x213a0f66; WORD $0x806e; BYTE $0x20 // insertps xmm5, dword [rsi - 128], 32 - LONG $0xcceb0f66 // por xmm1, xmm4 - LONG $0xe5280f41 // movaps xmm4, xmm13 - LONG $0x01e3c20f // cmpltps xmm4, xmm3 - LONG $0xdd280f41 // movaps xmm3, xmm13 - LONG $0x01d8c20f // cmpltps xmm3, xmm0 - LONG $0x213a0f66; WORD $0x302e // insertps xmm5, dword [rsi], 48 - LONG $0xe46b0f66 // packssdw xmm4, xmm4 - LONG $0xe4630f66 // packsswb xmm4, xmm4 - LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 - LONG $0xf4710f66; BYTE $0x05 // psllw xmm4, 5 - LONG $0xdb0f4166; BYTE $0xe6 // pand xmm4, xmm14 - LONG $0xdb6b0f66 // packssdw xmm3, xmm3 - LONG $0xdb630f66 // packsswb xmm3, xmm3 - LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 - LONG $0xf3710f66; BYTE $0x06 // psllw xmm3, 6 - LONG $0xdb0f4166; BYTE $0xd9 // pand xmm3, xmm9 - LONG $0xdceb0f66 // por xmm3, xmm4 - LONG $0xc5280f41 // movaps xmm0, xmm13 - LONG $0x01c5c20f // cmpltps xmm0, xmm5 - LONG $0xc06b0f66 // packssdw xmm0, xmm0 - LONG $0xc0630f66 // packsswb xmm0, xmm0 - LONG $0xf0710f66; BYTE $0x07 // psllw xmm0, 7 - LONG $0xdb0f4166; BYTE $0xc2 // pand xmm0, xmm10 - LONG $0xc3eb0f66 // por xmm0, xmm3 - LONG $0xc1eb0f66 // por xmm0, xmm1 - LONG $0xd0620f66 // punpckldq xmm2, xmm0 - LONG $0x600f4466; BYTE $0xc2 // punpcklbw xmm8, xmm2 - LONG $0x380f4466; WORD $0x4500; BYTE $0x70 // pshufb xmm8, oword 112[rbp] /* [rip + .LCPI7_7] */ - LONG $0x7f0f45f3; WORD $0x8c04 // movdqu oword [r12 + 4*rcx], xmm8 - LONG $0x04c18348 // add rcx, 4 - LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // add rsi, 512 - WORD $0x3949; BYTE $0xc8 // cmp r8, rcx - JNE LBB7_181 - WORD $0x394d; BYTE $0xc2 // cmp r10, r8 - JNE LBB7_183 - JMP LBB7_186 - -LBB7_122: - LONG $0xf8e68349 // and r14, -8 - WORD $0x894c; BYTE $0xf0 // mov rax, r14 +LBB7_124: + LONG $0xf8e78349 // and r15, -8 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x06e0c148 // shl rax, 6 WORD $0x0148; BYTE $0xf0 // add rax, rsi - LONG $0x24448948; BYTE $0x30 // mov qword [rsp + 48], rax - LONG $0x2474894c; BYTE $0x18 // mov qword [rsp + 24], r14 - LONG $0xb4048d4b // lea rax, [r12 + 4*r14] - LONG $0x24048948 // mov qword [rsp], rax + LONG $0x24448948; BYTE $0x38 // mov qword [rsp + 56], rax + LONG $0x247c894c; BYTE $0x28 // mov qword [rsp + 40], r15 + LONG $0x24048b48 // mov rax, qword [rsp] + LONG $0xb8048d4a // lea rax, [rax + 4*r15] + LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax QUAD $0x0000f024846e0f66; BYTE $0x00 // movd xmm0, dword [rsp + 240] LONG $0xc0700ff2; BYTE $0xe0 // pshuflw xmm0, xmm0, 224 LONG $0xc0700f66; BYTE $0x00 // pshufd xmm0, xmm0, 0 @@ -37893,9 +38893,8 @@ LBB7_122: LONG $0x6f0f4466; WORD $0x4065 // movdqa xmm12, oword 64[rbp] /* [rip + .LCPI7_4] */ LONG $0x6f0f4466; WORD $0x506d // movdqa xmm13, oword 80[rbp] /* [rip + .LCPI7_5] */ LONG $0x6f0f4466; WORD $0x6075 // movdqa xmm14, oword 96[rbp] /* [rip + .LCPI7_6] */ - QUAD $0x0000008024a4894c // mov qword [rsp + 128], r12 -LBB7_123: +LBB7_125: LONG $0x247c894c; BYTE $0x10 // mov qword [rsp + 16], r15 LONG $0x06e7c149 // shl r15, 6 WORD $0x894d; BYTE $0xf9 // mov r9, r15 @@ -37931,7 +38930,7 @@ LBB7_123: QUAD $0x02020e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 2], 2 QUAD $0x03022674c40f4266 // pinsrw xmm6, word [rsi + r12 + 2], 3 LONG $0x44b70f42; WORD $0x0c3e // movzx eax, word [rsi + r15 + 12] - LONG $0x08244489 // mov dword [rsp + 8], eax + 
LONG $0x20244489 // mov dword [rsp + 32], eax QUAD $0x04022e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 2], 4 LONG $0x6e0f4166; BYTE $0xd3 // movd xmm2, r11d LONG $0x5cb70f46; WORD $0x0e3e // movzx r11d, word [rsi + r15 + 14] @@ -37941,7 +38940,7 @@ LBB7_123: LONG $0x74c40f66; WORD $0x023e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 2], 6 LONG $0x6e0f4166; BYTE $0xda // movd xmm3, r10d LONG $0x44b70f42; WORD $0x123e // movzx eax, word [rsi + r15 + 18] - LONG $0x28244489 // mov dword [rsp + 40], eax + LONG $0x18244489 // mov dword [rsp + 24], eax LONG $0x74c40f66; WORD $0x021e; BYTE $0x07 // pinsrw xmm6, word [rsi + rbx + 2], 7 LONG $0xf0650f66 // pcmpgtw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -37975,7 +38974,7 @@ LBB7_123: LONG $0x5cc40f66; WORD $0x083e; BYTE $0x06 // pinsrw xmm3, word [rsi + rdi + 8], 6 LONG $0x5cc40f66; WORD $0x081e; BYTE $0x07 // pinsrw xmm3, word [rsi + rbx + 8], 7 LONG $0xcceb0f66 // por xmm1, xmm4 - LONG $0x7c6e0f66; WORD $0x0824 // movd xmm7, dword [rsp + 8] + LONG $0x7c6e0f66; WORD $0x2024 // movd xmm7, dword [rsp + 32] LONG $0x44b70f42; WORD $0x163e // movzx eax, word [rsi + r15 + 22] LONG $0xd0650f66 // pcmpgtw xmm2, xmm0 LONG $0xd2630f66 // packsswb xmm2, xmm2 @@ -38013,7 +39012,7 @@ LBB7_123: LONG $0x7cc40f66; WORD $0x0c3e; BYTE $0x06 // pinsrw xmm7, word [rsi + rdi + 12], 6 LONG $0x7cc40f66; WORD $0x0c1e; BYTE $0x07 // pinsrw xmm7, word [rsi + rbx + 12], 7 LONG $0xdaeb0f66 // por xmm3, xmm2 - LONG $0x6e0f4466; WORD $0x2444; BYTE $0x28 // movd xmm8, dword [rsp + 40] + LONG $0x6e0f4466; WORD $0x2444; BYTE $0x18 // movd xmm8, dword [rsp + 24] LONG $0x74b70f46; WORD $0x1c3e // movzx r14d, word [rsi + r15 + 28] LONG $0xf0650f66 // pcmpgtw xmm6, xmm0 LONG $0xf6630f66 // packsswb xmm6, xmm6 @@ -38070,7 +39069,7 @@ LBB7_123: LONG $0xf9eb0f66 // por xmm7, xmm1 LONG $0xf26e0f66 // movd xmm6, edx LONG $0x54b70f42; WORD $0x243e // movzx edx, word [rsi + r15 + 36] - LONG $0x20245489 // mov dword [rsp + 32], edx + LONG $0x30245489 // mov dword [rsp + 48], edx QUAD $0x0114066cc40f4266 // pinsrw xmm5, word [rsi + r8 + 20], 1 QUAD $0x02140e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 20], 2 QUAD $0x0314266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 20], 3 @@ -38086,7 +39085,7 @@ LBB7_123: LONG $0xefeb0f66 // por xmm5, xmm7 LONG $0x6e0f4166; BYTE $0xfe // movd xmm7, r14d LONG $0x54b70f42; WORD $0x263e // movzx edx, word [rsi + r15 + 38] - LONG $0x28245489 // mov dword [rsp + 40], edx + LONG $0x18245489 // mov dword [rsp + 24], edx QUAD $0x01160654c40f4266 // pinsrw xmm2, word [rsi + r8 + 22], 1 QUAD $0x02160e54c40f4266 // pinsrw xmm2, word [rsi + r9 + 22], 2 QUAD $0x03162654c40f4266 // pinsrw xmm2, word [rsi + r12 + 22], 3 @@ -38117,7 +39116,7 @@ LBB7_123: LONG $0xddeb0f66 // por xmm3, xmm5 LONG $0xe86e0f66 // movd xmm5, eax LONG $0x44b70f42; WORD $0x2a3e // movzx eax, word [rsi + r15 + 42] - LONG $0x08244489 // mov dword [rsp + 8], eax + LONG $0x20244489 // mov dword [rsp + 32], eax QUAD $0x011a0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 26], 1 QUAD $0x021a0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 26], 2 QUAD $0x031a2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 26], 3 @@ -38157,7 +39156,7 @@ LBB7_123: LONG $0xf2710f66; BYTE $0x07 // psllw xmm2, 7 LONG $0xdb0f4166; BYTE $0xd6 // pand xmm2, xmm14 LONG $0xd7eb0f66 // por xmm2, xmm7 - LONG $0x746e0f66; WORD $0x2024 // movd xmm6, dword [rsp + 32] + LONG $0x746e0f66; WORD $0x3024 // movd xmm6, dword [rsp + 48] LONG $0x54b70f42; WORD $0x2e3e // movzx edx, word [rsi + r15 + 46] QUAD $0x0120066cc40f4266 // 
pinsrw xmm5, word [rsi + r8 + 32], 1 QUAD $0x02200e6cc40f4266 // pinsrw xmm5, word [rsi + r9 + 32], 2 @@ -38178,7 +39177,7 @@ LBB7_123: LONG $0xf96f0f66 // movdqa xmm7, xmm1 LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 LONG $0xf9f80f66 // psubb xmm7, xmm1 - LONG $0x5c6e0f66; WORD $0x2824 // movd xmm3, dword [rsp + 40] + LONG $0x5c6e0f66; WORD $0x1824 // movd xmm3, dword [rsp + 24] LONG $0x5cb70f46; WORD $0x303e // movzx r11d, word [rsi + r15 + 48] LONG $0x6cc40f66; WORD $0x201e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 32], 7 LONG $0xe8650f66 // pcmpgtw xmm5, xmm0 @@ -38213,7 +39212,7 @@ LBB7_123: LONG $0xf6710f66; BYTE $0x02 // psllw xmm6, 2 LONG $0xdb0f4166; BYTE $0xf1 // pand xmm6, xmm9 LONG $0xf7eb0f66 // por xmm6, xmm7 - LONG $0x4c6e0f66; WORD $0x0824 // movd xmm1, dword [rsp + 8] + LONG $0x4c6e0f66; WORD $0x2024 // movd xmm1, dword [rsp + 32] LONG $0x74b70f46; WORD $0x343e // movzx r14d, word [rsi + r15 + 52] LONG $0x6cc40f66; WORD $0x281e; BYTE $0x07 // pinsrw xmm5, word [rsi + rbx + 40], 7 LONG $0xd8650f66 // pcmpgtw xmm3, xmm0 @@ -38288,6 +39287,7 @@ LBB7_123: LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 LONG $0xe9f80f66 // psubb xmm5, xmm1 LONG $0x6e0f4166; BYTE $0xce // movd xmm1, r14d + LONG $0x24348b4c // mov r14, qword [rsp] QUAD $0x01300674c40f4266 // pinsrw xmm6, word [rsi + r8 + 48], 1 QUAD $0x02300e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 48], 2 QUAD $0x03302674c40f4266 // pinsrw xmm6, word [rsi + r12 + 48], 3 @@ -38371,7 +39371,6 @@ LBB7_123: QUAD $0x013e0674c40f4266 // pinsrw xmm6, word [rsi + r8 + 62], 1 QUAD $0x023e0e74c40f4266 // pinsrw xmm6, word [rsi + r9 + 62], 2 QUAD $0x033e2674c40f4266 // pinsrw xmm6, word [rsi + r12 + 62], 3 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x043e2e74c40f4266 // pinsrw xmm6, word [rsi + r13 + 62], 4 LONG $0x74c40f66; WORD $0x3e0e; BYTE $0x05 // pinsrw xmm6, word [rsi + rcx + 62], 5 LONG $0x74c40f66; WORD $0x3e3e; BYTE $0x06 // pinsrw xmm6, word [rsi + rdi + 62], 6 @@ -38394,19 +39393,428 @@ LBB7_123: LONG $0xe2600f66 // punpcklbw xmm4, xmm2 LONG $0xe3610f66 // punpcklwd xmm4, xmm3 LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] - LONG $0x247f0ff3; BYTE $0x88 // movdqu oword [rax + 4*rcx], xmm4 - LONG $0x4c7f0ff3; WORD $0x1088 // movdqu oword [rax + 4*rcx + 16], xmm1 + LONG $0x7f0f41f3; WORD $0x8e24 // movdqu oword [r14 + 4*rcx], xmm4 + LONG $0x2434894c // mov qword [rsp], r14 + LONG $0x7f0f41f3; WORD $0x8e4c; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm1 LONG $0x08c18348 // add rcx, 8 WORD $0x8949; BYTE $0xcf // mov r15, rcx - LONG $0x244c3b48; BYTE $0x18 // cmp rcx, qword [rsp + 24] - JNE LBB7_123 - QUAD $0x0000011024b48b4c // mov r14, qword [rsp + 272] - LONG $0x24743b4c; BYTE $0x18 // cmp r14, qword [rsp + 24] - QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] - LONG $0x24248b4c // mov r12, qword [rsp] - LONG $0x24748b48; BYTE $0x30 // mov rsi, qword [rsp + 48] + LONG $0x244c3b48; BYTE $0x28 // cmp rcx, qword [rsp + 40] JNE LBB7_125 - JMP LBB7_128 + QUAD $0x0000011024bc8b4c // mov r15, qword [rsp + 272] + LONG $0x247c3b4c; BYTE $0x28 // cmp r15, qword [rsp + 40] + QUAD $0x00000088249c8b4c // mov r11, qword [rsp + 136] + LONG $0x24748b4c; BYTE $0x08 // mov r14, qword [rsp + 8] + LONG $0x24748b48; BYTE $0x38 // mov rsi, qword [rsp + 56] + JNE LBB7_127 + JMP LBB7_130 + +LBB7_182: + WORD $0x894c; BYTE $0xd0 // mov rax, r10 + LONG $0xfce08348 // and rax, -4 + WORD $0x8948; BYTE $0xc2 // mov rdx, rax + LONG $0x07e2c148 // shl rdx, 7 + WORD $0x0148; BYTE $0xf2 // add rdx, rsi + LONG 
$0x240c8b48 // mov rcx, qword [rsp] + LONG $0x813c8d4c // lea r15, [rcx + 4*rax] + LONG $0xeb280f45 // movaps xmm13, xmm11 + LONG $0xebc60f45; BYTE $0x00 // shufps xmm13, xmm11, 0 + LONG $0xfcc68148; WORD $0x0001; BYTE $0x00 // add rsi, 508 + WORD $0xc931 // xor ecx, ecx + LONG $0x6f0f4466; WORD $0x007d // movdqa xmm15, oword 0[rbp] /* [rip + .LCPI7_0] */ + LONG $0x243c8b48 // mov rdi, qword [rsp] + +LBB7_183: + QUAD $0xfffffe049e100ff3 // movss xmm3, dword [rsi - 508] + QUAD $0xfffe0896100f44f3; BYTE $0xff // movss xmm10, dword [rsi - 504] + QUAD $0xfffe0c8e100f44f3; BYTE $0xff // movss xmm9, dword [rsi - 500] + QUAD $0xfffffe108e100ff3 // movss xmm1, dword [rsi - 496] + QUAD $0xfffe849e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 380], 16 + QUAD $0xffff049e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 252], 32 + LONG $0x213a0f66; WORD $0x845e; BYTE $0x30 // insertps xmm3, dword [rsi - 124], 48 + QUAD $0xfe8896213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm10, dword [rsi - 376], 16 + QUAD $0xff0896213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm10, dword [rsi - 248], 32 + QUAD $0x308856213a0f4466 // insertps xmm10, dword [rsi - 120], 48 + QUAD $0xfe8c8e213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm9, dword [rsi - 372], 16 + QUAD $0xff0c8e213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm9, dword [rsi - 244], 32 + QUAD $0x308c4e213a0f4466 // insertps xmm9, dword [rsi - 116], 48 + QUAD $0xfffe908e213a0f66; WORD $0x10ff // insertps xmm1, dword [rsi - 368], 16 + QUAD $0xffff108e213a0f66; WORD $0x20ff // insertps xmm1, dword [rsi - 240], 32 + LONG $0x213a0f66; WORD $0x904e; BYTE $0x30 // insertps xmm1, dword [rsi - 112], 48 + QUAD $0xfffe1486100f44f3; BYTE $0xff // movss xmm8, dword [rsi - 492] + QUAD $0xfe9486213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm8, dword [rsi - 364], 16 + QUAD $0xff1486213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm8, dword [rsi - 236], 32 + LONG $0xe5280f45 // movaps xmm12, xmm13 + QUAD $0x309446213a0f4466 // insertps xmm8, dword [rsi - 108], 48 + QUAD $0xfffffe1896100ff3 // movss xmm2, dword [rsi - 488] + QUAD $0xfffe9896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 360], 16 + QUAD $0xffff1896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 232], 32 + LONG $0xe3c20f44; BYTE $0x01 // cmpltps xmm12, xmm3 + LONG $0x213a0f66; WORD $0x9856; BYTE $0x30 // insertps xmm2, dword [rsi - 104], 48 + QUAD $0xfffffe1c9e100ff3 // movss xmm3, dword [rsi - 484] + QUAD $0xfffe9c9e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 356], 16 + QUAD $0xffff1c9e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 228], 32 + LONG $0x6b0f4566; BYTE $0xe4 // packssdw xmm12, xmm12 + LONG $0x213a0f66; WORD $0x9c5e; BYTE $0x30 // insertps xmm3, dword [rsi - 100], 48 + QUAD $0xfffffe24a6100ff3 // movss xmm4, dword [rsi - 476] + QUAD $0xfffea4a6213a0f66; WORD $0x10ff // insertps xmm4, dword [rsi - 348], 16 + QUAD $0xffff24a6213a0f66; WORD $0x20ff // insertps xmm4, dword [rsi - 220], 32 + LONG $0x630f4566; BYTE $0xe4 // packsswb xmm12, xmm12 + LONG $0x213a0f66; WORD $0xa466; BYTE $0x30 // insertps xmm4, dword [rsi - 92], 48 + LONG $0xfd280f41 // movaps xmm7, xmm13 + QUAD $0xfffffe44ae100ff3 // movss xmm5, dword [rsi - 444] + QUAD $0xfffec4ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 316], 16 + QUAD $0xffff44ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 188], 32 + LONG $0x01fcc20f // cmpltps xmm7, xmm4 + LONG $0x213a0f66; WORD $0xc46e; BYTE $0x30 // insertps xmm5, dword [rsi - 60], 48 + LONG $0xf5280f41 // movaps 
xmm6, xmm13 + QUAD $0xfffffe6486100ff3 // movss xmm0, dword [rsi - 412] + QUAD $0xfffee486213a0f66; WORD $0x10ff // insertps xmm0, dword [rsi - 284], 16 + QUAD $0xffff6486213a0f66; WORD $0x20ff // insertps xmm0, dword [rsi - 156], 32 + LONG $0x01f5c20f // cmpltps xmm6, xmm5 + LONG $0x213a0f66; WORD $0xe446; BYTE $0x30 // insertps xmm0, dword [rsi - 28], 48 + LONG $0xe5280f41 // movaps xmm4, xmm13 + LONG $0x01e0c20f // cmpltps xmm4, xmm0 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0xc2c20f41; BYTE $0x01 // cmpltps xmm0, xmm10 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0x6f0f4466; BYTE $0xf0 // movdqa xmm14, xmm0 + LONG $0xdb0f4566; BYTE $0xf7 // pand xmm14, xmm15 + LONG $0xf80f4466; BYTE $0xf0 // psubb xmm14, xmm0 + QUAD $0xfffe2096100f44f3; BYTE $0xff // movss xmm10, dword [rsi - 480] + QUAD $0xfea096213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm10, dword [rsi - 352], 16 + LONG $0xdb0f4566; BYTE $0xe7 // pand xmm12, xmm15 + QUAD $0xff2096213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm10, dword [rsi - 224], 32 + LONG $0xeb0f4566; BYTE $0xf4 // por xmm14, xmm12 + LONG $0xed280f41 // movaps xmm5, xmm13 + LONG $0xe9c20f41; BYTE $0x01 // cmpltps xmm5, xmm9 + QUAD $0x30a056213a0f4466 // insertps xmm10, dword [rsi - 96], 48 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xf5710f66; BYTE $0x02 // psllw xmm5, 2 + LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI7_1] */ + LONG $0xe8db0f66 // pand xmm5, xmm0 + LONG $0xeb0f4166; BYTE $0xee // por xmm5, xmm14 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0x01c1c20f // cmpltps xmm0, xmm1 + LONG $0xcd280f41 // movaps xmm1, xmm13 + LONG $0xc8c20f41; BYTE $0x01 // cmpltps xmm1, xmm8 + QUAD $0xfffe288e100f44f3; BYTE $0xff // movss xmm9, dword [rsi - 472] + QUAD $0xfea88e213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm9, dword [rsi - 344], 16 + QUAD $0xff288e213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm9, dword [rsi - 216], 32 + QUAD $0x30a84e213a0f4466 // insertps xmm9, dword [rsi - 88], 48 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 + LONG $0xf0710f66; BYTE $0x03 // psllw xmm0, 3 + LONG $0x6f0f4466; WORD $0x2075 // movdqa xmm14, oword 32[rbp] /* [rip + .LCPI7_2] */ + LONG $0xdb0f4166; BYTE $0xc6 // pand xmm0, xmm14 + LONG $0xc96b0f66 // packssdw xmm1, xmm1 + LONG $0xc9630f66 // packsswb xmm1, xmm1 + LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 + LONG $0xf1710f66; BYTE $0x04 // psllw xmm1, 4 + LONG $0x6f0f4466; WORD $0x3075 // movdqa xmm14, oword 48[rbp] /* [rip + .LCPI7_3] */ + LONG $0xdb0f4166; BYTE $0xce // pand xmm1, xmm14 + LONG $0xc8eb0f66 // por xmm1, xmm0 + QUAD $0xfffe2ca6100f44f3; BYTE $0xff // movss xmm12, dword [rsi - 468] + QUAD $0xfeaca6213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm12, dword [rsi - 340], 16 + QUAD $0xff2ca6213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm12, dword [rsi - 212], 32 + QUAD $0x30ac66213a0f4466 // insertps xmm12, dword [rsi - 84], 48 + LONG $0xcdeb0f66 // por xmm1, xmm5 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0x01c2c20f // cmpltps xmm0, xmm2 + LONG $0xed280f41 // movaps xmm5, xmm13 + LONG $0x01ebc20f // cmpltps xmm5, xmm3 + QUAD $0xfffffe3096100ff3 // movss xmm2, dword [rsi - 464] + QUAD $0xfffeb096213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 336], 16 + QUAD $0xffff3096213a0f66; WORD $0x20ff // 
insertps xmm2, dword [rsi - 208], 32 + LONG $0xff6b0f66 // packssdw xmm7, xmm7 + LONG $0x213a0f66; WORD $0xb056; BYTE $0x30 // insertps xmm2, dword [rsi - 80], 48 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 + LONG $0xf0710f66; BYTE $0x05 // psllw xmm0, 5 + LONG $0x6f0f4466; WORD $0x4075 // movdqa xmm14, oword 64[rbp] /* [rip + .LCPI7_4] */ + LONG $0xdb0f4166; BYTE $0xc6 // pand xmm0, xmm14 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xf5710f66; BYTE $0x06 // psllw xmm5, 6 + LONG $0x5d6f0f66; BYTE $0x50 // movdqa xmm3, oword 80[rbp] /* [rip + .LCPI7_5] */ + LONG $0xebdb0f66 // pand xmm5, xmm3 + LONG $0xe8eb0f66 // por xmm5, xmm0 + LONG $0xc5280f45 // movaps xmm8, xmm13 + LONG $0xc2c20f45; BYTE $0x01 // cmpltps xmm8, xmm10 + QUAD $0xfffffe349e100ff3 // movss xmm3, dword [rsi - 460] + QUAD $0xfffeb49e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 332], 16 + QUAD $0xffff349e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 204], 32 + LONG $0x213a0f66; WORD $0xb45e; BYTE $0x30 // insertps xmm3, dword [rsi - 76], 48 + LONG $0x6b0f4566; BYTE $0xc0 // packssdw xmm8, xmm8 + LONG $0x630f4566; BYTE $0xc0 // packsswb xmm8, xmm8 + LONG $0x710f4166; WORD $0x07f0 // psllw xmm8, 7 + LONG $0x456f0f66; BYTE $0x60 // movdqa xmm0, oword 96[rbp] /* [rip + .LCPI7_6] */ + LONG $0xdb0f4466; BYTE $0xc0 // pand xmm8, xmm0 + LONG $0xeb0f4466; BYTE $0xc5 // por xmm8, xmm5 + QUAD $0xfffe3896100f44f3; BYTE $0xff // movss xmm10, dword [rsi - 456] + QUAD $0xfeb896213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm10, dword [rsi - 328], 16 + QUAD $0xff3896213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm10, dword [rsi - 200], 32 + LONG $0xff630f66 // packsswb xmm7, xmm7 + QUAD $0x30b856213a0f4466 // insertps xmm10, dword [rsi - 72], 48 + LONG $0xeb0f4466; BYTE $0xc1 // por xmm8, xmm1 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0xc1c20f41; BYTE $0x01 // cmpltps xmm0, xmm9 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xc86f0f66 // movdqa xmm1, xmm0 + LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 + LONG $0xc8f80f66 // psubb xmm1, xmm0 + QUAD $0xfffe3c8e100f44f3; BYTE $0xff // movss xmm9, dword [rsi - 452] + QUAD $0xfebc8e213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm9, dword [rsi - 324], 16 + LONG $0xdb0f4166; BYTE $0xff // pand xmm7, xmm15 + QUAD $0xff3c8e213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm9, dword [rsi - 196], 32 + LONG $0xcfeb0f66 // por xmm1, xmm7 + LONG $0xed280f41 // movaps xmm5, xmm13 + LONG $0xecc20f41; BYTE $0x01 // cmpltps xmm5, xmm12 + QUAD $0x30bc4e213a0f4466 // insertps xmm9, dword [rsi - 68], 48 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xf5710f66; BYTE $0x02 // psllw xmm5, 2 + LONG $0x6ddb0f66; BYTE $0x10 // pand xmm5, oword 16[rbp] /* [rip + .LCPI7_1] */ + LONG $0xe9eb0f66 // por xmm5, xmm1 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0x01c2c20f // cmpltps xmm0, xmm2 + LONG $0xcd280f41 // movaps xmm1, xmm13 + LONG $0x01cbc20f // cmpltps xmm1, xmm3 + QUAD $0xfffffe409e100ff3 // movss xmm3, dword [rsi - 448] + QUAD $0xfffec09e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 320], 16 + QUAD $0xffff409e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 192], 32 + LONG $0x213a0f66; WORD $0xc05e; BYTE $0x30 // insertps xmm3, 
dword [rsi - 64], 48 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 + LONG $0xf0710f66; BYTE $0x03 // psllw xmm0, 3 + LONG $0x6f0f4466; WORD $0x2065 // movdqa xmm12, oword 32[rbp] /* [rip + .LCPI7_2] */ + LONG $0xdb0f4166; BYTE $0xc4 // pand xmm0, xmm12 + LONG $0xc96b0f66 // packssdw xmm1, xmm1 + LONG $0xc9630f66 // packsswb xmm1, xmm1 + LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 + LONG $0xf1710f66; BYTE $0x04 // psllw xmm1, 4 + LONG $0x4ddb0f66; BYTE $0x30 // pand xmm1, oword 48[rbp] /* [rip + .LCPI7_3] */ + LONG $0xc8eb0f66 // por xmm1, xmm0 + QUAD $0xfffffe4896100ff3 // movss xmm2, dword [rsi - 440] + QUAD $0xfffec896213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 312], 16 + QUAD $0xffff4896213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 184], 32 + LONG $0x213a0f66; WORD $0xc856; BYTE $0x30 // insertps xmm2, dword [rsi - 56], 48 + LONG $0xcdeb0f66 // por xmm1, xmm5 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0xc2c20f41; BYTE $0x01 // cmpltps xmm0, xmm10 + LONG $0xed280f41 // movaps xmm5, xmm13 + LONG $0xe9c20f41; BYTE $0x01 // cmpltps xmm5, xmm9 + QUAD $0xfffffe4cbe100ff3 // movss xmm7, dword [rsi - 436] + QUAD $0xfffeccbe213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 308], 16 + QUAD $0xffff4cbe213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 180], 32 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 + LONG $0x213a0f66; WORD $0xcc7e; BYTE $0x30 // insertps xmm7, dword [rsi - 52], 48 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 + LONG $0xf0710f66; BYTE $0x05 // psllw xmm0, 5 + LONG $0xdb0f4166; BYTE $0xc6 // pand xmm0, xmm14 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xf5710f66; BYTE $0x06 // psllw xmm5, 6 + LONG $0x6ddb0f66; BYTE $0x50 // pand xmm5, oword 80[rbp] /* [rip + .LCPI7_5] */ + LONG $0xe8eb0f66 // por xmm5, xmm0 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0x01c3c20f // cmpltps xmm0, xmm3 + QUAD $0xfffffe509e100ff3 // movss xmm3, dword [rsi - 432] + QUAD $0xfffed09e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 304], 16 + QUAD $0xffff509e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 176], 32 + LONG $0x213a0f66; WORD $0xd05e; BYTE $0x30 // insertps xmm3, dword [rsi - 48], 48 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xf0710f66; BYTE $0x07 // psllw xmm0, 7 + LONG $0x6f0f4466; WORD $0x6055 // movdqa xmm10, oword 96[rbp] /* [rip + .LCPI7_6] */ + LONG $0xdb0f4166; BYTE $0xc2 // pand xmm0, xmm10 + LONG $0xc5eb0f66 // por xmm0, xmm5 + QUAD $0xfffffe54ae100ff3 // movss xmm5, dword [rsi - 428] + QUAD $0xfffed4ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 300], 16 + QUAD $0xffff54ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 172], 32 + LONG $0x213a0f66; WORD $0xd46e; BYTE $0x30 // insertps xmm5, dword [rsi - 44], 48 + LONG $0xc1eb0f66 // por xmm0, xmm1 + QUAD $0xfffe588e100f44f3; BYTE $0xff // movss xmm9, dword [rsi - 424] + QUAD $0xfed88e213a0f4466; WORD $0xffff; BYTE $0x10 // insertps xmm9, dword [rsi - 296], 16 + QUAD $0xff588e213a0f4466; WORD $0xffff; BYTE $0x20 // insertps xmm9, dword [rsi - 168], 32 + LONG $0xf6630f66 // packsswb xmm6, xmm6 + QUAD $0x30d84e213a0f4466 // insertps xmm9, dword [rsi - 40], 48 + LONG $0x620f4466; BYTE $0xc0 // punpckldq xmm8, xmm0 + LONG $0xc5280f41 // movaps xmm0, 
xmm13 + LONG $0x01c2c20f // cmpltps xmm0, xmm2 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xc86f0f66 // movdqa xmm1, xmm0 + LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 + LONG $0xc8f80f66 // psubb xmm1, xmm0 + QUAD $0xfffffe5c96100ff3 // movss xmm2, dword [rsi - 420] + QUAD $0xfffedc96213a0f66; WORD $0x10ff // insertps xmm2, dword [rsi - 292], 16 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + QUAD $0xffff5c96213a0f66; WORD $0x20ff // insertps xmm2, dword [rsi - 164], 32 + LONG $0xceeb0f66 // por xmm1, xmm6 + LONG $0xf5280f41 // movaps xmm6, xmm13 + LONG $0x01f7c20f // cmpltps xmm6, xmm7 + LONG $0x213a0f66; WORD $0xdc56; BYTE $0x30 // insertps xmm2, dword [rsi - 36], 48 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 + LONG $0xf6630f66 // packsswb xmm6, xmm6 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0xf6710f66; BYTE $0x02 // psllw xmm6, 2 + LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI7_1] */ + LONG $0xf0db0f66 // pand xmm6, xmm0 + LONG $0xf1eb0f66 // por xmm6, xmm1 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0x01c3c20f // cmpltps xmm0, xmm3 + LONG $0xcd280f41 // movaps xmm1, xmm13 + LONG $0x01cdc20f // cmpltps xmm1, xmm5 + QUAD $0xfffffe609e100ff3 // movss xmm3, dword [rsi - 416] + QUAD $0xfffee09e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 288], 16 + QUAD $0xffff609e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 160], 32 + LONG $0x213a0f66; WORD $0xe05e; BYTE $0x30 // insertps xmm3, dword [rsi - 32], 48 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 + LONG $0xf0710f66; BYTE $0x03 // psllw xmm0, 3 + LONG $0xdb0f4166; BYTE $0xc4 // pand xmm0, xmm12 + LONG $0xc96b0f66 // packssdw xmm1, xmm1 + LONG $0xc9630f66 // packsswb xmm1, xmm1 + LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 + LONG $0xf1710f66; BYTE $0x04 // psllw xmm1, 4 + LONG $0x6f0f4466; WORD $0x3065 // movdqa xmm12, oword 48[rbp] /* [rip + .LCPI7_3] */ + LONG $0xdb0f4166; BYTE $0xcc // pand xmm1, xmm12 + LONG $0xc8eb0f66 // por xmm1, xmm0 + QUAD $0xfffffe68ae100ff3 // movss xmm5, dword [rsi - 408] + QUAD $0xfffee8ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 280], 16 + QUAD $0xffff68ae213a0f66; WORD $0x20ff // insertps xmm5, dword [rsi - 152], 32 + LONG $0x213a0f66; WORD $0xe86e; BYTE $0x30 // insertps xmm5, dword [rsi - 24], 48 + LONG $0xceeb0f66 // por xmm1, xmm6 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0xc1c20f41; BYTE $0x01 // cmpltps xmm0, xmm9 + LONG $0xf5280f41 // movaps xmm6, xmm13 + LONG $0x01f2c20f // cmpltps xmm6, xmm2 + QUAD $0xfffffe6cbe100ff3 // movss xmm7, dword [rsi - 404] + QUAD $0xfffeecbe213a0f66; WORD $0x10ff // insertps xmm7, dword [rsi - 276], 16 + QUAD $0xffff6cbe213a0f66; WORD $0x20ff // insertps xmm7, dword [rsi - 148], 32 + LONG $0xe46b0f66 // packssdw xmm4, xmm4 + LONG $0x213a0f66; WORD $0xec7e; BYTE $0x30 // insertps xmm7, dword [rsi - 20], 48 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xdb0f4166; BYTE $0xc7 // pand xmm0, xmm15 + LONG $0xf0710f66; BYTE $0x05 // psllw xmm0, 5 + LONG $0xdb0f4166; BYTE $0xc6 // pand xmm0, xmm14 + LONG $0xf66b0f66 // packssdw xmm6, xmm6 + LONG $0xf6630f66 // packsswb xmm6, xmm6 + LONG $0xdb0f4166; BYTE $0xf7 // pand xmm6, xmm15 + LONG $0xf6710f66; BYTE $0x06 // psllw xmm6, 6 + LONG $0x6f0f4466; WORD $0x504d // movdqa xmm9, oword 80[rbp] /* [rip + .LCPI7_5] */ + LONG $0xdb0f4166; BYTE $0xf1 // pand 
xmm6, xmm9 + LONG $0xf0eb0f66 // por xmm6, xmm0 + LONG $0xd5280f41 // movaps xmm2, xmm13 + LONG $0x01d3c20f // cmpltps xmm2, xmm3 + QUAD $0xfffffe7086100ff3 // movss xmm0, dword [rsi - 400] + QUAD $0xfffef086213a0f66; WORD $0x10ff // insertps xmm0, dword [rsi - 272], 16 + QUAD $0xffff7086213a0f66; WORD $0x20ff // insertps xmm0, dword [rsi - 144], 32 + LONG $0x213a0f66; WORD $0xf046; BYTE $0x30 // insertps xmm0, dword [rsi - 16], 48 + LONG $0xd26b0f66 // packssdw xmm2, xmm2 + LONG $0xd2630f66 // packsswb xmm2, xmm2 + LONG $0xf2710f66; BYTE $0x07 // psllw xmm2, 7 + LONG $0xdb0f4166; BYTE $0xd2 // pand xmm2, xmm10 + LONG $0xd6eb0f66 // por xmm2, xmm6 + QUAD $0xfffffe74b6100ff3 // movss xmm6, dword [rsi - 396] + QUAD $0xfffef4b6213a0f66; WORD $0x10ff // insertps xmm6, dword [rsi - 268], 16 + QUAD $0xffff74b6213a0f66; WORD $0x20ff // insertps xmm6, dword [rsi - 140], 32 + LONG $0xe4630f66 // packsswb xmm4, xmm4 + LONG $0x213a0f66; WORD $0xf476; BYTE $0x30 // insertps xmm6, dword [rsi - 12], 48 + LONG $0xd1eb0f66 // por xmm2, xmm1 + LONG $0xcd280f41 // movaps xmm1, xmm13 + LONG $0x01cdc20f // cmpltps xmm1, xmm5 + LONG $0xc96b0f66 // packssdw xmm1, xmm1 + LONG $0xc9630f66 // packsswb xmm1, xmm1 + LONG $0xe96f0f66 // movdqa xmm5, xmm1 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xe9f80f66 // psubb xmm5, xmm1 + QUAD $0xfffffe789e100ff3 // movss xmm3, dword [rsi - 392] + QUAD $0xfffef89e213a0f66; WORD $0x10ff // insertps xmm3, dword [rsi - 264], 16 + LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 + QUAD $0xffff789e213a0f66; WORD $0x20ff // insertps xmm3, dword [rsi - 136], 32 + LONG $0xeceb0f66 // por xmm5, xmm4 + LONG $0xe5280f41 // movaps xmm4, xmm13 + LONG $0x01e7c20f // cmpltps xmm4, xmm7 + LONG $0x213a0f66; WORD $0xf85e; BYTE $0x30 // insertps xmm3, dword [rsi - 8], 48 + LONG $0xe46b0f66 // packssdw xmm4, xmm4 + LONG $0xe4630f66 // packsswb xmm4, xmm4 + LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 + LONG $0xf4710f66; BYTE $0x02 // psllw xmm4, 2 + LONG $0x65db0f66; BYTE $0x10 // pand xmm4, oword 16[rbp] /* [rip + .LCPI7_1] */ + LONG $0xe5eb0f66 // por xmm4, xmm5 + LONG $0xed280f41 // movaps xmm5, xmm13 + LONG $0x01e8c20f // cmpltps xmm5, xmm0 + LONG $0xcd280f41 // movaps xmm1, xmm13 + LONG $0x01cec20f // cmpltps xmm1, xmm6 + QUAD $0xfffffe7c86100ff3 // movss xmm0, dword [rsi - 388] + QUAD $0xfffefc86213a0f66; WORD $0x10ff // insertps xmm0, dword [rsi - 260], 16 + QUAD $0xffff7c86213a0f66; WORD $0x20ff // insertps xmm0, dword [rsi - 132], 32 + LONG $0x213a0f66; WORD $0xfc46; BYTE $0x30 // insertps xmm0, dword [rsi - 4], 48 + LONG $0xed6b0f66 // packssdw xmm5, xmm5 + LONG $0xed630f66 // packsswb xmm5, xmm5 + LONG $0xdb0f4166; BYTE $0xef // pand xmm5, xmm15 + LONG $0xf5710f66; BYTE $0x03 // psllw xmm5, 3 + LONG $0x6ddb0f66; BYTE $0x20 // pand xmm5, oword 32[rbp] /* [rip + .LCPI7_2] */ + LONG $0xc96b0f66 // packssdw xmm1, xmm1 + LONG $0xc9630f66 // packsswb xmm1, xmm1 + LONG $0xdb0f4166; BYTE $0xcf // pand xmm1, xmm15 + LONG $0xf1710f66; BYTE $0x04 // psllw xmm1, 4 + LONG $0xdb0f4166; BYTE $0xcc // pand xmm1, xmm12 + LONG $0xcdeb0f66 // por xmm1, xmm5 + QUAD $0xfffffe80ae100ff3 // movss xmm5, dword [rsi - 384] + QUAD $0xffff00ae213a0f66; WORD $0x10ff // insertps xmm5, dword [rsi - 256], 16 + LONG $0x213a0f66; WORD $0x806e; BYTE $0x20 // insertps xmm5, dword [rsi - 128], 32 + LONG $0xcceb0f66 // por xmm1, xmm4 + LONG $0xe5280f41 // movaps xmm4, xmm13 + LONG $0x01e3c20f // cmpltps xmm4, xmm3 + LONG $0xdd280f41 // movaps xmm3, xmm13 + LONG $0x01d8c20f // cmpltps xmm3, xmm0 + LONG 
$0x213a0f66; WORD $0x302e // insertps xmm5, dword [rsi], 48 + LONG $0xe46b0f66 // packssdw xmm4, xmm4 + LONG $0xe4630f66 // packsswb xmm4, xmm4 + LONG $0xdb0f4166; BYTE $0xe7 // pand xmm4, xmm15 + LONG $0xf4710f66; BYTE $0x05 // psllw xmm4, 5 + LONG $0xdb0f4166; BYTE $0xe6 // pand xmm4, xmm14 + LONG $0xdb6b0f66 // packssdw xmm3, xmm3 + LONG $0xdb630f66 // packsswb xmm3, xmm3 + LONG $0xdb0f4166; BYTE $0xdf // pand xmm3, xmm15 + LONG $0xf3710f66; BYTE $0x06 // psllw xmm3, 6 + LONG $0xdb0f4166; BYTE $0xd9 // pand xmm3, xmm9 + LONG $0xdceb0f66 // por xmm3, xmm4 + LONG $0xc5280f41 // movaps xmm0, xmm13 + LONG $0x01c5c20f // cmpltps xmm0, xmm5 + LONG $0xc06b0f66 // packssdw xmm0, xmm0 + LONG $0xc0630f66 // packsswb xmm0, xmm0 + LONG $0xf0710f66; BYTE $0x07 // psllw xmm0, 7 + LONG $0xdb0f4166; BYTE $0xc2 // pand xmm0, xmm10 + LONG $0xc3eb0f66 // por xmm0, xmm3 + LONG $0xc1eb0f66 // por xmm0, xmm1 + LONG $0xd0620f66 // punpckldq xmm2, xmm0 + LONG $0x600f4466; BYTE $0xc2 // punpcklbw xmm8, xmm2 + LONG $0x380f4466; WORD $0x4500; BYTE $0x70 // pshufb xmm8, oword 112[rbp] /* [rip + .LCPI7_7] */ + LONG $0x7f0f44f3; WORD $0x8f04 // movdqu oword [rdi + 4*rcx], xmm8 + LONG $0x04c18348 // add rcx, 4 + LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // add rsi, 512 + WORD $0x3948; BYTE $0xc8 // cmp rax, rcx + JNE LBB7_183 + WORD $0x3949; BYTE $0xc2 // cmp r10, rax + JNE LBB7_185 + JMP LBB7_188 DATA LCDATA6<>+0x000(SB)/8, $0x0000000001010101 DATA LCDATA6<>+0x008(SB)/8, $0x0000000000000000 @@ -46936,15 +48344,15 @@ TEXT ·_comparison_greater_equal_arr_scalar_sse4(SB), $520-48 WORD $0x894d; BYTE $0xc2 // mov r10, r8 WORD $0x8949; BYTE $0xce // mov r14, rcx WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JG LBB10_16 + JG LBB10_26 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JLE LBB10_31 + JLE LBB10_2 WORD $0xff83; BYTE $0x04 // cmp edi, 4 - JE LBB10_81 + JE LBB10_99 WORD $0xff83; BYTE $0x05 // cmp edi, 5 - JE LBB10_92 + JE LBB10_114 WORD $0xff83; BYTE $0x06 // cmp edi, 6 - JNE LBB10_182 + JNE LBB10_201 WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -46954,10 +48362,10 @@ TEXT ·_comparison_greater_equal_arr_scalar_sse4(SB), $520-48 LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_9 + JE LBB10_17 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_7: +LBB10_15: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x04768d48 // lea rsi, [rsi + 4] LONG $0x000000ba; BYTE $0x00 // mov edx, 0 @@ -46978,63 +48386,63 @@ LBB10_7: LONG $0x1e3c8841 // mov byte [r14 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_7 + JNE LBB10_15 LONG $0x01c68349 // add r14, 1 -LBB10_9: +LBB10_17: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_13 + JL LBB10_21 LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x00000170249c894c // mov qword [rsp + 368], r11 QUAD $0x00000140249c894c // mov qword [rsp + 320], r11 -LBB10_11: +LBB10_19: QUAD $0x0000016024b4894c // mov qword [rsp + 352], r14 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - QUAD $0x000000a02494930f // setae byte [rsp + 160] + QUAD $0x000000f02494930f // setae byte [rsp + 240] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd7930f40 // setae dil + LONG $0xd2930f41 // setae r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd6930f41 // setae r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d - QUAD $0x000001502494930f // setae byte 
[rsp + 336] + QUAD $0x000001102494930f // setae byte [rsp + 272] LONG $0x106e3944 // cmp dword [rsi + 16], r13d - QUAD $0x000000e02494930f // setae byte [rsp + 224] - LONG $0x146e3944 // cmp dword [rsi + 20], r13d QUAD $0x000000d02494930f // setae byte [rsp + 208] + LONG $0x146e3944 // cmp dword [rsi + 20], r13d + QUAD $0x000000c02494930f // setae byte [rsp + 192] LONG $0x186e3944 // cmp dword [rsi + 24], r13d WORD $0x930f; BYTE $0xd0 // setae al LONG $0x1c6e3944 // cmp dword [rsi + 28], r13d WORD $0x930f; BYTE $0xd3 // setae bl LONG $0x206e3944 // cmp dword [rsi + 32], r13d - QUAD $0x000001302494930f // setae byte [rsp + 304] + QUAD $0x000001502494930f // setae byte [rsp + 336] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0xd7930f40 // setae dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd1930f41 // setae r9b + LONG $0xd0930f41 // setae r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd2930f41 // setae r10b + LONG $0xd1930f41 // setae r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd3930f41 // setae r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d LONG $0xd4930f41 // setae r12b LONG $0x386e3944 // cmp dword [rsi + 56], r13d - QUAD $0x000001002494930f // setae byte [rsp + 256] + QUAD $0x000001202494930f // setae byte [rsp + 288] LONG $0x3c6e3944 // cmp dword [rsi + 60], r13d WORD $0x930f; BYTE $0xd1 // setae cl LONG $0x406e3944 // cmp dword [rsi + 64], r13d - QUAD $0x000000b02494930f // setae byte [rsp + 176] + QUAD $0x000000a02494930f // setae byte [rsp + 160] LONG $0x446e3944 // cmp dword [rsi + 68], r13d - QUAD $0x000001102494930f // setae byte [rsp + 272] + QUAD $0x000001302494930f // setae byte [rsp + 304] LONG $0x486e3944 // cmp dword [rsi + 72], r13d - QUAD $0x000001202494930f // setae byte [rsp + 288] + QUAD $0x000001002494930f // setae byte [rsp + 256] LONG $0x4c6e3944 // cmp dword [rsi + 76], r13d - QUAD $0x000000f02494930f // setae byte [rsp + 240] + QUAD $0x000000e02494930f // setae byte [rsp + 224] LONG $0x506e3944 // cmp dword [rsi + 80], r13d - QUAD $0x000000c02494930f // setae byte [rsp + 192] + QUAD $0x000000b02494930f // setae byte [rsp + 176] LONG $0x546e3944 // cmp dword [rsi + 84], r13d - QUAD $0x000000902494930f // setae byte [rsp + 144] + LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] LONG $0x586e3944 // cmp dword [rsi + 88], r13d QUAD $0x000000802494930f // setae byte [rsp + 128] LONG $0x5c6e3944 // cmp dword [rsi + 92], r13d @@ -47042,7 +48450,7 @@ LBB10_11: LONG $0x606e3944 // cmp dword [rsi + 96], r13d LONG $0x2454930f; BYTE $0x30 // setae byte [rsp + 48] LONG $0x646e3944 // cmp dword [rsi + 100], r13d - LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] + QUAD $0x000000902494930f // setae byte [rsp + 144] LONG $0x686e3944 // cmp dword [rsi + 104], r13d LONG $0x2454930f; BYTE $0x60 // setae byte [rsp + 96] LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d @@ -47054,121 +48462,122 @@ LBB10_11: LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x2454930f; BYTE $0x08 // setae byte [rsp + 8] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd0930f41 // setae r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] + WORD $0x930f; BYTE $0xd2 // setae dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000f024940244 // add r10b, byte [rsp + 240] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // 
or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] - QUAD $0x000001502484b60f // movzx eax, byte [rsp + 336] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x0000015024bc0240 // add dil, byte [rsp + 336] + QUAD $0x000001102484b60f // movzx eax, byte [rsp + 272] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000010024bcb60f // movzx edi, byte [rsp + 256] + QUAD $0x0000012024bcb60f // movzx edi, byte [rsp + 288] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] - QUAD $0x000001102494b60f // movzx edx, byte [rsp + 272] - WORD $0xd200 // add dl, dl - LONG $0xb0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 176] - WORD $0xd789 // mov edi, edx - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000c02494b60f // movzx edx, byte [rsp + 192] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + QUAD $0x000001302484b60f // movzx eax, byte [rsp + 304] + WORD $0xc000 // add al, al + LONG $0xa0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 160] + WORD $0xc789 // mov edi, eax + QUAD $0x000001002484b60f // movzx eax, byte [rsp + 256] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000e02484b60f // movzx eax, byte [rsp + 224] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 
// mov edi, eax + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] - WORD $0xc900 // add cl, cl - LONG $0x30244c02 // add cl, byte [rsp + 48] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xc000 // add al, al + LONG $0x30244402 // add al, byte [rsp + 48] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c68349 // add r14, 4 QUAD $0x0000014024848348; BYTE $0xff // add qword [rsp + 320], -1 - JNE LBB10_11 + JNE LBB10_19 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x00000170249c8b4c // mov r11, qword [rsp + 368] -LBB10_13: +LBB10_21: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB10_162 + JNE LBB10_137 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_164 + JMP LBB10_24 -LBB10_16: +LBB10_26: WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JLE LBB10_45 + JLE LBB10_27 WORD $0xff83; BYTE $0x09 // cmp edi, 9 - JE LBB10_104 + JE LBB10_157 WORD $0xff83; BYTE $0x0b // cmp edi, 11 - JE LBB10_115 + JE LBB10_172 WORD $0xff83; BYTE $0x0c // cmp edi, 12 
- JNE LBB10_182 + JNE LBB10_201 LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xda490f4d // cmovns r11, r10 @@ -47178,14 +48587,15 @@ LBB10_16: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x02100ff2 // movsd xmm0, qword [rdx] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_24 + JE LBB10_49 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_22: - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - WORD $0x960f; BYTE $0xd2 // setbe dl +LBB10_47: + LONG $0x0e100ff2 // movsd xmm1, qword [rsi] LONG $0x08c68348 // add rsi, 8 - WORD $0xdaf6 // neg dl + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x000000ba; BYTE $0x00 // mov edx, 0 + WORD $0xd280; BYTE $0xff // adc dl, -1 LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax @@ -47202,191 +48612,221 @@ LBB10_22: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_22 + JNE LBB10_47 LONG $0x01c68349 // add r14, 1 -LBB10_24: +LBB10_49: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_28 + JL LBB10_53 LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x00000140249c894c // mov qword [rsp + 320], r11 - QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 + QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 -LBB10_26: +LBB10_51: QUAD $0x0000016024b4894c // mov qword [rsp + 352], r14 - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - QUAD $0x000001502494960f // setbe byte [rsp + 336] - LONG $0x462e0f66; BYTE $0x08 // ucomisd xmm0, qword [rsi + 8] - LONG $0xd1960f41 // setbe r9b - LONG $0x462e0f66; BYTE $0x10 // ucomisd xmm0, qword [rsi + 16] - LONG $0xd6960f41 // setbe r14b - LONG $0x462e0f66; BYTE $0x18 // ucomisd xmm0, qword [rsi + 24] - LONG $0xd5960f41 // setbe r13b - LONG $0x462e0f66; BYTE $0x20 // ucomisd xmm0, qword [rsi + 32] - QUAD $0x000000e02494960f // setbe byte [rsp + 224] - LONG $0x462e0f66; BYTE $0x28 // ucomisd xmm0, qword [rsi + 40] - QUAD $0x000000d02494960f // setbe byte [rsp + 208] - LONG $0x462e0f66; BYTE $0x30 // ucomisd xmm0, qword [rsi + 48] - WORD $0x960f; BYTE $0xd0 // setbe al - LONG $0x462e0f66; BYTE $0x38 // ucomisd xmm0, qword [rsi + 56] - WORD $0x960f; BYTE $0xd3 // setbe bl - LONG $0x462e0f66; BYTE $0x40 // ucomisd xmm0, qword [rsi + 64] - QUAD $0x000001002494960f // setbe byte [rsp + 256] - LONG $0x462e0f66; BYTE $0x48 // ucomisd xmm0, qword [rsi + 72] - WORD $0x960f; BYTE $0xd2 // setbe dl - LONG $0x462e0f66; BYTE $0x50 // ucomisd xmm0, qword [rsi + 80] - LONG $0xd7960f40 // setbe dil - LONG $0x462e0f66; BYTE $0x58 // ucomisd xmm0, qword [rsi + 88] - LONG $0xd2960f41 // setbe r10b - LONG $0x462e0f66; BYTE $0x60 // ucomisd xmm0, qword [rsi + 96] - LONG $0xd3960f41 // setbe r11b - LONG $0x462e0f66; BYTE $0x68 // ucomisd xmm0, qword [rsi + 104] - LONG $0xd4960f41 // setbe r12b - LONG $0x462e0f66; BYTE $0x70 // ucomisd xmm0, qword [rsi + 112] - QUAD $0x000001102494960f // setbe byte [rsp + 272] - LONG $0x462e0f66; BYTE $0x78 // ucomisd xmm0, qword [rsi + 120] - WORD $0x960f; BYTE $0xd1 // setbe cl - QUAD $0x00000080862e0f66 // ucomisd xmm0, qword [rsi + 128] - QUAD $0x000000b02494960f // setbe byte [rsp + 176] - QUAD $0x00000088862e0f66 // ucomisd xmm0, qword [rsi + 136] - QUAD $0x000001302494960f // setbe byte [rsp + 304] - QUAD $0x00000090862e0f66 // ucomisd xmm0, qword [rsi + 144] - QUAD $0x000001202494960f // setbe byte [rsp + 288] - QUAD $0x00000098862e0f66 // ucomisd xmm0, qword [rsi + 152] - QUAD 
$0x000000f02494960f // setbe byte [rsp + 240] - QUAD $0x000000a0862e0f66 // ucomisd xmm0, qword [rsi + 160] - QUAD $0x000000c02494960f // setbe byte [rsp + 192] - QUAD $0x000000a8862e0f66 // ucomisd xmm0, qword [rsi + 168] - QUAD $0x000000902494960f // setbe byte [rsp + 144] - QUAD $0x000000b0862e0f66 // ucomisd xmm0, qword [rsi + 176] - QUAD $0x000000802494960f // setbe byte [rsp + 128] - QUAD $0x000000b8862e0f66 // ucomisd xmm0, qword [rsi + 184] - LONG $0xd7960f41 // setbe r15b - QUAD $0x000000c0862e0f66 // ucomisd xmm0, qword [rsi + 192] - LONG $0x2454960f; BYTE $0x30 // setbe byte [rsp + 48] - QUAD $0x000000c8862e0f66 // ucomisd xmm0, qword [rsi + 200] - LONG $0x2454960f; BYTE $0x70 // setbe byte [rsp + 112] - QUAD $0x000000d0862e0f66 // ucomisd xmm0, qword [rsi + 208] - LONG $0x2454960f; BYTE $0x60 // setbe byte [rsp + 96] - QUAD $0x000000d8862e0f66 // ucomisd xmm0, qword [rsi + 216] - LONG $0x2454960f; BYTE $0x50 // setbe byte [rsp + 80] - QUAD $0x000000e0862e0f66 // ucomisd xmm0, qword [rsi + 224] - LONG $0x2454960f; BYTE $0x20 // setbe byte [rsp + 32] - QUAD $0x000000e8862e0f66 // ucomisd xmm0, qword [rsi + 232] - LONG $0x2454960f; BYTE $0x10 // setbe byte [rsp + 16] - QUAD $0x000000f0862e0f66 // ucomisd xmm0, qword [rsi + 240] - LONG $0x2454960f; BYTE $0x08 // setbe byte [rsp + 8] - QUAD $0x000000f8862e0f66 // ucomisd xmm0, qword [rsi + 248] - LONG $0xd0960f41 // setbe r8b - WORD $0x0045; BYTE $0xc9 // add r9b, r9b - QUAD $0x00000150248c0244 // add r9b, byte [rsp + 336] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - WORD $0xe3c0; BYTE $0x07 // shl bl, 7 - WORD $0xc308 // or bl, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xce // or r14b, r9b + LONG $0x0e100ff2 // movsd xmm1, qword [rsi] + LONG $0x56100ff2; BYTE $0x08 // movsd xmm2, qword [rsi + 8] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000001202494930f // setae byte [rsp + 288] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0x4e100ff2; BYTE $0x10 // movsd xmm1, qword [rsi + 16] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0xd7930f40 // setae dil + LONG $0x4e100ff2; BYTE $0x18 // movsd xmm1, qword [rsi + 24] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0xd0930f41 // setae r8b + LONG $0x4e100ff2; BYTE $0x20 // movsd xmm1, qword [rsi + 32] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0xd1930f41 // setae r9b + LONG $0x4e100ff2; BYTE $0x28 // movsd xmm1, qword [rsi + 40] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000001102494930f // setae byte [rsp + 272] + LONG $0x4e100ff2; BYTE $0x30 // movsd xmm1, qword [rsi + 48] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000001302494930f // setae byte [rsp + 304] + LONG $0x4e100ff2; BYTE $0x38 // movsd xmm1, qword [rsi + 56] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000001502494930f // setae byte [rsp + 336] + LONG $0x4e100ff2; BYTE $0x40 // movsd xmm1, qword [rsi + 64] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000c02494930f // setae byte [rsp + 192] + LONG $0x4e100ff2; BYTE $0x48 // movsd xmm1, qword [rsi + 72] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0xd2930f41 // setae r10b + LONG $0x4e100ff2; BYTE $0x50 // movsd xmm1, qword [rsi + 80] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + WORD $0x930f; BYTE $0xd3 // setae bl + LONG $0x4e100ff2; BYTE $0x58 // movsd xmm1, qword [rsi + 88] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0xd7930f41 // setae r15b + LONG $0x4e100ff2; BYTE $0x60 // movsd xmm1, qword [rsi + 96] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + WORD $0x930f; BYTE 
$0xd0 // setae al + LONG $0x4e100ff2; BYTE $0x68 // movsd xmm1, qword [rsi + 104] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000d02494930f // setae byte [rsp + 208] + LONG $0x4e100ff2; BYTE $0x70 // movsd xmm1, qword [rsi + 112] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000b02494930f // setae byte [rsp + 176] + LONG $0x4e100ff2; BYTE $0x78 // movsd xmm1, qword [rsi + 120] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000e02494930f // setae byte [rsp + 224] + QUAD $0x000000808e100ff2 // movsd xmm1, qword [rsi + 128] + QUAD $0x0000008896100ff2 // movsd xmm2, qword [rsi + 136] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000908e100ff2 // movsd xmm1, qword [rsi + 144] + QUAD $0x000000902494930f // setae byte [rsp + 144] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + QUAD $0x0000009896100ff2 // movsd xmm2, qword [rsi + 152] + LONG $0xd3930f41 // setae r11b + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000a08e100ff2 // movsd xmm1, qword [rsi + 160] + LONG $0xd6930f41 // setae r14b + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + QUAD $0x000000a896100ff2 // movsd xmm2, qword [rsi + 168] + LONG $0xd4930f41 // setae r12b + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000b08e100ff2 // movsd xmm1, qword [rsi + 176] + QUAD $0x000001002494930f // setae byte [rsp + 256] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + QUAD $0x000000b896100ff2 // movsd xmm2, qword [rsi + 184] + QUAD $0x000000a02494930f // setae byte [rsp + 160] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000c08e100ff2 // movsd xmm1, qword [rsi + 192] + LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + QUAD $0x000000c896100ff2 // movsd xmm2, qword [rsi + 200] + LONG $0xd5930f41 // setae r13b + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000d08e100ff2 // movsd xmm1, qword [rsi + 208] + LONG $0x2454930f; BYTE $0x10 // setae byte [rsp + 16] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + QUAD $0x000000d896100ff2 // movsd xmm2, qword [rsi + 216] + QUAD $0x000000802494930f // setae byte [rsp + 128] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000e08e100ff2 // movsd xmm1, qword [rsi + 224] + LONG $0x2454930f; BYTE $0x60 // setae byte [rsp + 96] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + QUAD $0x000000e896100ff2 // movsd xmm2, qword [rsi + 232] + LONG $0x2454930f; BYTE $0x50 // setae byte [rsp + 80] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + QUAD $0x000000f08e100ff2 // movsd xmm1, qword [rsi + 240] + LONG $0x2454930f; BYTE $0x30 // setae byte [rsp + 48] + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + QUAD $0x000000f896100ff2 // movsd xmm2, qword [rsi + 248] + LONG $0x2454930f; BYTE $0x20 // setae byte [rsp + 32] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x2454930f; BYTE $0x08 // setae byte [rsp + 8] + LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + LONG $0xd02e0f66 // ucomisd xmm2, xmm0 + WORD $0x930f; BYTE $0xd1 // setae cl WORD $0xd200 // add dl, dl - LONG $0x00249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 256] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b + LONG $0x20249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 288] LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd1 // mov r9d, edx - QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0841; 
BYTE $0xfa // or r10b, dil - QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] + LONG $0x03e0c041 // shl r8b, 3 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + LONG $0x04e1c041 // shl r9b, 4 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + QUAD $0x000001102494b60f // movzx edx, byte [rsp + 272] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0844; BYTE $0xca // or dl, r9b - LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000011024bcb60f // movzx edi, byte [rsp + 272] - LONG $0x06e7c040 // shl dil, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl - WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x000001302484b60f // movzx eax, byte [rsp + 304] - WORD $0xc000 // add al, al - LONG $0xb0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 176] - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al WORD $0xd789 // mov edi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000c02494b60f // movzx edx, byte [rsp + 192] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 + QUAD $0x0001302484b60f44; BYTE $0x00 // movzx r8d, byte [rsp + 304] + LONG $0x06e0c041 // shl r8b, 6 + QUAD $0x000001502494b60f // movzx edx, byte [rsp + 336] + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0x0844; BYTE $0xc2 // or dl, r8b + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000c024940244 // add r10b, byte [rsp + 192] + WORD $0xe3c0; BYTE $0x02 // shl bl, 2 + WORD $0x0844; BYTE $0xd3 // or bl, r10b + LONG $0x03e7c041 // shl r15b, 3 + WORD $0x0841; BYTE $0xdf // or r15b, bl + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xf8 // or al, r15b WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] + QUAD $0x000000d0249cb60f // movzx ebx, byte [rsp + 208] + WORD $0xe3c0; BYTE $0x05 // shl bl, 5 + WORD $0xc308 // or bl, al + WORD $0xdf89 // mov edi, ebx + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + QUAD $0x000000e0249cb60f // movzx ebx, byte [rsp + 224] + WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + WORD $0xc308 // or bl, al + WORD $0x0045; BYTE $0xdb // add r11b, r11b + QUAD $0x00000090249c0244 // add r11b, byte [rsp + 144] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xde // or r14b, r11b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xf4 // or r12b, r14b + QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] + QUAD $0x000001002484b60f // movzx eax, byte [rsp + 256] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0x8941; BYTE $0xc0 // mov r8d, eax + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0844; BYTE $0xc0 // or al, r8b + WORD $0x0840; BYTE $0xfb // or bl, dil + LONG $0x247cb60f; BYTE $0x70 // movzx edi, byte [rsp + 112] + LONG $0x06e7c040 // shl dil, 6 + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xfd // or r13b, dil + WORD $0x8841; BYTE $0x16 // mov byte [r14], dl + WORD $0x0841; BYTE $0xc5 // or r13b, al + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] + WORD $0xc000 // add al, al + LONG $0x10244402 // add al, byte [rsp + 16] + WORD $0xc289 // mov edx, eax + 
LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xd008 // or al, dl + WORD $0xc289 // mov edx, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xd008 // or al, dl + WORD $0xc289 // mov edx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, byte [rsp + 48] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xd008 // or al, dl + LONG $0x2454b60f; BYTE $0x20 // movzx edx, byte [rsp + 32] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0x8841; BYTE $0x1e // mov byte [r14], bl - QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] + WORD $0xc208 // or dl, al + LONG $0x015e8841 // mov byte [r14 + 1], bl + LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xdf // or r15b, bl - LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] - WORD $0xc900 // add cl, cl - LONG $0x30244c02 // add cl, byte [rsp + 48] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xe1c0; BYTE $0x07 // shl cl, 7 + WORD $0xd908 // or cl, bl + LONG $0x026e8845 // mov byte [r14 + 2], r13b WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl - LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b - LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 + LONG $0x034e8841 // mov byte [r14 + 3], cl LONG $0x04c68349 // add r14, 4 - QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 - JNE LBB10_26 + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 + JNE LBB10_51 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x00000140249c8b4c // mov r11, qword [rsp + 320] -LBB10_28: +LBB10_53: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB10_166 + JNE LBB10_195 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_168 + JMP LBB10_197 -LBB10_31: +LBB10_2: WORD $0xff83; BYTE $0x02 // cmp edi, 2 - JE LBB10_58 + JE LBB10_56 WORD $0xff83; BYTE $0x03 // cmp edi, 3 - JNE LBB10_182 + JNE LBB10_201 WORD $0x8a44; BYTE $0x1a // mov r11b, byte [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -47396,10 +48836,10 @@ LBB10_31: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_37 + JE LBB10_8 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d 
-LBB10_35: +LBB10_6: WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b LONG $0x01768d48 // lea rsi, [rsi + 1] WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -47420,38 +48860,37 @@ LBB10_35: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_35 + JNE LBB10_6 LONG $0x01c68349 // add r14, 1 -LBB10_37: +LBB10_8: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_127 + JL LBB10_9 LONG $0x10ff8349 // cmp r15, 16 LONG $0x245c8844; BYTE $0x08 // mov byte [rsp + 8], r11b LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x000001b024bc894c // mov qword [rsp + 432], r15 - JB LBB10_41 + JB LBB10_82 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB10_191 + JAE LBB10_85 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB10_191 + JAE LBB10_85 -LBB10_41: - WORD $0xc031 // xor eax, eax - QUAD $0x000000a024848948 // mov qword [rsp + 160], rax - LONG $0x2474894c; BYTE $0x70 // mov qword [rsp + 112], r14 +LBB10_82: + WORD $0xc031 // xor eax, eax + QUAD $0x000000f024848948 // mov qword [rsp + 240], rax + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 -LBB10_42: - WORD $0x894d; BYTE $0xfe // mov r14, r15 - QUAD $0x000000a024b42b4c // sub r14, qword [rsp + 160] - QUAD $0x0000017024b4894c // mov qword [rsp + 368], r14 +LBB10_88: + QUAD $0x000000f024bc2b4c // sub r15, qword [rsp + 240] + QUAD $0x0000017024bc894c // mov qword [rsp + 368], r15 -LBB10_43: +LBB10_89: WORD $0x8948; BYTE $0xf1 // mov rcx, rsi WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b QUAD $0x0000014024949d0f // setge byte [rsp + 320] @@ -47464,19 +48903,19 @@ LBB10_43: LONG $0xd49d0f41 // setge r12b LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x04 // cmp byte [rcx + 4], al - QUAD $0x0000015024949d0f // setge byte [rsp + 336] + QUAD $0x0000011024949d0f // setge byte [rsp + 272] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x05 // cmp byte [rcx + 5], al - QUAD $0x0000009024949d0f // setge byte [rsp + 144] + LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x06 // cmp byte [rcx + 6], al - QUAD $0x000000a024949d0f // setge byte [rsp + 160] + QUAD $0x000000f024949d0f // setge byte [rsp + 240] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x07 // cmp byte [rcx + 7], al LONG $0xd19d0f41 // setge r9b LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x08 // cmp byte [rcx + 8], al - QUAD $0x0000013024949d0f // setge byte [rsp + 304] + QUAD $0x0000015024949d0f // setge byte [rsp + 336] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x09 // cmp byte [rcx + 9], al WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -47494,31 +48933,31 @@ LBB10_43: LONG $0xd59d0f41 // setge r13b LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x0e // cmp byte [rcx + 14], al - QUAD $0x0000010024949d0f // setge byte [rsp + 256] + QUAD $0x0000012024949d0f // setge byte [rsp + 288] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x0f // cmp byte [rcx + 15], al LONG $0xd09d0f41 // setge r8b LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x10 // cmp byte [rcx + 16], bl - QUAD $0x0000011024949d0f // setge byte 
[rsp + 272] + QUAD $0x0000013024949d0f // setge byte [rsp + 304] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x11 // cmp byte [rcx + 17], bl - QUAD $0x0000012024949d0f // setge byte [rsp + 288] + QUAD $0x0000010024949d0f // setge byte [rsp + 256] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x12 // cmp byte [rcx + 18], bl - QUAD $0x000000e024949d0f // setge byte [rsp + 224] + QUAD $0x000000d024949d0f // setge byte [rsp + 208] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x13 // cmp byte [rcx + 19], bl - QUAD $0x000000f024949d0f // setge byte [rsp + 240] + QUAD $0x000000e024949d0f // setge byte [rsp + 224] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x14 // cmp byte [rcx + 20], bl - QUAD $0x000000b024949d0f // setge byte [rsp + 176] + QUAD $0x000000a024949d0f // setge byte [rsp + 160] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x15 // cmp byte [rcx + 21], bl - QUAD $0x000000d024949d0f // setge byte [rsp + 208] + QUAD $0x000000c024949d0f // setge byte [rsp + 192] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x16 // cmp byte [rcx + 22], bl - QUAD $0x000000c024949d0f // setge byte [rsp + 192] + QUAD $0x000000b024949d0f // setge byte [rsp + 176] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x17 // cmp byte [rcx + 23], bl LONG $0xd39d0f41 // setge r11b @@ -47548,60 +48987,60 @@ LBB10_43: WORD $0x9d0f; BYTE $0xd3 // setge bl WORD $0x0040; BYTE $0xf6 // add sil, sil QUAD $0x0000014024b40240 // add sil, byte [rsp + 320] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + QUAD $0x000000f02484b60f // movzx eax, byte [rsp + 240] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e1c041 // shl r9b, 7 WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xd200 // add dl, dl - LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] + LONG $0x50249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 336] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x7cb60f44; WORD $0x0824 // movzx r15d, byte [rsp + 8] LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000001502484b60f // movzx eax, byte [rsp + 336] + QUAD $0x000001102484b60f // movzx eax, byte [rsp + 272] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b LONG $0x03e2c041 // shl r10b, 3 WORD $0x0841; BYTE $0xfa // or r10b, dil - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] + LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0xc208 // or dl, al LONG $0x04e6c041 // shl r14b, 4 WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x05e5c041 // shl r13b, 5 WORD $0x0845; BYTE $0xf5 // or r13b, r14b - QUAD $0x0000010024b4b60f // movzx esi, byte [rsp + 256] + QUAD $0x0000012024b4b60f // movzx esi, byte [rsp + 288] LONG $0x06e6c040 // shl sil, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xf0 // or r8b, sil WORD $0x0841; BYTE $0xd1 // or r9b, dl WORD $0x0845; BYTE $0xe8 // or r8b, r13b - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] + QUAD $0x000001002494b60f // movzx edx, byte [rsp + 256] WORD $0xd200 // add dl, dl - LONG $0x10249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 272] + LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] WORD $0xd689 // mov 
esi, edx - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] + QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] + QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - QUAD $0x000000b02494b60f // movzx edx, byte [rsp + 176] + QUAD $0x000000a02494b60f // movzx edx, byte [rsp + 160] WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] + QUAD $0x000000c02494b60f // movzx edx, byte [rsp + 192] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x0000009024948b48 // mov rdx, qword [rsp + 144] WORD $0x8844; BYTE $0x0a // mov byte [rdx], r9b - QUAD $0x000000c024bcb60f // movzx edi, byte [rsp + 192] + QUAD $0x000000b024bcb60f // movzx edi, byte [rsp + 176] LONG $0x06e7c040 // shl dil, 6 LONG $0x07e3c041 // shl r11b, 7 WORD $0x0841; BYTE $0xfb // or r11b, dil @@ -47636,18 +49075,18 @@ LBB10_43: WORD $0x5a88; BYTE $0x03 // mov byte [rdx + 3], bl LONG $0x20718d48 // lea rsi, [rcx + 32] LONG $0x04c28348 // add rdx, 4 - LONG $0x24548948; BYTE $0x70 // mov qword [rsp + 112], rdx + QUAD $0x0000009024948948 // mov qword [rsp + 144], rdx QUAD $0x0000017024848348; BYTE $0xff // add qword [rsp + 368], -1 - JNE LBB10_43 + JNE LBB10_89 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x000001b024bc8b4c // mov r15, qword [rsp + 432] - JMP LBB10_128 + JMP LBB10_91 -LBB10_45: +LBB10_27: WORD $0xff83; BYTE $0x07 // cmp edi, 7 - JE LBB10_70 + JE LBB10_139 WORD $0xff83; BYTE $0x08 // cmp edi, 8 - JNE LBB10_182 + JNE LBB10_201 WORD $0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -47657,10 +49096,10 @@ LBB10_45: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_51 + JE LBB10_33 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_49: +LBB10_31: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] LONG $0x000000ba; BYTE $0x00 // mov edx, 0 @@ -47681,63 +49120,63 @@ LBB10_49: LONG $0x1e3c8841 // mov byte [r14 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_49 + JNE LBB10_31 LONG $0x01c68349 // add r14, 1 -LBB10_51: +LBB10_33: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_55 + JL LBB10_37 LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x00000170249c894c // mov qword [rsp + 368], r11 QUAD $0x00000140249c894c // mov qword [rsp + 320], r11 -LBB10_53: +LBB10_35: QUAD $0x0000016024b4894c // mov qword [rsp + 352], r14 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - QUAD $0x000000a02494930f // setae byte [rsp + 160] + QUAD $0x000000f02494930f // setae byte [rsp + 240] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd7930f40 // setae dil + LONG $0xd2930f41 // setae r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd6930f41 // setae r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 - QUAD $0x000001502494930f // setae byte [rsp + 336] + QUAD $0x000001102494930f // setae byte [rsp + 272] LONG $0x206e394c // cmp 
qword [rsi + 32], r13 - QUAD $0x000000e02494930f // setae byte [rsp + 224] - LONG $0x286e394c // cmp qword [rsi + 40], r13 QUAD $0x000000d02494930f // setae byte [rsp + 208] + LONG $0x286e394c // cmp qword [rsi + 40], r13 + QUAD $0x000000c02494930f // setae byte [rsp + 192] LONG $0x306e394c // cmp qword [rsi + 48], r13 WORD $0x930f; BYTE $0xd0 // setae al LONG $0x386e394c // cmp qword [rsi + 56], r13 WORD $0x930f; BYTE $0xd3 // setae bl LONG $0x406e394c // cmp qword [rsi + 64], r13 - QUAD $0x000001302494930f // setae byte [rsp + 304] + QUAD $0x000001502494930f // setae byte [rsp + 336] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0xd7930f40 // setae dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd1930f41 // setae r9b + LONG $0xd0930f41 // setae r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd2930f41 // setae r10b + LONG $0xd1930f41 // setae r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd3930f41 // setae r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 LONG $0xd4930f41 // setae r12b LONG $0x706e394c // cmp qword [rsi + 112], r13 - QUAD $0x000001002494930f // setae byte [rsp + 256] + QUAD $0x000001202494930f // setae byte [rsp + 288] LONG $0x786e394c // cmp qword [rsi + 120], r13 WORD $0x930f; BYTE $0xd1 // setae cl LONG $0x80ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 128], r13 - QUAD $0x000000b02494930f // setae byte [rsp + 176] + QUAD $0x000000a02494930f // setae byte [rsp + 160] LONG $0x88ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 136], r13 - QUAD $0x000001102494930f // setae byte [rsp + 272] + QUAD $0x000001302494930f // setae byte [rsp + 304] LONG $0x90ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 144], r13 - QUAD $0x000001202494930f // setae byte [rsp + 288] + QUAD $0x000001002494930f // setae byte [rsp + 256] LONG $0x98ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 152], r13 - QUAD $0x000000f02494930f // setae byte [rsp + 240] + QUAD $0x000000e02494930f // setae byte [rsp + 224] LONG $0xa0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 160], r13 - QUAD $0x000000c02494930f // setae byte [rsp + 192] + QUAD $0x000000b02494930f // setae byte [rsp + 176] LONG $0xa8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 168], r13 - QUAD $0x000000902494930f // setae byte [rsp + 144] + LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] LONG $0xb0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 176], r13 QUAD $0x000000802494930f // setae byte [rsp + 128] LONG $0xb8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 184], r13 @@ -47745,7 +49184,7 @@ LBB10_53: LONG $0xc0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 192], r13 LONG $0x2454930f; BYTE $0x30 // setae byte [rsp + 48] LONG $0xc8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 200], r13 - LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] + QUAD $0x000000902494930f // setae byte [rsp + 144] LONG $0xd0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 208], r13 LONG $0x2454930f; BYTE $0x60 // setae byte [rsp + 96] LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 @@ -47757,113 +49196,114 @@ LBB10_53: LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x2454930f; BYTE $0x08 // setae byte [rsp + 8] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd0930f41 // setae r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] + WORD $0x930f; BYTE $0xd2 // setae dl + 
WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000f024940244 // add r10b, byte [rsp + 240] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] - QUAD $0x000001502484b60f // movzx eax, byte [rsp + 336] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x0000015024bc0240 // add dil, byte [rsp + 336] + QUAD $0x000001102484b60f // movzx eax, byte [rsp + 272] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000010024bcb60f // movzx edi, byte [rsp + 256] + QUAD $0x0000012024bcb60f // movzx edi, byte [rsp + 288] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] - QUAD $0x000001102494b60f // movzx edx, byte [rsp + 272] - WORD $0xd200 // add dl, dl - LONG $0xb0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 176] - WORD $0xd789 // mov edi, edx - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000c02494b60f // movzx edx, byte [rsp + 192] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + QUAD $0x000001302484b60f // movzx eax, byte [rsp + 304] + WORD $0xc000 // add al, al + LONG $0xa0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 160] + WORD $0xc789 // mov edi, eax + QUAD $0x000001002484b60f // movzx eax, byte [rsp + 256] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000e02484b60f // movzx eax, byte [rsp + 224] + 
WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] - WORD $0xc900 // add cl, cl - LONG $0x30244c02 // add cl, byte [rsp + 48] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xc000 // add al, al + LONG $0x30244402 // add al, byte [rsp + 48] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c68349 // add r14, 4 QUAD $0x0000014024848348; BYTE $0xff // add qword [rsp + 320], -1 - JNE LBB10_53 + JNE LBB10_35 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x00000170249c8b4c // mov r11, qword [rsp + 368] -LBB10_55: +LBB10_37: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB10_143 + JNE LBB10_155 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_145 + JMP LBB10_40 
-LBB10_58: +LBB10_56: WORD $0x8a44; BYTE $0x1a // mov r11b, byte [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -47873,10 +49313,10 @@ LBB10_58: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_62 + JE LBB10_60 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_60: +LBB10_58: WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b LONG $0x01768d48 // lea rsi, [rsi + 1] LONG $0x000000ba; BYTE $0x00 // mov edx, 0 @@ -47897,38 +49337,37 @@ LBB10_60: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_60 + JNE LBB10_58 LONG $0x01c68349 // add r14, 1 -LBB10_62: +LBB10_60: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_131 + JL LBB10_61 LONG $0x10ff8349 // cmp r15, 16 LONG $0x245c8844; BYTE $0x08 // mov byte [rsp + 8], r11b LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x000001d024bc894c // mov qword [rsp + 464], r15 - JB LBB10_66 + JB LBB10_63 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB10_194 + JAE LBB10_66 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] WORD $0x3948; BYTE $0xc6 // cmp rsi, rax - JAE LBB10_194 + JAE LBB10_66 -LBB10_66: +LBB10_63: WORD $0xc031 // xor eax, eax QUAD $0x000001a024848948 // mov qword [rsp + 416], rax - QUAD $0x000000d024b4894c // mov qword [rsp + 208], r14 + QUAD $0x000000e024b4894c // mov qword [rsp + 224], r14 -LBB10_67: - WORD $0x894d; BYTE $0xfe // mov r14, r15 - QUAD $0x000001a024b42b4c // sub r14, qword [rsp + 416] - QUAD $0x0000017024b4894c // mov qword [rsp + 368], r14 +LBB10_69: + QUAD $0x000001a024bc2b4c // sub r15, qword [rsp + 416] + QUAD $0x0000017024bc894c // mov qword [rsp + 368], r15 -LBB10_68: +LBB10_70: WORD $0x8948; BYTE $0xf1 // mov rcx, rsi WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b QUAD $0x000001402494930f // setae byte [rsp + 320] @@ -47941,19 +49380,19 @@ LBB10_68: LONG $0xd4930f41 // setae r12b LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x04 // cmp byte [rcx + 4], al - QUAD $0x000001502494930f // setae byte [rsp + 336] + QUAD $0x000001102494930f // setae byte [rsp + 272] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x05 // cmp byte [rcx + 5], al QUAD $0x000000802494930f // setae byte [rsp + 128] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x06 // cmp byte [rcx + 6], al - QUAD $0x000000a02494930f // setae byte [rsp + 160] + QUAD $0x000000f02494930f // setae byte [rsp + 240] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x07 // cmp byte [rcx + 7], al LONG $0xd1930f41 // setae r9b LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x08 // cmp byte [rcx + 8], al - QUAD $0x000001302494930f // setae byte [rsp + 304] + QUAD $0x000001502494930f // setae byte [rsp + 336] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x09 // cmp byte [rcx + 9], al WORD $0x930f; BYTE $0xd2 // setae dl @@ -47971,28 +49410,28 @@ LBB10_68: LONG $0xd5930f41 // setae r13b LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] WORD $0x4138; BYTE $0x0e // cmp byte [rcx + 14], al - QUAD $0x000001002494930f // setae byte [rsp + 256] + QUAD $0x000001202494930f // setae byte [rsp + 288] LONG $0x2444b60f; BYTE $0x08 // movzx eax, byte [rsp + 8] 
WORD $0x4138; BYTE $0x0f // cmp byte [rcx + 15], al LONG $0xd0930f41 // setae r8b LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x10 // cmp byte [rcx + 16], bl - QUAD $0x000001102494930f // setae byte [rsp + 272] + QUAD $0x000001302494930f // setae byte [rsp + 304] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x11 // cmp byte [rcx + 17], bl - QUAD $0x000001202494930f // setae byte [rsp + 288] + QUAD $0x000001002494930f // setae byte [rsp + 256] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x12 // cmp byte [rcx + 18], bl - QUAD $0x000000e02494930f // setae byte [rsp + 224] + QUAD $0x000000d02494930f // setae byte [rsp + 208] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x13 // cmp byte [rcx + 19], bl - QUAD $0x000000f02494930f // setae byte [rsp + 240] + QUAD $0x000000a02494930f // setae byte [rsp + 160] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x14 // cmp byte [rcx + 20], bl QUAD $0x000000b02494930f // setae byte [rsp + 176] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x15 // cmp byte [rcx + 21], bl - QUAD $0x000000902494930f // setae byte [rsp + 144] + LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x16 // cmp byte [rcx + 22], bl QUAD $0x000000c02494930f // setae byte [rsp + 192] @@ -48001,7 +49440,7 @@ LBB10_68: LONG $0xd3930f41 // setae r11b LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x18 // cmp byte [rcx + 24], bl - LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] + QUAD $0x000000902494930f // setae byte [rsp + 144] LONG $0x245cb60f; BYTE $0x08 // movzx ebx, byte [rsp + 8] WORD $0x5938; BYTE $0x19 // cmp byte [rcx + 25], bl LONG $0x2454930f; BYTE $0x60 // setae byte [rsp + 96] @@ -48025,20 +49464,20 @@ LBB10_68: WORD $0x930f; BYTE $0xd3 // setae bl WORD $0x0040; BYTE $0xf6 // add sil, sil QUAD $0x0000014024b40240 // add sil, byte [rsp + 320] - QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + QUAD $0x000000f02484b60f // movzx eax, byte [rsp + 240] WORD $0xe0c0; BYTE $0x06 // shl al, 6 LONG $0x07e1c041 // shl r9b, 7 WORD $0x0841; BYTE $0xc1 // or r9b, al LONG $0x02e7c041 // shl r15b, 2 WORD $0x0841; BYTE $0xf7 // or r15b, sil WORD $0xd200 // add dl, dl - LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] + LONG $0x50249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 336] LONG $0x03e4c041 // shl r12b, 3 WORD $0x0845; BYTE $0xfc // or r12b, r15b LONG $0x7cb60f44; WORD $0x0824 // movzx r15d, byte [rsp + 8] LONG $0x02e7c040 // shl dil, 2 WORD $0x0840; BYTE $0xd7 // or dil, dl - QUAD $0x000001502484b60f // movzx eax, byte [rsp + 336] + QUAD $0x000001102484b60f // movzx eax, byte [rsp + 272] WORD $0xe0c0; BYTE $0x04 // shl al, 4 WORD $0x0844; BYTE $0xe0 // or al, r12b LONG $0x03e2c041 // shl r10b, 3 @@ -48050,21 +49489,21 @@ LBB10_68: WORD $0x0845; BYTE $0xd6 // or r14b, r10b LONG $0x05e5c041 // shl r13b, 5 WORD $0x0845; BYTE $0xf5 // or r13b, r14b - QUAD $0x0000010024b4b60f // movzx esi, byte [rsp + 256] + QUAD $0x0000012024b4b60f // movzx esi, byte [rsp + 288] LONG $0x06e6c040 // shl sil, 6 LONG $0x07e0c041 // shl r8b, 7 WORD $0x0841; BYTE $0xf0 // or r8b, sil WORD $0x0841; BYTE $0xd1 // or r9b, dl WORD $0x0845; BYTE $0xe8 // or r8b, r13b - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] + QUAD $0x000001002494b60f // movzx edx, 
byte [rsp + 256] WORD $0xd200 // add dl, dl - LONG $0x10249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 272] + LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] WORD $0xd689 // mov esi, edx - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] + QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] WORD $0xe2c0; BYTE $0x02 // shl dl, 2 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] + QUAD $0x000000a02494b60f // movzx edx, byte [rsp + 160] WORD $0xe2c0; BYTE $0x03 // shl dl, 3 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx @@ -48072,11 +49511,11 @@ LBB10_68: WORD $0xe2c0; BYTE $0x04 // shl dl, 4 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] + LONG $0x2454b60f; BYTE $0x70 // movzx edx, byte [rsp + 112] WORD $0xe2c0; BYTE $0x05 // shl dl, 5 WORD $0x0840; BYTE $0xf2 // or dl, sil WORD $0xd689 // mov esi, edx - QUAD $0x000000d024948b48 // mov rdx, qword [rsp + 208] + QUAD $0x000000e024948b48 // mov rdx, qword [rsp + 224] WORD $0x8844; BYTE $0x0a // mov byte [rdx], r9b QUAD $0x000000c024bcb60f // movzx edi, byte [rsp + 192] LONG $0x06e7c040 // shl dil, 6 @@ -48086,7 +49525,7 @@ LBB10_68: WORD $0x0841; BYTE $0xf3 // or r11b, sil LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xc000 // add al, al - LONG $0x70244402 // add al, byte [rsp + 112] + LONG $0x90248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 144] WORD $0xc689 // mov esi, eax LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x02 // shl al, 2 @@ -48113,14 +49552,14 @@ LBB10_68: WORD $0x5a88; BYTE $0x03 // mov byte [rdx + 3], bl LONG $0x20718d48 // lea rsi, [rcx + 32] LONG $0x04c28348 // add rdx, 4 - QUAD $0x000000d024948948 // mov qword [rsp + 208], rdx + QUAD $0x000000e024948948 // mov qword [rsp + 224], rdx QUAD $0x0000017024848348; BYTE $0xff // add qword [rsp + 368], -1 - JNE LBB10_68 + JNE LBB10_70 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x000001d024bc8b4c // mov r15, qword [rsp + 464] - JMP LBB10_132 + JMP LBB10_72 -LBB10_70: +LBB10_139: WORD $0x8b44; BYTE $0x2a // mov r13d, dword [rdx] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -48130,10 +49569,10 @@ LBB10_70: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_74 + JE LBB10_143 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_72: +LBB10_141: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x04768d48 // lea rsi, [rsi + 4] WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -48154,63 +49593,63 @@ LBB10_72: LONG $0x1e3c8841 // mov byte [r14 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_72 + JNE LBB10_141 LONG $0x01c68349 // add r14, 1 -LBB10_74: +LBB10_143: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_78 + JL LBB10_147 LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x00000170249c894c // mov qword [rsp + 368], r11 QUAD $0x00000140249c894c // mov qword [rsp + 320], r11 -LBB10_76: +LBB10_145: QUAD $0x0000016024b4894c // mov qword [rsp + 352], r14 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - QUAD $0x000000a024949d0f // setge byte [rsp + 160] + QUAD $0x000000f024949d0f // setge byte [rsp + 240] LONG $0x046e3944 // cmp dword [rsi + 4], r13d - LONG $0xd79d0f40 // setge dil + LONG $0xd29d0f41 // 
setge r10b LONG $0x086e3944 // cmp dword [rsi + 8], r13d LONG $0xd69d0f41 // setge r14b LONG $0x0c6e3944 // cmp dword [rsi + 12], r13d - QUAD $0x0000015024949d0f // setge byte [rsp + 336] + QUAD $0x0000011024949d0f // setge byte [rsp + 272] LONG $0x106e3944 // cmp dword [rsi + 16], r13d - QUAD $0x000000e024949d0f // setge byte [rsp + 224] - LONG $0x146e3944 // cmp dword [rsi + 20], r13d QUAD $0x000000d024949d0f // setge byte [rsp + 208] + LONG $0x146e3944 // cmp dword [rsi + 20], r13d + QUAD $0x000000c024949d0f // setge byte [rsp + 192] LONG $0x186e3944 // cmp dword [rsi + 24], r13d WORD $0x9d0f; BYTE $0xd0 // setge al LONG $0x1c6e3944 // cmp dword [rsi + 28], r13d WORD $0x9d0f; BYTE $0xd3 // setge bl LONG $0x206e3944 // cmp dword [rsi + 32], r13d - QUAD $0x0000013024949d0f // setge byte [rsp + 304] + QUAD $0x0000015024949d0f // setge byte [rsp + 336] LONG $0x246e3944 // cmp dword [rsi + 36], r13d - WORD $0x9d0f; BYTE $0xd2 // setge dl + LONG $0xd79d0f40 // setge dil LONG $0x286e3944 // cmp dword [rsi + 40], r13d - LONG $0xd19d0f41 // setge r9b + LONG $0xd09d0f41 // setge r8b LONG $0x2c6e3944 // cmp dword [rsi + 44], r13d - LONG $0xd29d0f41 // setge r10b + LONG $0xd19d0f41 // setge r9b LONG $0x306e3944 // cmp dword [rsi + 48], r13d LONG $0xd39d0f41 // setge r11b LONG $0x346e3944 // cmp dword [rsi + 52], r13d LONG $0xd49d0f41 // setge r12b LONG $0x386e3944 // cmp dword [rsi + 56], r13d - QUAD $0x0000010024949d0f // setge byte [rsp + 256] + QUAD $0x0000012024949d0f // setge byte [rsp + 288] LONG $0x3c6e3944 // cmp dword [rsi + 60], r13d WORD $0x9d0f; BYTE $0xd1 // setge cl LONG $0x406e3944 // cmp dword [rsi + 64], r13d - QUAD $0x000000b024949d0f // setge byte [rsp + 176] + QUAD $0x000000a024949d0f // setge byte [rsp + 160] LONG $0x446e3944 // cmp dword [rsi + 68], r13d - QUAD $0x0000011024949d0f // setge byte [rsp + 272] + QUAD $0x0000013024949d0f // setge byte [rsp + 304] LONG $0x486e3944 // cmp dword [rsi + 72], r13d - QUAD $0x0000012024949d0f // setge byte [rsp + 288] + QUAD $0x0000010024949d0f // setge byte [rsp + 256] LONG $0x4c6e3944 // cmp dword [rsi + 76], r13d - QUAD $0x000000f024949d0f // setge byte [rsp + 240] + QUAD $0x000000e024949d0f // setge byte [rsp + 224] LONG $0x506e3944 // cmp dword [rsi + 80], r13d - QUAD $0x000000c024949d0f // setge byte [rsp + 192] + QUAD $0x000000b024949d0f // setge byte [rsp + 176] LONG $0x546e3944 // cmp dword [rsi + 84], r13d - QUAD $0x0000009024949d0f // setge byte [rsp + 144] + LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] LONG $0x586e3944 // cmp dword [rsi + 88], r13d QUAD $0x0000008024949d0f // setge byte [rsp + 128] LONG $0x5c6e3944 // cmp dword [rsi + 92], r13d @@ -48218,7 +49657,7 @@ LBB10_76: LONG $0x606e3944 // cmp dword [rsi + 96], r13d LONG $0x24549d0f; BYTE $0x30 // setge byte [rsp + 48] LONG $0x646e3944 // cmp dword [rsi + 100], r13d - LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] + QUAD $0x0000009024949d0f // setge byte [rsp + 144] LONG $0x686e3944 // cmp dword [rsi + 104], r13d LONG $0x24549d0f; BYTE $0x60 // setge byte [rsp + 96] LONG $0x6c6e3944 // cmp dword [rsi + 108], r13d @@ -48230,113 +49669,114 @@ LBB10_76: LONG $0x786e3944 // cmp dword [rsi + 120], r13d LONG $0x24549d0f; BYTE $0x08 // setge byte [rsp + 8] LONG $0x7c6e3944 // cmp dword [rsi + 124], r13d - LONG $0xd09d0f41 // setge r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] + WORD $0x9d0f; BYTE $0xd2 // setge dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000f024940244 // add r10b, byte 
[rsp + 240] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] - QUAD $0x000001502484b60f // movzx eax, byte [rsp + 336] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x0000015024bc0240 // add dil, byte [rsp + 336] + QUAD $0x000001102484b60f // movzx eax, byte [rsp + 272] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000010024bcb60f // movzx edi, byte [rsp + 256] + QUAD $0x0000012024bcb60f // movzx edi, byte [rsp + 288] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] - QUAD $0x000001102494b60f // movzx edx, byte [rsp + 272] - WORD $0xd200 // add dl, dl - LONG $0xb0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 176] - WORD $0xd789 // mov edi, edx - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000c02494b60f // movzx edx, byte [rsp + 192] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + QUAD $0x000001302484b60f // movzx eax, byte [rsp + 304] + WORD $0xc000 // add al, al + LONG $0xa0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 160] + WORD $0xc789 // mov edi, eax + QUAD $0x000001002484b60f // movzx eax, byte [rsp + 256] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000e02484b60f // movzx eax, byte [rsp + 224] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD 
$0xc789 // mov edi, eax + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] - WORD $0xc900 // add cl, cl - LONG $0x30244c02 // add cl, byte [rsp + 48] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xc000 // add al, al + LONG $0x30244402 // add al, byte [rsp + 48] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x80c68148; WORD $0x0000; BYTE $0x00 // add rsi, 128 LONG $0x04c68349 // add r14, 4 QUAD $0x0000014024848348; BYTE $0xff // add qword [rsp + 320], -1 - JNE LBB10_76 + JNE LBB10_145 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x00000170249c8b4c // mov r11, qword [rsp + 368] -LBB10_78: +LBB10_147: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB10_147 + JNE LBB10_153 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_149 + JMP LBB10_150 -LBB10_81: +LBB10_99: LONG $0x2ab70f44 // movzx r13d, word [rdx] LONG $0x1f5a8d4d // lea 
r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -48346,10 +49786,10 @@ LBB10_81: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_85 + JE LBB10_103 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_83: +LBB10_101: LONG $0x2e394466 // cmp word [rsi], r13w LONG $0x02768d48 // lea rsi, [rsi + 2] LONG $0x000000ba; BYTE $0x00 // mov edx, 0 @@ -48370,63 +49810,63 @@ LBB10_83: LONG $0x1e3c8841 // mov byte [r14 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_83 + JNE LBB10_101 LONG $0x01c68349 // add r14, 1 -LBB10_85: +LBB10_103: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_89 + JL LBB10_107 LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x00000170249c894c // mov qword [rsp + 368], r11 QUAD $0x00000140249c894c // mov qword [rsp + 320], r11 -LBB10_87: +LBB10_105: QUAD $0x0000016024b4894c // mov qword [rsp + 352], r14 LONG $0x2e394466 // cmp word [rsi], r13w - QUAD $0x000000a02494930f // setae byte [rsp + 160] + QUAD $0x000000f02494930f // setae byte [rsp + 240] LONG $0x6e394466; BYTE $0x02 // cmp word [rsi + 2], r13w - LONG $0xd7930f40 // setae dil + LONG $0xd2930f41 // setae r10b LONG $0x6e394466; BYTE $0x04 // cmp word [rsi + 4], r13w LONG $0xd6930f41 // setae r14b LONG $0x6e394466; BYTE $0x06 // cmp word [rsi + 6], r13w - QUAD $0x000001502494930f // setae byte [rsp + 336] + QUAD $0x000001102494930f // setae byte [rsp + 272] LONG $0x6e394466; BYTE $0x08 // cmp word [rsi + 8], r13w - QUAD $0x000000e02494930f // setae byte [rsp + 224] - LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w QUAD $0x000000d02494930f // setae byte [rsp + 208] + LONG $0x6e394466; BYTE $0x0a // cmp word [rsi + 10], r13w + QUAD $0x000000c02494930f // setae byte [rsp + 192] LONG $0x6e394466; BYTE $0x0c // cmp word [rsi + 12], r13w WORD $0x930f; BYTE $0xd0 // setae al LONG $0x6e394466; BYTE $0x0e // cmp word [rsi + 14], r13w WORD $0x930f; BYTE $0xd3 // setae bl LONG $0x6e394466; BYTE $0x10 // cmp word [rsi + 16], r13w - QUAD $0x000001302494930f // setae byte [rsp + 304] + QUAD $0x000001502494930f // setae byte [rsp + 336] LONG $0x6e394466; BYTE $0x12 // cmp word [rsi + 18], r13w - WORD $0x930f; BYTE $0xd2 // setae dl + LONG $0xd7930f40 // setae dil LONG $0x6e394466; BYTE $0x14 // cmp word [rsi + 20], r13w - LONG $0xd1930f41 // setae r9b + LONG $0xd0930f41 // setae r8b LONG $0x6e394466; BYTE $0x16 // cmp word [rsi + 22], r13w - LONG $0xd2930f41 // setae r10b + LONG $0xd1930f41 // setae r9b LONG $0x6e394466; BYTE $0x18 // cmp word [rsi + 24], r13w LONG $0xd3930f41 // setae r11b LONG $0x6e394466; BYTE $0x1a // cmp word [rsi + 26], r13w LONG $0xd4930f41 // setae r12b LONG $0x6e394466; BYTE $0x1c // cmp word [rsi + 28], r13w - QUAD $0x000001002494930f // setae byte [rsp + 256] + QUAD $0x000001202494930f // setae byte [rsp + 288] LONG $0x6e394466; BYTE $0x1e // cmp word [rsi + 30], r13w WORD $0x930f; BYTE $0xd1 // setae cl LONG $0x6e394466; BYTE $0x20 // cmp word [rsi + 32], r13w - QUAD $0x000000b02494930f // setae byte [rsp + 176] + QUAD $0x000000a02494930f // setae byte [rsp + 160] LONG $0x6e394466; BYTE $0x22 // cmp word [rsi + 34], r13w - QUAD $0x000001102494930f // setae byte [rsp + 272] + QUAD $0x000001302494930f // setae byte [rsp + 304] LONG $0x6e394466; BYTE $0x24 // cmp word [rsi + 36], r13w - QUAD $0x000001202494930f // setae byte [rsp + 288] + QUAD $0x000001002494930f // setae byte [rsp + 256] LONG $0x6e394466; BYTE $0x26 // cmp word [rsi 
+ 38], r13w - QUAD $0x000000f02494930f // setae byte [rsp + 240] + QUAD $0x000000e02494930f // setae byte [rsp + 224] LONG $0x6e394466; BYTE $0x28 // cmp word [rsi + 40], r13w - QUAD $0x000000c02494930f // setae byte [rsp + 192] + QUAD $0x000000b02494930f // setae byte [rsp + 176] LONG $0x6e394466; BYTE $0x2a // cmp word [rsi + 42], r13w - QUAD $0x000000902494930f // setae byte [rsp + 144] + LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] LONG $0x6e394466; BYTE $0x2c // cmp word [rsi + 44], r13w QUAD $0x000000802494930f // setae byte [rsp + 128] LONG $0x6e394466; BYTE $0x2e // cmp word [rsi + 46], r13w @@ -48434,7 +49874,7 @@ LBB10_87: LONG $0x6e394466; BYTE $0x30 // cmp word [rsi + 48], r13w LONG $0x2454930f; BYTE $0x30 // setae byte [rsp + 48] LONG $0x6e394466; BYTE $0x32 // cmp word [rsi + 50], r13w - LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] + QUAD $0x000000902494930f // setae byte [rsp + 144] LONG $0x6e394466; BYTE $0x34 // cmp word [rsi + 52], r13w LONG $0x2454930f; BYTE $0x60 // setae byte [rsp + 96] LONG $0x6e394466; BYTE $0x36 // cmp word [rsi + 54], r13w @@ -48446,113 +49886,114 @@ LBB10_87: LONG $0x6e394466; BYTE $0x3c // cmp word [rsi + 60], r13w LONG $0x2454930f; BYTE $0x08 // setae byte [rsp + 8] LONG $0x6e394466; BYTE $0x3e // cmp word [rsi + 62], r13w - LONG $0xd0930f41 // setae r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] + WORD $0x930f; BYTE $0xd2 // setae dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000f024940244 // add r10b, byte [rsp + 240] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] - QUAD $0x000001502484b60f // movzx eax, byte [rsp + 336] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x0000015024bc0240 // add dil, byte [rsp + 336] + QUAD $0x000001102484b60f // movzx eax, byte [rsp + 272] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000010024bcb60f // movzx edi, byte [rsp + 256] + QUAD $0x0000012024bcb60f // movzx edi, byte [rsp + 288] LONG $0x06e7c040 // shl dil, 6 WORD 
$0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] - QUAD $0x000001102494b60f // movzx edx, byte [rsp + 272] - WORD $0xd200 // add dl, dl - LONG $0xb0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 176] - WORD $0xd789 // mov edi, edx - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000c02494b60f // movzx edx, byte [rsp + 192] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + QUAD $0x000001302484b60f // movzx eax, byte [rsp + 304] + WORD $0xc000 // add al, al + LONG $0xa0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 160] + WORD $0xc789 // mov edi, eax + QUAD $0x000001002484b60f // movzx eax, byte [rsp + 256] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000e02484b60f // movzx eax, byte [rsp + 224] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] - WORD $0xc900 // add cl, cl - LONG $0x30244c02 // add cl, byte [rsp + 48] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xc000 // add al, al + LONG $0x30244402 // add al, byte [rsp + 48] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // 
mov ecx, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x40c68348 // add rsi, 64 LONG $0x04c68349 // add r14, 4 QUAD $0x0000014024848348; BYTE $0xff // add qword [rsp + 320], -1 - JNE LBB10_87 + JNE LBB10_105 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x00000170249c8b4c // mov r11, qword [rsp + 368] -LBB10_89: +LBB10_107: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB10_170 + JNE LBB10_112 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_172 + JMP LBB10_110 -LBB10_92: +LBB10_114: LONG $0x1ab70f44 // movzx r11d, word [rdx] LONG $0x1f7a8d4d // lea r15, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -48562,10 +50003,10 @@ LBB10_92: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_96 + JE LBB10_118 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_94: +LBB10_116: LONG $0x1e394466 // cmp word [rsi], r11w LONG $0x02768d48 // lea rsi, [rsi + 2] WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -48586,59 +50027,58 @@ LBB10_94: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_94 + JNE LBB10_116 LONG $0x01c68349 // add r14, 1 -LBB10_96: +LBB10_118: LONG $0x05ffc149 // sar r15, 5 LONG $0x20fa8349 // cmp r10, 32 QUAD $0x00000188249c8944 // mov dword [rsp + 392], r11d - JL LBB10_135 + JL LBB10_119 LONG $0x08ff8349 // cmp r15, 8 LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x000001c824bc894c // mov qword [rsp + 456], r15 - JB LBB10_100 + JB LBB10_121 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x06e0c148 // shl rax, 6 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB10_197 + JAE LBB10_124 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB10_197 + JBE LBB10_124 -LBB10_100: +LBB10_121: WORD $0xc031 // xor eax, eax QUAD $0x0000019024848948 // mov qword [rsp + 400], rax WORD $0x8949; BYTE $0xf3 // mov r11, rsi WORD $0x894d; BYTE $0xf4 // mov r12, r14 -LBB10_101: +LBB10_127: LONG $0x2464894c; BYTE $0x08 // mov qword [rsp + 8], r12 - WORD $0x894d; BYTE $0xfe // mov r14, r15 - QUAD $0x0000019024b42b4c // sub r14, qword [rsp + 400] - QUAD $0x0000014024b4894c // mov qword [rsp + 320], r14 + QUAD $0x0000019024bc2b4c // sub r15, qword [rsp + 400] + QUAD $0x0000014024bc894c // mov qword [rsp + 320], r15 QUAD $0x0000018824ac8b44 // mov r13d, dword [rsp + 392] -LBB10_102: +LBB10_128: LONG $0x2b394566 // cmp word [r11], r13w - QUAD $0x000000a024949d0f // setge byte [rsp + 160] + QUAD 
$0x000000f024949d0f // setge byte [rsp + 240] LONG $0x6b394566; BYTE $0x02 // cmp word [r11 + 2], r13w LONG $0xd09d0f41 // setge r8b LONG $0x6b394566; BYTE $0x04 // cmp word [r11 + 4], r13w LONG $0xd69d0f41 // setge r14b LONG $0x6b394566; BYTE $0x06 // cmp word [r11 + 6], r13w - QUAD $0x0000015024949d0f // setge byte [rsp + 336] + QUAD $0x0000011024949d0f // setge byte [rsp + 272] LONG $0x6b394566; BYTE $0x08 // cmp word [r11 + 8], r13w - QUAD $0x000000e024949d0f // setge byte [rsp + 224] - LONG $0x6b394566; BYTE $0x0a // cmp word [r11 + 10], r13w QUAD $0x000000d024949d0f // setge byte [rsp + 208] + LONG $0x6b394566; BYTE $0x0a // cmp word [r11 + 10], r13w + QUAD $0x000000c024949d0f // setge byte [rsp + 192] LONG $0x6b394566; BYTE $0x0c // cmp word [r11 + 12], r13w WORD $0x9d0f; BYTE $0xd0 // setge al LONG $0x6b394566; BYTE $0x0e // cmp word [r11 + 14], r13w WORD $0x9d0f; BYTE $0xd3 // setge bl LONG $0x6b394566; BYTE $0x10 // cmp word [r11 + 16], r13w - QUAD $0x0000013024949d0f // setge byte [rsp + 304] + QUAD $0x0000015024949d0f // setge byte [rsp + 336] LONG $0x6b394566; BYTE $0x12 // cmp word [r11 + 18], r13w WORD $0x9d0f; BYTE $0xd1 // setge cl LONG $0x6b394566; BYTE $0x14 // cmp word [r11 + 20], r13w @@ -48650,21 +50090,21 @@ LBB10_102: LONG $0x6b394566; BYTE $0x1a // cmp word [r11 + 26], r13w LONG $0xd49d0f41 // setge r12b LONG $0x6b394566; BYTE $0x1c // cmp word [r11 + 28], r13w - QUAD $0x0000010024949d0f // setge byte [rsp + 256] + QUAD $0x0000012024949d0f // setge byte [rsp + 288] LONG $0x6b394566; BYTE $0x1e // cmp word [r11 + 30], r13w LONG $0xd79d0f40 // setge dil LONG $0x6b394566; BYTE $0x20 // cmp word [r11 + 32], r13w - QUAD $0x000000b024949d0f // setge byte [rsp + 176] + QUAD $0x000000a024949d0f // setge byte [rsp + 160] LONG $0x6b394566; BYTE $0x22 // cmp word [r11 + 34], r13w - QUAD $0x0000011024949d0f // setge byte [rsp + 272] + QUAD $0x0000013024949d0f // setge byte [rsp + 304] LONG $0x6b394566; BYTE $0x24 // cmp word [r11 + 36], r13w - QUAD $0x0000012024949d0f // setge byte [rsp + 288] + QUAD $0x0000010024949d0f // setge byte [rsp + 256] LONG $0x6b394566; BYTE $0x26 // cmp word [r11 + 38], r13w - QUAD $0x000000f024949d0f // setge byte [rsp + 240] + QUAD $0x000000e024949d0f // setge byte [rsp + 224] LONG $0x6b394566; BYTE $0x28 // cmp word [r11 + 40], r13w - QUAD $0x000000c024949d0f // setge byte [rsp + 192] + QUAD $0x000000b024949d0f // setge byte [rsp + 176] LONG $0x6b394566; BYTE $0x2a // cmp word [r11 + 42], r13w - QUAD $0x0000009024949d0f // setge byte [rsp + 144] + LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] LONG $0x6b394566; BYTE $0x2c // cmp word [r11 + 44], r13w QUAD $0x0000008024949d0f // setge byte [rsp + 128] LONG $0x6b394566; BYTE $0x2e // cmp word [r11 + 46], r13w @@ -48672,7 +50112,7 @@ LBB10_102: LONG $0x6b394566; BYTE $0x30 // cmp word [r11 + 48], r13w LONG $0x24549d0f; BYTE $0x30 // setge byte [rsp + 48] LONG $0x6b394566; BYTE $0x32 // cmp word [r11 + 50], r13w - LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] + QUAD $0x0000009024949d0f // setge byte [rsp + 144] LONG $0x6b394566; BYTE $0x34 // cmp word [r11 + 52], r13w LONG $0x24549d0f; BYTE $0x60 // setge byte [rsp + 96] LONG $0x6b394566; BYTE $0x36 // cmp word [r11 + 54], r13w @@ -48686,55 +50126,55 @@ LBB10_102: LONG $0x6b394566; BYTE $0x3e // cmp word [r11 + 62], r13w WORD $0x9d0f; BYTE $0xd2 // setge dl WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x000000a024840244 // add r8b, byte [rsp + 160] + QUAD $0x000000f024840244 // add r8b, byte [rsp + 240] WORD $0xe0c0; BYTE $0x06 
// shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 WORD $0x0845; BYTE $0xc6 // or r14b, r8b WORD $0xc900 // add cl, cl - LONG $0x30248c02; WORD $0x0001; BYTE $0x00 // add cl, byte [rsp + 304] - QUAD $0x000001502484b60f // movzx eax, byte [rsp + 336] + LONG $0x50248c02; WORD $0x0001; BYTE $0x00 // add cl, byte [rsp + 336] + QUAD $0x000001102484b60f // movzx eax, byte [rsp + 272] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b LONG $0x02e6c040 // shl sil, 2 WORD $0x0840; BYTE $0xce // or sil, cl - QUAD $0x000000e0248cb60f // movzx ecx, byte [rsp + 224] + QUAD $0x000000d0248cb60f // movzx ecx, byte [rsp + 208] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0xc108 // or cl, al WORD $0x8941; BYTE $0xc8 // mov r8d, ecx LONG $0x03e1c041 // shl r9b, 3 WORD $0x0841; BYTE $0xf1 // or r9b, sil - QUAD $0x000000d0248cb60f // movzx ecx, byte [rsp + 208] + QUAD $0x000000c0248cb60f // movzx ecx, byte [rsp + 192] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0844; BYTE $0xc1 // or cl, r8b LONG $0x04e2c041 // shl r10b, 4 WORD $0x0845; BYTE $0xca // or r10b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x0000010024b4b60f // movzx esi, byte [rsp + 256] + QUAD $0x0000012024b4b60f // movzx esi, byte [rsp + 288] LONG $0x06e6c040 // shl sil, 6 LONG $0x07e7c040 // shl dil, 7 WORD $0x0840; BYTE $0xf7 // or dil, sil WORD $0xcb08 // or bl, cl WORD $0x0844; BYTE $0xe7 // or dil, r12b - QUAD $0x00000110248cb60f // movzx ecx, byte [rsp + 272] + QUAD $0x00000130248cb60f // movzx ecx, byte [rsp + 304] WORD $0xc900 // add cl, cl - LONG $0xb0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 176] + LONG $0xa0248c02; WORD $0x0000; BYTE $0x00 // add cl, byte [rsp + 160] WORD $0xce89 // mov esi, ecx - QUAD $0x00000120248cb60f // movzx ecx, byte [rsp + 288] + QUAD $0x00000100248cb60f // movzx ecx, byte [rsp + 256] WORD $0xe1c0; BYTE $0x02 // shl cl, 2 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - QUAD $0x000000f0248cb60f // movzx ecx, byte [rsp + 240] + QUAD $0x000000e0248cb60f // movzx ecx, byte [rsp + 224] WORD $0xe1c0; BYTE $0x03 // shl cl, 3 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - QUAD $0x000000c0248cb60f // movzx ecx, byte [rsp + 192] + QUAD $0x000000b0248cb60f // movzx ecx, byte [rsp + 176] WORD $0xe1c0; BYTE $0x04 // shl cl, 4 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx - QUAD $0x00000090248cb60f // movzx ecx, byte [rsp + 144] + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] WORD $0xe1c0; BYTE $0x05 // shl cl, 5 WORD $0x0840; BYTE $0xf1 // or cl, sil WORD $0xce89 // mov esi, ecx @@ -48746,7 +50186,7 @@ LBB10_102: WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x01798840 // mov byte [rcx + 1], dil WORD $0x0841; BYTE $0xf7 // or r15b, sil - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] WORD $0xc000 // add al, al LONG $0x30244402 // add al, byte [rsp + 48] WORD $0xc389 // mov ebx, eax @@ -48776,13 +50216,13 @@ LBB10_102: LONG $0x04c18348 // add rcx, 4 LONG $0x244c8948; BYTE $0x08 // mov qword [rsp + 8], rcx QUAD $0x0000014024848348; BYTE $0xff // add qword [rsp + 320], -1 - JNE LBB10_102 + JNE LBB10_128 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x000001c824bc8b4c // mov r15, qword [rsp + 456] LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] - JMP LBB10_136 + JMP LBB10_130 -LBB10_104: +LBB10_157: WORD 
$0x8b4c; BYTE $0x2a // mov r13, qword [rdx] LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 @@ -48792,10 +50232,10 @@ LBB10_104: LONG $0xc1490f41 // cmovns eax, r9d WORD $0xe083; BYTE $0xf8 // and eax, -8 WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_108 + JE LBB10_161 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_106: +LBB10_159: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x08768d48 // lea rsi, [rsi + 8] WORD $0x9d0f; BYTE $0xd2 // setge dl @@ -48816,63 +50256,63 @@ LBB10_106: LONG $0x1e3c8841 // mov byte [r14 + rbx], dil LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_106 + JNE LBB10_159 LONG $0x01c68349 // add r14, 1 -LBB10_108: +LBB10_161: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_112 + JL LBB10_165 LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x00000170249c894c // mov qword [rsp + 368], r11 QUAD $0x00000140249c894c // mov qword [rsp + 320], r11 -LBB10_110: +LBB10_163: QUAD $0x0000016024b4894c // mov qword [rsp + 352], r14 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - QUAD $0x000000a024949d0f // setge byte [rsp + 160] + QUAD $0x000000f024949d0f // setge byte [rsp + 240] LONG $0x086e394c // cmp qword [rsi + 8], r13 - LONG $0xd79d0f40 // setge dil + LONG $0xd29d0f41 // setge r10b LONG $0x106e394c // cmp qword [rsi + 16], r13 LONG $0xd69d0f41 // setge r14b LONG $0x186e394c // cmp qword [rsi + 24], r13 - QUAD $0x0000015024949d0f // setge byte [rsp + 336] + QUAD $0x0000011024949d0f // setge byte [rsp + 272] LONG $0x206e394c // cmp qword [rsi + 32], r13 - QUAD $0x000000e024949d0f // setge byte [rsp + 224] - LONG $0x286e394c // cmp qword [rsi + 40], r13 QUAD $0x000000d024949d0f // setge byte [rsp + 208] + LONG $0x286e394c // cmp qword [rsi + 40], r13 + QUAD $0x000000c024949d0f // setge byte [rsp + 192] LONG $0x306e394c // cmp qword [rsi + 48], r13 WORD $0x9d0f; BYTE $0xd0 // setge al LONG $0x386e394c // cmp qword [rsi + 56], r13 WORD $0x9d0f; BYTE $0xd3 // setge bl LONG $0x406e394c // cmp qword [rsi + 64], r13 - QUAD $0x0000013024949d0f // setge byte [rsp + 304] + QUAD $0x0000015024949d0f // setge byte [rsp + 336] LONG $0x486e394c // cmp qword [rsi + 72], r13 - WORD $0x9d0f; BYTE $0xd2 // setge dl + LONG $0xd79d0f40 // setge dil LONG $0x506e394c // cmp qword [rsi + 80], r13 - LONG $0xd19d0f41 // setge r9b + LONG $0xd09d0f41 // setge r8b LONG $0x586e394c // cmp qword [rsi + 88], r13 - LONG $0xd29d0f41 // setge r10b + LONG $0xd19d0f41 // setge r9b LONG $0x606e394c // cmp qword [rsi + 96], r13 LONG $0xd39d0f41 // setge r11b LONG $0x686e394c // cmp qword [rsi + 104], r13 LONG $0xd49d0f41 // setge r12b LONG $0x706e394c // cmp qword [rsi + 112], r13 - QUAD $0x0000010024949d0f // setge byte [rsp + 256] + QUAD $0x0000012024949d0f // setge byte [rsp + 288] LONG $0x786e394c // cmp qword [rsi + 120], r13 WORD $0x9d0f; BYTE $0xd1 // setge cl LONG $0x80ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 128], r13 - QUAD $0x000000b024949d0f // setge byte [rsp + 176] + QUAD $0x000000a024949d0f // setge byte [rsp + 160] LONG $0x88ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 136], r13 - QUAD $0x0000011024949d0f // setge byte [rsp + 272] + QUAD $0x0000013024949d0f // setge byte [rsp + 304] LONG $0x90ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 144], r13 - QUAD $0x0000012024949d0f // setge byte [rsp + 288] + QUAD $0x0000010024949d0f // setge byte [rsp + 256] LONG $0x98ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 152], r13 - QUAD $0x000000f024949d0f 
// setge byte [rsp + 240] + QUAD $0x000000e024949d0f // setge byte [rsp + 224] LONG $0xa0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 160], r13 - QUAD $0x000000c024949d0f // setge byte [rsp + 192] + QUAD $0x000000b024949d0f // setge byte [rsp + 176] LONG $0xa8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 168], r13 - QUAD $0x0000009024949d0f // setge byte [rsp + 144] + LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] LONG $0xb0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 176], r13 QUAD $0x0000008024949d0f // setge byte [rsp + 128] LONG $0xb8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 184], r13 @@ -48880,7 +50320,7 @@ LBB10_110: LONG $0xc0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 192], r13 LONG $0x24549d0f; BYTE $0x30 // setge byte [rsp + 48] LONG $0xc8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 200], r13 - LONG $0x24549d0f; BYTE $0x70 // setge byte [rsp + 112] + QUAD $0x0000009024949d0f // setge byte [rsp + 144] LONG $0xd0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 208], r13 LONG $0x24549d0f; BYTE $0x60 // setge byte [rsp + 96] LONG $0xd8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 216], r13 @@ -48892,113 +50332,114 @@ LBB10_110: LONG $0xf0ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 240], r13 LONG $0x24549d0f; BYTE $0x08 // setge byte [rsp + 8] LONG $0xf8ae394c; WORD $0x0000; BYTE $0x00 // cmp qword [rsi + 248], r13 - LONG $0xd09d0f41 // setge r8b - WORD $0x0040; BYTE $0xff // add dil, dil - QUAD $0x000000a024bc0240 // add dil, byte [rsp + 160] + WORD $0x9d0f; BYTE $0xd2 // setge dl + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x000000f024940244 // add r10b, byte [rsp + 240] WORD $0xe0c0; BYTE $0x06 // shl al, 6 WORD $0xe3c0; BYTE $0x07 // shl bl, 7 WORD $0xc308 // or bl, al LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0841; BYTE $0xfe // or r14b, dil - WORD $0xd200 // add dl, dl - LONG $0x30249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 304] - QUAD $0x000001502484b60f // movzx eax, byte [rsp + 336] + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + WORD $0x0040; BYTE $0xff // add dil, dil + QUAD $0x0000015024bc0240 // add dil, byte [rsp + 336] + QUAD $0x000001102484b60f // movzx eax, byte [rsp + 272] WORD $0xe0c0; BYTE $0x03 // shl al, 3 WORD $0x0844; BYTE $0xf0 // or al, r14b - LONG $0x02e1c041 // shl r9b, 2 - WORD $0x0841; BYTE $0xd1 // or r9b, dl - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0xc208 // or dl, al - WORD $0xd789 // mov edi, edx - LONG $0x03e2c041 // shl r10b, 3 - WORD $0x0845; BYTE $0xca // or r10b, r9b - QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + WORD $0x8941; BYTE $0xc2 // mov r10d, eax + QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] + LONG $0x02e0c041 // shl r8b, 2 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x000000d02484b60f // movzx eax, byte [rsp + 208] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xd0 // or al, r10b + WORD $0xc789 // mov edi, eax + LONG $0x03e1c041 // shl r9b, 3 + WORD $0x0845; BYTE $0xc1 // or r9b, r8b + QUAD $0x000000c02484b60f // movzx eax, byte [rsp + 192] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil LONG $0x04e3c041 // shl r11b, 4 - WORD $0x0845; BYTE $0xd3 // or r11b, r10b + WORD $0x0845; BYTE $0xcb // or r11b, r9b LONG $0x05e4c041 // shl r12b, 5 WORD $0x0845; BYTE $0xdc // or r12b, r11b - QUAD $0x0000010024bcb60f // movzx edi, 
byte [rsp + 256] + QUAD $0x0000012024bcb60f // movzx edi, byte [rsp + 288] LONG $0x06e7c040 // shl dil, 6 WORD $0xe1c0; BYTE $0x07 // shl cl, 7 WORD $0x0840; BYTE $0xf9 // or cl, dil - WORD $0xd308 // or bl, dl + WORD $0xc308 // or bl, al WORD $0x0844; BYTE $0xe1 // or cl, r12b - QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] - QUAD $0x000001102494b60f // movzx edx, byte [rsp + 272] - WORD $0xd200 // add dl, dl - LONG $0xb0249402; WORD $0x0000; BYTE $0x00 // add dl, byte [rsp + 176] - WORD $0xd789 // mov edi, edx - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000c02494b60f // movzx edx, byte [rsp + 192] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xfa // or dl, dil - WORD $0xd789 // mov edi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xfa // or dl, dil + QUAD $0x000001302484b60f // movzx eax, byte [rsp + 304] + WORD $0xc000 // add al, al + LONG $0xa0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 160] + WORD $0xc789 // mov edi, eax + QUAD $0x000001002484b60f // movzx eax, byte [rsp + 256] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000e02484b60f // movzx eax, byte [rsp + 224] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0840; BYTE $0xf8 // or al, dil + WORD $0xc789 // mov edi, eax + LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0840; BYTE $0xf8 // or al, dil WORD $0x8841; BYTE $0x1e // mov byte [r14], bl QUAD $0x00000080249cb60f // movzx ebx, byte [rsp + 128] WORD $0xe3c0; BYTE $0x06 // shl bl, 6 LONG $0x07e7c041 // shl r15b, 7 WORD $0x0841; BYTE $0xdf // or r15b, bl LONG $0x014e8841 // mov byte [r14 + 1], cl - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] - WORD $0xc900 // add cl, cl - LONG $0x30244c02 // add cl, byte [rsp + 48] - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x60 // movzx ecx, byte [rsp + 96] - WORD $0xe1c0; BYTE $0x02 // shl cl, 2 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x50 // movzx ecx, byte [rsp + 80] - WORD $0xe1c0; BYTE $0x03 // shl cl, 3 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x20 // movzx ecx, byte [rsp + 32] - WORD $0xe1c0; BYTE $0x04 // shl cl, 4 - WORD $0xd108 // or cl, dl - WORD $0xca89 // mov edx, ecx - LONG $0x244cb60f; BYTE $0x10 // movzx ecx, byte [rsp + 16] - WORD $0xe1c0; BYTE $0x05 // shl cl, 5 - WORD $0xd108 // or cl, dl - LONG $0x2454b60f; BYTE $0x08 // movzx edx, byte [rsp + 8] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - LONG $0x07e0c041 // shl r8b, 7 - WORD $0x0841; BYTE $0xd0 // or r8b, dl - WORD $0x0841; BYTE $0xc8 // or r8b, cl + WORD $0x0841; BYTE $0xc7 // or r15b, al + QUAD $0x000000902484b60f // movzx eax, byte [rsp + 144] + WORD $0xc000 // add al, al + LONG $0x30244402 // add al, byte [rsp + 48] + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x60 // 
movzx eax, byte [rsp + 96] + WORD $0xe0c0; BYTE $0x02 // shl al, 2 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] + WORD $0xe0c0; BYTE $0x03 // shl al, 3 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0xc808 // or al, cl + LONG $0x244cb60f; BYTE $0x08 // movzx ecx, byte [rsp + 8] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe2c0; BYTE $0x07 // shl dl, 7 + WORD $0xca08 // or dl, cl + WORD $0xc208 // or dl, al LONG $0x027e8845 // mov byte [r14 + 2], r15b - LONG $0x03468845 // mov byte [r14 + 3], r8b + LONG $0x03568841 // mov byte [r14 + 3], dl LONG $0x00c68148; WORD $0x0001; BYTE $0x00 // add rsi, 256 LONG $0x04c68349 // add r14, 4 QUAD $0x0000014024848348; BYTE $0xff // add qword [rsp + 320], -1 - JNE LBB10_110 + JNE LBB10_163 LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x00000170249c8b4c // mov r11, qword [rsp + 368] -LBB10_112: +LBB10_165: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB10_175 + JNE LBB10_170 WORD $0x3145; BYTE $0xdb // xor r11d, r11d - JMP LBB10_177 + JMP LBB10_168 -LBB10_115: +LBB10_172: LONG $0x1f5a8d4d // lea r11, [r10 + 31] WORD $0x854d; BYTE $0xd2 // test r10, r10 LONG $0xda490f4d // cmovns r11, r10 @@ -49008,14 +50449,15 @@ LBB10_115: WORD $0xe083; BYTE $0xf8 // and eax, -8 LONG $0x100f44f3; BYTE $0x1a // movss xmm11, dword [rdx] WORD $0x2941; BYTE $0xc1 // sub r9d, eax - JE LBB10_119 + JE LBB10_176 WORD $0x6349; BYTE $0xc1 // movsxd rax, r9d -LBB10_117: - LONG $0x1e2e0f44 // ucomiss xmm11, dword [rsi] - WORD $0x960f; BYTE $0xd2 // setbe dl +LBB10_174: + LONG $0x06100ff3 // movss xmm0, dword [rsi] LONG $0x04c68348 // add rsi, 4 - WORD $0xdaf6 // neg dl + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x000000ba; BYTE $0x00 // mov edx, 0 + WORD $0xd280; BYTE $0xff // adc dl, -1 LONG $0x07788d48 // lea rdi, [rax + 7] WORD $0x8548; BYTE $0xc0 // test rax, rax LONG $0xf8490f48 // cmovns rdi, rax @@ -49032,263 +50474,293 @@ LBB10_117: LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x01c08348 // add rax, 1 LONG $0x08f88348 // cmp rax, 8 - JNE LBB10_117 + JNE LBB10_174 LONG $0x01c68349 // add r14, 1 -LBB10_119: +LBB10_176: LONG $0x05fbc149 // sar r11, 5 LONG $0x20fa8349 // cmp r10, 32 - JL LBB10_139 + JL LBB10_177 LONG $0x04fb8349 // cmp r11, 4 - JB LBB10_123 + JB LBB10_179 WORD $0x894c; BYTE $0xd8 // mov rax, r11 LONG $0x07e0c148 // shl rax, 7 WORD $0x0148; BYTE $0xf0 // add rax, rsi WORD $0x3949; BYTE $0xc6 // cmp r14, rax - JAE LBB10_200 + JAE LBB10_182 LONG $0x9e048d4b // lea rax, [r14 + 4*r11] WORD $0x3948; BYTE $0xf0 // cmp rax, rsi - JBE LBB10_200 + JBE LBB10_182 -LBB10_123: - WORD $0x3145; BYTE $0xc0 // xor r8d, r8d - WORD $0x8948; BYTE $0xf3 // mov rbx, rsi +LBB10_179: + WORD $0xc031 // xor eax, eax + WORD $0x8948; BYTE $0xf2 // mov rdx, rsi WORD $0x894d; BYTE $0xf7 // mov r15, r14 -LBB10_124: +LBB10_185: LONG $0x247c894c; BYTE $0x08 // mov qword [rsp + 8], r15 LONG $0x2454894c; BYTE $0x48 // mov qword [rsp + 72], r10 QUAD $0x00000140249c894c // mov qword [rsp + 320], 
r11 - WORD $0x294d; BYTE $0xc3 // sub r11, r8 - QUAD $0x000000a0249c894c // mov qword [rsp + 160], r11 + WORD $0x2949; BYTE $0xc3 // sub r11, rax + QUAD $0x000000f0249c894c // mov qword [rsp + 240], r11 -LBB10_125: - LONG $0x1b2e0f44 // ucomiss xmm11, dword [rbx] - QUAD $0x000001502494960f // setbe byte [rsp + 336] - LONG $0x5b2e0f44; BYTE $0x04 // ucomiss xmm11, dword [rbx + 4] - LONG $0xd0960f41 // setbe r8b - LONG $0x5b2e0f44; BYTE $0x08 // ucomiss xmm11, dword [rbx + 8] - LONG $0xd6960f41 // setbe r14b - LONG $0x5b2e0f44; BYTE $0x0c // ucomiss xmm11, dword [rbx + 12] - LONG $0xd5960f41 // setbe r13b - LONG $0x5b2e0f44; BYTE $0x10 // ucomiss xmm11, dword [rbx + 16] - QUAD $0x000000e02494960f // setbe byte [rsp + 224] - LONG $0x5b2e0f44; BYTE $0x14 // ucomiss xmm11, dword [rbx + 20] - QUAD $0x000000d02494960f // setbe byte [rsp + 208] - LONG $0x5b2e0f44; BYTE $0x18 // ucomiss xmm11, dword [rbx + 24] - WORD $0x960f; BYTE $0xd0 // setbe al - LONG $0x5b2e0f44; BYTE $0x1c // ucomiss xmm11, dword [rbx + 28] - LONG $0xd3960f41 // setbe r11b - LONG $0x5b2e0f44; BYTE $0x20 // ucomiss xmm11, dword [rbx + 32] - QUAD $0x000001002494960f // setbe byte [rsp + 256] - LONG $0x5b2e0f44; BYTE $0x24 // ucomiss xmm11, dword [rbx + 36] - WORD $0x960f; BYTE $0xd2 // setbe dl - LONG $0x5b2e0f44; BYTE $0x28 // ucomiss xmm11, dword [rbx + 40] - LONG $0xd6960f40 // setbe sil - LONG $0x5b2e0f44; BYTE $0x2c // ucomiss xmm11, dword [rbx + 44] - LONG $0xd7960f40 // setbe dil - LONG $0x5b2e0f44; BYTE $0x30 // ucomiss xmm11, dword [rbx + 48] - LONG $0xd2960f41 // setbe r10b - LONG $0x5b2e0f44; BYTE $0x34 // ucomiss xmm11, dword [rbx + 52] - LONG $0xd4960f41 // setbe r12b - LONG $0x5b2e0f44; BYTE $0x38 // ucomiss xmm11, dword [rbx + 56] - QUAD $0x000001102494960f // setbe byte [rsp + 272] - LONG $0x5b2e0f44; BYTE $0x3c // ucomiss xmm11, dword [rbx + 60] - LONG $0xd1960f41 // setbe r9b - LONG $0x5b2e0f44; BYTE $0x40 // ucomiss xmm11, dword [rbx + 64] - QUAD $0x000000b02494960f // setbe byte [rsp + 176] - LONG $0x5b2e0f44; BYTE $0x44 // ucomiss xmm11, dword [rbx + 68] - QUAD $0x000001302494960f // setbe byte [rsp + 304] - LONG $0x5b2e0f44; BYTE $0x48 // ucomiss xmm11, dword [rbx + 72] - QUAD $0x000001202494960f // setbe byte [rsp + 288] - LONG $0x5b2e0f44; BYTE $0x4c // ucomiss xmm11, dword [rbx + 76] - QUAD $0x000000f02494960f // setbe byte [rsp + 240] - LONG $0x5b2e0f44; BYTE $0x50 // ucomiss xmm11, dword [rbx + 80] - QUAD $0x000000c02494960f // setbe byte [rsp + 192] - LONG $0x5b2e0f44; BYTE $0x54 // ucomiss xmm11, dword [rbx + 84] - QUAD $0x000000902494960f // setbe byte [rsp + 144] - LONG $0x5b2e0f44; BYTE $0x58 // ucomiss xmm11, dword [rbx + 88] - QUAD $0x000000802494960f // setbe byte [rsp + 128] - LONG $0x5b2e0f44; BYTE $0x5c // ucomiss xmm11, dword [rbx + 92] - LONG $0xd7960f41 // setbe r15b - LONG $0x5b2e0f44; BYTE $0x60 // ucomiss xmm11, dword [rbx + 96] - LONG $0x2454960f; BYTE $0x30 // setbe byte [rsp + 48] - LONG $0x5b2e0f44; BYTE $0x64 // ucomiss xmm11, dword [rbx + 100] - LONG $0x2454960f; BYTE $0x70 // setbe byte [rsp + 112] - LONG $0x5b2e0f44; BYTE $0x68 // ucomiss xmm11, dword [rbx + 104] - LONG $0x2454960f; BYTE $0x60 // setbe byte [rsp + 96] - LONG $0x5b2e0f44; BYTE $0x6c // ucomiss xmm11, dword [rbx + 108] - LONG $0x2454960f; BYTE $0x50 // setbe byte [rsp + 80] - LONG $0x5b2e0f44; BYTE $0x70 // ucomiss xmm11, dword [rbx + 112] - LONG $0x2454960f; BYTE $0x20 // setbe byte [rsp + 32] - LONG $0x5b2e0f44; BYTE $0x74 // ucomiss xmm11, dword [rbx + 116] - LONG $0x2454960f; BYTE $0x10 // setbe byte [rsp + 
16] - LONG $0x5b2e0f44; BYTE $0x78 // ucomiss xmm11, dword [rbx + 120] - QUAD $0x000001602494960f // setbe byte [rsp + 352] - LONG $0x5b2e0f44; BYTE $0x7c // ucomiss xmm11, dword [rbx + 124] - WORD $0x960f; BYTE $0xd1 // setbe cl - WORD $0x0045; BYTE $0xc0 // add r8b, r8b - QUAD $0x0000015024840244 // add r8b, byte [rsp + 336] - WORD $0xe0c0; BYTE $0x06 // shl al, 6 - LONG $0x07e3c041 // shl r11b, 7 - WORD $0x0841; BYTE $0xc3 // or r11b, al - LONG $0x02e6c041 // shl r14b, 2 - WORD $0x0845; BYTE $0xc6 // or r14b, r8b - WORD $0xd200 // add dl, dl - LONG $0x00249402; WORD $0x0001; BYTE $0x00 // add dl, byte [rsp + 256] - LONG $0x03e5c041 // shl r13b, 3 - WORD $0x0845; BYTE $0xf5 // or r13b, r14b +LBB10_186: + LONG $0x02100ff3 // movss xmm0, dword [rdx] + LONG $0x4a100ff3; BYTE $0x04 // movss xmm1, dword [rdx + 4] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000001202494930f // setae byte [rsp + 288] + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + WORD $0x930f; BYTE $0xd1 // setae cl + LONG $0x42100ff3; BYTE $0x08 // movss xmm0, dword [rdx + 8] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd6930f40 // setae sil + LONG $0x42100ff3; BYTE $0x0c // movss xmm0, dword [rdx + 12] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd7930f40 // setae dil + LONG $0x42100ff3; BYTE $0x10 // movss xmm0, dword [rdx + 16] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd0930f41 // setae r8b + LONG $0x42100ff3; BYTE $0x14 // movss xmm0, dword [rdx + 20] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000001102494930f // setae byte [rsp + 272] + LONG $0x42100ff3; BYTE $0x18 // movss xmm0, dword [rdx + 24] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000001302494930f // setae byte [rsp + 304] + LONG $0x42100ff3; BYTE $0x1c // movss xmm0, dword [rdx + 28] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000001502494930f // setae byte [rsp + 336] + LONG $0x42100ff3; BYTE $0x20 // movss xmm0, dword [rdx + 32] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000000c02494930f // setae byte [rsp + 192] + LONG $0x42100ff3; BYTE $0x24 // movss xmm0, dword [rdx + 36] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd1930f41 // setae r9b + LONG $0x42100ff3; BYTE $0x28 // movss xmm0, dword [rdx + 40] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd3930f41 // setae r11b + LONG $0x42100ff3; BYTE $0x2c // movss xmm0, dword [rdx + 44] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0xd7930f41 // setae r15b + LONG $0x42100ff3; BYTE $0x30 // movss xmm0, dword [rdx + 48] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + WORD $0x930f; BYTE $0xd0 // setae al + LONG $0x42100ff3; BYTE $0x34 // movss xmm0, dword [rdx + 52] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000000d02494930f // setae byte [rsp + 208] + LONG $0x42100ff3; BYTE $0x38 // movss xmm0, dword [rdx + 56] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000000b02494930f // setae byte [rsp + 176] + LONG $0x42100ff3; BYTE $0x3c // movss xmm0, dword [rdx + 60] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000000e02494930f // setae byte [rsp + 224] + LONG $0x42100ff3; BYTE $0x40 // movss xmm0, dword [rdx + 64] + LONG $0x4a100ff3; BYTE $0x44 // movss xmm1, dword [rdx + 68] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x48 // movss xmm0, dword [rdx + 72] + QUAD $0x000000902494930f // setae byte [rsp + 144] + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + LONG $0x4a100ff3; BYTE $0x4c // movss xmm1, dword [rdx + 76] + LONG $0xd2930f41 // setae r10b + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; 
BYTE $0x50 // movss xmm0, dword [rdx + 80] + LONG $0xd6930f41 // setae r14b + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + LONG $0x4a100ff3; BYTE $0x54 // movss xmm1, dword [rdx + 84] + LONG $0xd4930f41 // setae r12b + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x58 // movss xmm0, dword [rdx + 88] + QUAD $0x000001002494930f // setae byte [rsp + 256] + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + LONG $0x4a100ff3; BYTE $0x5c // movss xmm1, dword [rdx + 92] + QUAD $0x000000a02494930f // setae byte [rsp + 160] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x60 // movss xmm0, dword [rdx + 96] + LONG $0x2454930f; BYTE $0x70 // setae byte [rsp + 112] + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + LONG $0x4a100ff3; BYTE $0x64 // movss xmm1, dword [rdx + 100] + LONG $0xd5930f41 // setae r13b + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x68 // movss xmm0, dword [rdx + 104] + LONG $0x2454930f; BYTE $0x10 // setae byte [rsp + 16] + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + LONG $0x4a100ff3; BYTE $0x6c // movss xmm1, dword [rdx + 108] + QUAD $0x000000802494930f // setae byte [rsp + 128] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x70 // movss xmm0, dword [rdx + 112] + LONG $0x2454930f; BYTE $0x60 // setae byte [rsp + 96] + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + LONG $0x4a100ff3; BYTE $0x74 // movss xmm1, dword [rdx + 116] + LONG $0x2454930f; BYTE $0x50 // setae byte [rsp + 80] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x42100ff3; BYTE $0x78 // movss xmm0, dword [rdx + 120] + LONG $0x2454930f; BYTE $0x30 // setae byte [rsp + 48] + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + LONG $0x4a100ff3; BYTE $0x7c // movss xmm1, dword [rdx + 124] + LONG $0x2454930f; BYTE $0x20 // setae byte [rsp + 32] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + QUAD $0x000001602494930f // setae byte [rsp + 352] + LONG $0x80ea8348 // sub rdx, -128 + LONG $0xcb2e0f41 // ucomiss xmm1, xmm11 + WORD $0x930f; BYTE $0xd3 // setae bl + WORD $0xc900 // add cl, cl + LONG $0x20248c02; WORD $0x0001; BYTE $0x00 // add cl, byte [rsp + 288] LONG $0x02e6c040 // shl sil, 2 - WORD $0x0840; BYTE $0xd6 // or sil, dl - QUAD $0x000000e02494b60f // movzx edx, byte [rsp + 224] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0844; BYTE $0xea // or dl, r13b - WORD $0x8941; BYTE $0xd0 // mov r8d, edx + WORD $0x0840; BYTE $0xce // or sil, cl LONG $0x03e7c040 // shl dil, 3 WORD $0x0840; BYTE $0xf7 // or dil, sil - QUAD $0x000000d02494b60f // movzx edx, byte [rsp + 208] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0844; BYTE $0xc2 // or dl, r8b - LONG $0x04e2c041 // shl r10b, 4 - WORD $0x0841; BYTE $0xfa // or r10b, dil - LONG $0x05e4c041 // shl r12b, 5 - WORD $0x0845; BYTE $0xd4 // or r12b, r10b - QUAD $0x0000011024b4b60f // movzx esi, byte [rsp + 272] - LONG $0x06e6c040 // shl sil, 6 - LONG $0x07e1c041 // shl r9b, 7 - WORD $0x0841; BYTE $0xf1 // or r9b, sil - WORD $0x0841; BYTE $0xd3 // or r11b, dl - WORD $0x0845; BYTE $0xe1 // or r9b, r12b - QUAD $0x000001302484b60f // movzx eax, byte [rsp + 304] - WORD $0xc000 // add al, al - LONG $0xb0248402; WORD $0x0000; BYTE $0x00 // add al, byte [rsp + 176] - QUAD $0x000001202494b60f // movzx edx, byte [rsp + 288] - WORD $0xe2c0; BYTE $0x02 // shl dl, 2 - WORD $0xc208 // or dl, al - WORD $0xd689 // mov esi, edx - QUAD $0x000000f02494b60f // movzx edx, byte [rsp + 240] - WORD $0xe2c0; BYTE $0x03 // shl dl, 3 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - QUAD $0x000000c02494b60f // movzx edx, 
byte [rsp + 192] - WORD $0xe2c0; BYTE $0x04 // shl dl, 4 - WORD $0x0840; BYTE $0xf2 // or dl, sil - WORD $0xd689 // mov esi, edx - QUAD $0x000000902494b60f // movzx edx, byte [rsp + 144] - WORD $0xe2c0; BYTE $0x05 // shl dl, 5 - WORD $0x0840; BYTE $0xf2 // or dl, sil - LONG $0x24748b48; BYTE $0x08 // mov rsi, qword [rsp + 8] - WORD $0x8844; BYTE $0x1e // mov byte [rsi], r11b - QUAD $0x0000008024bcb60f // movzx edi, byte [rsp + 128] + LONG $0x04e0c041 // shl r8b, 4 + WORD $0x0841; BYTE $0xf8 // or r8b, dil + QUAD $0x00000110248cb60f // movzx ecx, byte [rsp + 272] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0x0844; BYTE $0xc1 // or cl, r8b + QUAD $0x0000013024bcb60f // movzx edi, byte [rsp + 304] LONG $0x06e7c040 // shl dil, 6 - LONG $0x07e7c041 // shl r15b, 7 - WORD $0x0841; BYTE $0xff // or r15b, dil - LONG $0x014e8844 // mov byte [rsi + 1], r9b - WORD $0x0841; BYTE $0xd7 // or r15b, dl - LONG $0x2444b60f; BYTE $0x70 // movzx eax, byte [rsp + 112] + QUAD $0x0000015024b4b60f // movzx esi, byte [rsp + 336] + LONG $0x07e6c040 // shl sil, 7 + WORD $0x0840; BYTE $0xfe // or sil, dil + WORD $0x0045; BYTE $0xc9 // add r9b, r9b + QUAD $0x000000c0248c0244 // add r9b, byte [rsp + 192] + LONG $0x02e3c041 // shl r11b, 2 + WORD $0x0845; BYTE $0xcb // or r11b, r9b + LONG $0x03e7c041 // shl r15b, 3 + WORD $0x0845; BYTE $0xdf // or r15b, r11b + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xf8 // or al, r15b + WORD $0x0840; BYTE $0xce // or sil, cl + QUAD $0x000000d0248cb60f // movzx ecx, byte [rsp + 208] + WORD $0xe1c0; BYTE $0x05 // shl cl, 5 + WORD $0xc108 // or cl, al + QUAD $0x000000b02484b60f // movzx eax, byte [rsp + 176] + WORD $0xe0c0; BYTE $0x06 // shl al, 6 + QUAD $0x000000e024bcb60f // movzx edi, byte [rsp + 224] + LONG $0x07e7c040 // shl dil, 7 + WORD $0x0840; BYTE $0xc7 // or dil, al + WORD $0x0045; BYTE $0xd2 // add r10b, r10b + QUAD $0x0000009024940244 // add r10b, byte [rsp + 144] + LONG $0x02e6c041 // shl r14b, 2 + WORD $0x0845; BYTE $0xd6 // or r14b, r10b + LONG $0x03e4c041 // shl r12b, 3 + WORD $0x0845; BYTE $0xf4 // or r12b, r14b + QUAD $0x000001002484b60f // movzx eax, byte [rsp + 256] + WORD $0xe0c0; BYTE $0x04 // shl al, 4 + WORD $0x0844; BYTE $0xe0 // or al, r12b + WORD $0x8941; BYTE $0xc0 // mov r8d, eax + QUAD $0x000000a02484b60f // movzx eax, byte [rsp + 160] + WORD $0xe0c0; BYTE $0x05 // shl al, 5 + WORD $0x0844; BYTE $0xc0 // or al, r8b + WORD $0x0840; BYTE $0xcf // or dil, cl + LONG $0x244cb60f; BYTE $0x70 // movzx ecx, byte [rsp + 112] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + LONG $0x07e5c041 // shl r13b, 7 + WORD $0x0841; BYTE $0xcd // or r13b, cl + LONG $0x24448b4c; BYTE $0x08 // mov r8, qword [rsp + 8] + WORD $0x8841; BYTE $0x30 // mov byte [r8], sil + WORD $0x0841; BYTE $0xc5 // or r13b, al + QUAD $0x000000802484b60f // movzx eax, byte [rsp + 128] WORD $0xc000 // add al, al - LONG $0x30244402 // add al, byte [rsp + 48] - WORD $0xc289 // mov edx, eax + LONG $0x10244402 // add al, byte [rsp + 16] + WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x60 // movzx eax, byte [rsp + 96] WORD $0xe0c0; BYTE $0x02 // shl al, 2 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax LONG $0x2444b60f; BYTE $0x50 // movzx eax, byte [rsp + 80] WORD $0xe0c0; BYTE $0x03 // shl al, 3 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x30 // movzx eax, 
byte [rsp + 48] WORD $0xe0c0; BYTE $0x04 // shl al, 4 - WORD $0xd008 // or al, dl - WORD $0xc289 // mov edx, eax - LONG $0x2444b60f; BYTE $0x10 // movzx eax, byte [rsp + 16] + WORD $0xc808 // or al, cl + WORD $0xc189 // mov ecx, eax + LONG $0x2444b60f; BYTE $0x20 // movzx eax, byte [rsp + 32] WORD $0xe0c0; BYTE $0x05 // shl al, 5 - WORD $0xd008 // or al, dl - QUAD $0x000001602494b60f // movzx edx, byte [rsp + 352] - WORD $0xe2c0; BYTE $0x06 // shl dl, 6 - WORD $0xe1c0; BYTE $0x07 // shl cl, 7 - WORD $0xd108 // or cl, dl - WORD $0xc108 // or cl, al - LONG $0x027e8844 // mov byte [rsi + 2], r15b - WORD $0x4e88; BYTE $0x03 // mov byte [rsi + 3], cl - LONG $0x80c38148; WORD $0x0000; BYTE $0x00 // add rbx, 128 - LONG $0x04c68348 // add rsi, 4 - LONG $0x24748948; BYTE $0x08 // mov qword [rsp + 8], rsi - QUAD $0x000000a024848348; BYTE $0xff // add qword [rsp + 160], -1 - JNE LBB10_125 + WORD $0xc808 // or al, cl + LONG $0x01788841 // mov byte [r8 + 1], dil + QUAD $0x00000160248cb60f // movzx ecx, byte [rsp + 352] + WORD $0xe1c0; BYTE $0x06 // shl cl, 6 + WORD $0xe3c0; BYTE $0x07 // shl bl, 7 + WORD $0xcb08 // or bl, cl + LONG $0x02688845 // mov byte [r8 + 2], r13b + WORD $0xc308 // or bl, al + LONG $0x03588841 // mov byte [r8 + 3], bl + LONG $0x04c08349 // add r8, 4 + LONG $0x2444894c; BYTE $0x08 // mov qword [rsp + 8], r8 + QUAD $0x000000f024848348; BYTE $0xff // add qword [rsp + 240], -1 + JNE LBB10_186 LONG $0x247c8b4c; BYTE $0x08 // mov r15, qword [rsp + 8] LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] QUAD $0x00000140249c8b4c // mov r11, qword [rsp + 320] - JMP LBB10_140 + JMP LBB10_188 -LBB10_127: - LONG $0x2474894c; BYTE $0x70 // mov qword [rsp + 112], r14 +LBB10_9: + QUAD $0x0000009024b4894c // mov qword [rsp + 144], r14 -LBB10_128: +LBB10_91: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_151 + JNE LBB10_94 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - JMP LBB10_154 + JMP LBB10_97 -LBB10_131: - QUAD $0x000000d024b4894c // mov qword [rsp + 208], r14 +LBB10_61: + QUAD $0x000000e024b4894c // mov qword [rsp + 224], r14 -LBB10_132: +LBB10_72: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_156 + JNE LBB10_75 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - JMP LBB10_159 + JMP LBB10_78 -LBB10_135: +LBB10_119: WORD $0x894d; BYTE $0xf4 // mov r12, r14 WORD $0x8949; BYTE $0xf3 // mov r11, rsi -LBB10_136: +LBB10_130: LONG $0x05e7c149 // shl r15, 5 WORD $0x394d; BYTE $0xd7 // cmp r15, r10 - JGE LBB10_182 + JGE LBB10_201 WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xf8 // sub r8, r15 WORD $0xf749; BYTE $0xd7 // not r15 WORD $0x014d; BYTE $0xd7 // add r15, r10 - JNE LBB10_183 + JNE LBB10_135 WORD $0xf631 // xor esi, esi - JMP LBB10_185 + JMP LBB10_133 -LBB10_139: +LBB10_177: WORD $0x894d; BYTE $0xf7 // mov r15, r14 - WORD $0x8948; BYTE $0xf3 // mov rbx, rsi + WORD $0x8948; BYTE $0xf2 // mov rdx, rsi -LBB10_140: +LBB10_188: LONG $0x05e3c149 // shl r11, 5 WORD $0x394d; BYTE $0xd3 // cmp r11, r10 - JGE LBB10_182 + JGE LBB10_201 + WORD $0x8948; BYTE $0xd0 // mov rax, rdx WORD $0x894d; BYTE $0xd0 // mov r8, r10 WORD $0x294d; BYTE $0xd8 // sub 
r8, r11 WORD $0xf749; BYTE $0xd3 // not r11 WORD $0x014d; BYTE $0xd3 // add r11, r10 - JNE LBB10_187 + JNE LBB10_193 WORD $0xf631 // xor esi, esi - JMP LBB10_189 + JMP LBB10_191 -LBB10_143: +LBB10_155: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB10_144: +LBB10_156: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 LONG $0x000000bf; BYTE $0x00 // mov edi, 0 LONG $0xffd78040 // adc dil, -1 @@ -49316,21 +50788,21 @@ LBB10_144: WORD $0xc330 // xor bl, al LONG $0x161c8841 // mov byte [r14 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB10_144 + JNE LBB10_156 -LBB10_145: +LBB10_40: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 + JE LBB10_201 WORD $0xc031 // xor eax, eax WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 - JMP LBB10_174 + JMP LBB10_199 -LBB10_147: +LBB10_153: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB10_148: +LBB10_154: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d WORD $0x9d0f; BYTE $0xd0 // setge al WORD $0xd8f6 // neg al @@ -49358,21 +50830,21 @@ LBB10_148: WORD $0xda30 // xor dl, bl LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB10_148 + JNE LBB10_154 -LBB10_149: +LBB10_150: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 + JE LBB10_201 WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - JMP LBB10_179 + JMP LBB10_152 -LBB10_151: - WORD $0x894d; BYTE $0xc2 // mov r10, r8 - LONG $0xfee28349 // and r10, -2 - WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - LONG $0x24748b4c; BYTE $0x70 // mov r14, qword [rsp + 112] +LBB10_94: + WORD $0x894d; BYTE $0xc2 // mov r10, r8 + LONG $0xfee28349 // and r10, -2 + WORD $0x3145; BYTE $0xc9 // xor r9d, r9d + QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] -LBB10_152: +LBB10_95: WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x0e1c3846 // cmp byte [rsi + r9], r11b WORD $0x9d0f; BYTE $0xd3 // setge bl @@ -49400,34 +50872,34 @@ LBB10_152: WORD $0xd030 // xor al, dl LONG $0x3e048841 // mov byte [r14 + rdi], al WORD $0x394d; BYTE $0xca // cmp r10, r9 - JNE LBB10_152 + JNE LBB10_95 WORD $0x014c; BYTE $0xce // add rsi, r9 -LBB10_154: - LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 - WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b - WORD $0x9d0f; BYTE $0xd0 // setge al - WORD $0xd8f6 // neg al - WORD $0x894c; BYTE $0xca // mov rdx, r9 - LONG $0x03eac148 // shr rdx, 3 - LONG $0x24448b4c; BYTE $0x70 // mov r8, qword [rsp + 112] - LONG $0x103c8a41 // mov dil, byte [r8 + rdx] - LONG $0x07e18041 // and r9b, 7 - WORD $0x01b3 // mov bl, 1 - WORD $0x8944; BYTE $0xc9 // mov ecx, r9d - WORD $0xe3d2 // shl bl, cl - WORD $0x3040; BYTE $0xf8 // xor al, dil - WORD $0xc320 // and bl, al - JMP LBB10_161 +LBB10_97: + LONG $0x01c0f641 // test r8b, 1 + JE LBB10_201 + WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b + WORD $0x9d0f; BYTE $0xd0 // setge al + WORD $0xd8f6 // neg al + WORD $0x894c; BYTE $0xca // mov rdx, r9 + LONG $0x03eac148 // shr rdx, 3 + QUAD $0x0000009024848b4c // mov r8, qword [rsp + 144] + LONG $0x103c8a41 // mov dil, byte [r8 + rdx] + LONG $0x07e18041 // and r9b, 7 + WORD $0x01b3 // mov bl, 1 + WORD $0x8944; BYTE $0xc9 // mov ecx, r9d + WORD $0xe3d2 // shl bl, cl + WORD $0x3040; BYTE $0xf8 // xor al, dil + WORD $0xc320 // and bl, al + JMP LBB10_80 -LBB10_156: +LBB10_75: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xc9 // xor r9d, r9d - QUAD $0x000000d024b48b4c // mov r14, qword [rsp + 208] + 
QUAD $0x000000e024b48b4c // mov r14, qword [rsp + 224] -LBB10_157: +LBB10_76: WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x0e1c3846 // cmp byte [rsi + r9], r11b LONG $0x000000bb; BYTE $0x00 // mov ebx, 0 @@ -49455,18 +50927,18 @@ LBB10_157: WORD $0xd030 // xor al, dl LONG $0x3e048841 // mov byte [r14 + rdi], al WORD $0x394d; BYTE $0xca // cmp r10, r9 - JNE LBB10_157 + JNE LBB10_76 WORD $0x014c; BYTE $0xce // add rsi, r9 -LBB10_159: +LBB10_78: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 + JE LBB10_201 WORD $0xc031 // xor eax, eax WORD $0x3844; BYTE $0x1e // cmp byte [rsi], r11b WORD $0xff14 // adc al, -1 WORD $0x894c; BYTE $0xca // mov rdx, r9 LONG $0x03eac148 // shr rdx, 3 - QUAD $0x000000d024848b4c // mov r8, qword [rsp + 208] + QUAD $0x000000e024848b4c // mov r8, qword [rsp + 224] LONG $0x103c8a41 // mov dil, byte [r8 + rdx] LONG $0x07e18041 // and r9b, 7 WORD $0x01b3 // mov bl, 1 @@ -49475,17 +50947,17 @@ LBB10_159: WORD $0x3040; BYTE $0xf8 // xor al, dil WORD $0xc320 // and bl, al -LBB10_161: +LBB10_80: WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x101c8841 // mov byte [r8 + rdx], bl - JMP LBB10_182 + JMP LBB10_201 -LBB10_162: +LBB10_137: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB10_163: +LBB10_138: WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d LONG $0x000000bf; BYTE $0x00 // mov edi, 0 LONG $0xffd78040 // adc dil, -1 @@ -49513,40 +50985,42 @@ LBB10_163: WORD $0xc330 // xor bl, al LONG $0x161c8841 // mov byte [r14 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB10_163 + JNE LBB10_138 -LBB10_164: +LBB10_24: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 + JE LBB10_201 WORD $0xc031 // xor eax, eax WORD $0x3944; BYTE $0x2e // cmp dword [rsi], r13d - JMP LBB10_174 + JMP LBB10_199 -LBB10_166: +LBB10_195: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB10_167: - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - WORD $0x960f; BYTE $0xd0 // setbe al - WORD $0xd8f6 // neg al +LBB10_196: + LONG $0x0e100ff2 // movsd xmm1, qword [rsi] + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x000000b8; BYTE $0x00 // mov eax, 0 + WORD $0xff14 // adc al, -1 WORD $0x894c; BYTE $0xdf // mov rdi, r11 LONG $0x03efc148 // shr rdi, 3 LONG $0x0cb60f45; BYTE $0x3e // movzx r9d, byte [r14 + rdi] + WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0x8944; BYTE $0xd9 // mov ecx, r11d WORD $0xe180; BYTE $0x06 // and cl, 6 WORD $0x01b3 // mov bl, 1 WORD $0xe3d2 // shl bl, cl - WORD $0x3044; BYTE $0xc8 // xor al, r9b WORD $0xc320 // and bl, al WORD $0x3044; BYTE $0xcb // xor bl, r9b LONG $0x3e1c8841 // mov byte [r14 + rdi], bl LONG $0x02c38349 // add r11, 2 - LONG $0x462e0f66; BYTE $0x08 // ucomisd xmm0, qword [rsi + 8] - WORD $0x960f; BYTE $0xd0 // setbe al + LONG $0x4e100ff2; BYTE $0x08 // movsd xmm1, qword [rsi + 8] LONG $0x10c68348 // add rsi, 16 - WORD $0xd8f6 // neg al + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + LONG $0x000000b8; BYTE $0x00 // mov eax, 0 + WORD $0xff14 // adc al, -1 WORD $0xd830 // xor al, bl WORD $0xc980; BYTE $0x01 // or cl, 1 WORD $0x01b2 // mov dl, 1 @@ -49555,21 +51029,22 @@ LBB10_167: WORD $0xda30 // xor dl, bl LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB10_167 + JNE LBB10_196 -LBB10_168: - LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 - LONG $0x062e0f66 // ucomisd xmm0, qword [rsi] - WORD $0x960f; BYTE $0xd0 // setbe al - JMP LBB10_180 +LBB10_197: + LONG $0x01c0f641 // test r8b, 1 
+ JE LBB10_201 + LONG $0x0e100ff2 // movsd xmm1, qword [rsi] + WORD $0xc031 // xor eax, eax + LONG $0xc82e0f66 // ucomisd xmm1, xmm0 + JMP LBB10_199 -LBB10_170: +LBB10_112: WORD $0x894d; BYTE $0xc1 // mov r9, r8 LONG $0xfee18349 // and r9, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB10_171: +LBB10_113: LONG $0x2e394466 // cmp word [rsi], r13w LONG $0x000000bf; BYTE $0x00 // mov edi, 0 LONG $0xffd78040 // adc dil, -1 @@ -49597,15 +51072,15 @@ LBB10_171: WORD $0xc330 // xor bl, al LONG $0x161c8841 // mov byte [r14 + rdx], bl WORD $0x394d; BYTE $0xd9 // cmp r9, r11 - JNE LBB10_171 + JNE LBB10_113 -LBB10_172: +LBB10_110: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 + JE LBB10_201 WORD $0xc031 // xor eax, eax LONG $0x2e394466 // cmp word [rsi], r13w -LBB10_174: +LBB10_199: WORD $0xff14 // adc al, -1 WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 @@ -49616,14 +51091,14 @@ LBB10_174: WORD $0xe3d2 // shl bl, cl WORD $0x3040; BYTE $0xf0 // xor al, sil WORD $0xc320 // and bl, al - JMP LBB10_181 + JMP LBB10_200 -LBB10_175: +LBB10_170: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0x3145; BYTE $0xdb // xor r11d, r11d -LBB10_176: +LBB10_171: WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 WORD $0x9d0f; BYTE $0xd0 // setge al WORD $0xd8f6 // neg al @@ -49651,17 +51126,15 @@ LBB10_176: WORD $0xda30 // xor dl, bl LONG $0x3e148841 // mov byte [r14 + rdi], dl WORD $0x394d; BYTE $0xda // cmp r10, r11 - JNE LBB10_176 + JNE LBB10_171 -LBB10_177: +LBB10_168: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 + JE LBB10_201 WORD $0x394c; BYTE $0x2e // cmp qword [rsi], r13 -LBB10_179: +LBB10_152: WORD $0x9d0f; BYTE $0xd0 // setge al - -LBB10_180: WORD $0xd8f6 // neg al WORD $0x894c; BYTE $0xda // mov rdx, r11 LONG $0x03eac148 // shr rdx, 3 @@ -49673,21 +51146,21 @@ LBB10_180: WORD $0x3040; BYTE $0xf0 // xor al, sil WORD $0xc320 // and bl, al -LBB10_181: +LBB10_200: WORD $0x3040; BYTE $0xf3 // xor bl, sil LONG $0x161c8841 // mov byte [r14 + rdx], bl -LBB10_182: +LBB10_201: MOVQ 496(SP), SP RET -LBB10_183: +LBB10_135: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi QUAD $0x0000018824b48b44 // mov r14d, dword [rsp + 392] -LBB10_184: +LBB10_136: LONG $0x33394566 // cmp word [r11], r14w WORD $0x9d0f; BYTE $0xd3 // setge bl WORD $0xdbf6 // neg bl @@ -49715,11 +51188,11 @@ LBB10_184: WORD $0xd030 // xor al, dl LONG $0x3c048841 // mov byte [r12 + rdi], al WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB10_184 + JNE LBB10_136 -LBB10_185: +LBB10_133: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 + JE LBB10_201 LONG $0x8824848b; WORD $0x0001; BYTE $0x00 // mov eax, dword [rsp + 392] LONG $0x03394166 // cmp word [r11], ax WORD $0x9d0f; BYTE $0xd0 // setge al @@ -49735,53 +51208,57 @@ LBB10_185: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x141c8841 // mov byte [r12 + rdx], bl - JMP LBB10_182 + JMP LBB10_201 -LBB10_187: +LBB10_193: WORD $0x894d; BYTE $0xc2 // mov r10, r8 LONG $0xfee28349 // and r10, -2 WORD $0xf631 // xor esi, esi WORD $0x894d; BYTE $0xfb // mov r11, r15 -LBB10_188: - LONG $0x1b2e0f44 // ucomiss xmm11, dword [rbx] - WORD $0x960f; BYTE $0xd2 // setbe dl - WORD $0xdaf6 // neg dl +LBB10_194: + LONG $0x00100ff3 // movss xmm0, dword [rax] + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x000000bb; BYTE $0x00 // mov ebx, 0 + WORD $0xd380; BYTE $0xff // adc bl, -1 WORD $0x8948; BYTE $0xf7 // mov rdi, rsi LONG $0x03efc148 // shr rdi, 3 LONG $0x0cb60f45; BYTE $0x3b // 
movzx r9d, byte [r11 + rdi] + WORD $0x3044; BYTE $0xcb // xor bl, r9b WORD $0xf189 // mov ecx, esi WORD $0xe180; BYTE $0x06 // and cl, 6 - WORD $0x01b0 // mov al, 1 - WORD $0xe0d2 // shl al, cl - WORD $0x3044; BYTE $0xca // xor dl, r9b - WORD $0xd020 // and al, dl - WORD $0x3044; BYTE $0xc8 // xor al, r9b - LONG $0x3b048841 // mov byte [r11 + rdi], al - LONG $0x02c68348 // add rsi, 2 - LONG $0x5b2e0f44; BYTE $0x04 // ucomiss xmm11, dword [rbx + 4] - LONG $0xd1960f41 // setbe r9b - LONG $0x08c38348 // add rbx, 8 - WORD $0xf641; BYTE $0xd9 // neg r9b - WORD $0x3041; BYTE $0xc1 // xor r9b, al - WORD $0xc980; BYTE $0x01 // or cl, 1 WORD $0x01b2 // mov dl, 1 WORD $0xe2d2 // shl dl, cl - WORD $0x2044; BYTE $0xca // and dl, r9b - WORD $0xc230 // xor dl, al + WORD $0xda20 // and dl, bl + WORD $0x3044; BYTE $0xca // xor dl, r9b LONG $0x3b148841 // mov byte [r11 + rdi], dl + LONG $0x02c68348 // add rsi, 2 + LONG $0x40100ff3; BYTE $0x04 // movss xmm0, dword [rax + 4] + LONG $0x08c08348 // add rax, 8 + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + LONG $0x000000bb; BYTE $0x00 // mov ebx, 0 + WORD $0xd380; BYTE $0xff // adc bl, -1 + WORD $0xd330 // xor bl, dl + WORD $0xc980; BYTE $0x01 // or cl, 1 + WORD $0x8949; BYTE $0xc1 // mov r9, rax + WORD $0x01b0 // mov al, 1 + WORD $0xe0d2 // shl al, cl + WORD $0xd820 // and al, bl + WORD $0xd030 // xor al, dl + LONG $0x3b048841 // mov byte [r11 + rdi], al + WORD $0x894c; BYTE $0xc8 // mov rax, r9 WORD $0x3949; BYTE $0xf2 // cmp r10, rsi - JNE LBB10_188 + JNE LBB10_194 -LBB10_189: +LBB10_191: LONG $0x01c0f641 // test r8b, 1 - JE LBB10_182 - LONG $0x1b2e0f44 // ucomiss xmm11, dword [rbx] - WORD $0x960f; BYTE $0xd0 // setbe al - WORD $0xd8f6 // neg al + JE LBB10_201 + LONG $0x00100ff3 // movss xmm0, dword [rax] + WORD $0xc031 // xor eax, eax + LONG $0xc32e0f41 // ucomiss xmm0, xmm11 + WORD $0xff14 // adc al, -1 WORD $0x8948; BYTE $0xf2 // mov rdx, rsi LONG $0x03eac148 // shr rdx, 3 - WORD $0x894d; BYTE $0xfe // mov r14, r15 LONG $0x173c8a41 // mov dil, byte [r15 + rdx] LONG $0x07e68040 // and sil, 7 WORD $0x01b3 // mov bl, 1 @@ -49791,17 +51268,17 @@ LBB10_189: WORD $0xc320 // and bl, al WORD $0x3040; BYTE $0xfb // xor bl, dil LONG $0x171c8841 // mov byte [r15 + rdx], bl - JMP LBB10_182 + JMP LBB10_201 -LBB10_191: +LBB10_85: LONG $0xf0e78349 // and r15, -16 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 WORD $0x0148; BYTE $0xf0 // add rax, rsi QUAD $0x0000017024848948 // mov qword [rsp + 368], rax - QUAD $0x000000a024bc894c // mov qword [rsp + 160], r15 + QUAD $0x000000f024bc894c // mov qword [rsp + 240], r15 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] - LONG $0x24448948; BYTE $0x70 // mov qword [rsp + 112], rax + QUAD $0x0000009024848948 // mov qword [rsp + 144], rax LONG $0xc3b60f41 // movzx eax, r11b LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 @@ -49810,8 +51287,8 @@ LBB10_191: WORD $0x3145; BYTE $0xc0 // xor r8d, r8d QUAD $0x0000016024b4894c // mov qword [rsp + 352], r14 -LBB10_192: - QUAD $0x000001202484894c // mov qword [rsp + 288], r8 +LBB10_86: + QUAD $0x000001002484894c // mov qword [rsp + 256], r8 LONG $0x05e0c149 // shl r8, 5 WORD $0x894d; BYTE $0xc1 // mov r9, r8 WORD $0x894c; BYTE $0xc7 // mov rdi, r8 @@ -49836,12 +51313,12 @@ LBB10_192: LONG $0xf16e0f66 // movd xmm6, ecx LONG $0x4cb60f42; WORD $0x0506 // movzx ecx, byte [rsi + r8 + 5] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00009024847f0f66; BYTE $0x00 // movdqa oword [rsp + 144], xmm0 + LONG $0x447f0f66; WORD $0x7024 // movdqa oword [rsp + 112], 
xmm0 LONG $0x4cb60f42; WORD $0x0606 // movzx ecx, byte [rsi + r8 + 6] LONG $0xf96e0f66 // movd xmm7, ecx LONG $0x4cb60f42; WORD $0x0706 // movzx ecx, byte [rsi + r8 + 7] LONG $0xc16e0f66 // movd xmm0, ecx - QUAD $0x00011024847f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm0 + QUAD $0x00013024847f0f66; BYTE $0x00 // movdqa oword [rsp + 304], xmm0 LONG $0x4cb60f42; WORD $0x0806 // movzx ecx, byte [rsi + r8 + 8] LONG $0x6e0f4466; BYTE $0xe9 // movd xmm13, ecx LONG $0x4cb60f42; WORD $0x0906 // movzx ecx, byte [rsi + r8 + 9] @@ -49854,7 +51331,7 @@ LBB10_192: LONG $0x6e0f4466; BYTE $0xe1 // movd xmm12, ecx LONG $0x4cb60f42; WORD $0x1006 // movzx ecx, byte [rsi + r8 + 16] LONG $0x6e0f4466; BYTE $0xf1 // movd xmm14, ecx - QUAD $0x000000d02484894c // mov qword [rsp + 208], r8 + QUAD $0x000000c02484894c // mov qword [rsp + 192], r8 LONG $0x4cb60f42; WORD $0x1806 // movzx ecx, byte [rsi + r8 + 24] LONG $0xe96e0f66 // movd xmm5, ecx WORD $0x894c; BYTE $0xc1 // mov rcx, r8 @@ -49877,7 +51354,7 @@ LBB10_192: LONG $0x60ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 352 LONG $0x24548948; BYTE $0x30 // mov qword [rsp + 48], rdx LONG $0x80ce8149; WORD $0x0001; BYTE $0x00 // or r14, 384 - QUAD $0x000000c024b4894c // mov qword [rsp + 192], r14 + QUAD $0x000000b024b4894c // mov qword [rsp + 176], r14 LONG $0xa0cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 416 LONG $0x247c8948; BYTE $0x60 // mov qword [rsp + 96], rdi WORD $0x894c; BYTE $0xc7 // mov rdi, r8 @@ -49902,7 +51379,7 @@ LBB10_192: QUAD $0x0d2e0c203a0f4666 // pinsrb xmm9, byte [rsi + r13], 13 LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] QUAD $0x0e0e0c203a0f4466 // pinsrb xmm9, byte [rsi + rcx], 14 - QUAD $0x000000e024bc8948 // mov qword [rsp + 224], rdi + QUAD $0x000000d024bc8948 // mov qword [rsp + 208], rdi QUAD $0x0f3e0c203a0f4466 // pinsrb xmm9, byte [rsi + rdi], 15 QUAD $0x014024bc6f0f4466; WORD $0x0000 // movdqa xmm15, oword [rsp + 320] LONG $0x6f0f4566; BYTE $0xdf // movdqa xmm11, xmm15 @@ -49981,7 +51458,7 @@ LBB10_192: LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] QUAD $0x0e040e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 4], 14 QUAD $0x0f043e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 4], 15 - QUAD $0x000090249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 144] + LONG $0x5c6f0f66; WORD $0x7024 // movdqa xmm3, oword [rsp + 112] LONG $0x244c8b48; BYTE $0x50 // mov rcx, qword [rsp + 80] QUAD $0x01050e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 5], 1 QUAD $0x052e5c203a0f4266; BYTE $0x02 // pinsrb xmm3, byte [rsi + r13 + 5], 2 @@ -50000,7 +51477,7 @@ LBB10_192: LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] QUAD $0x0e050e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 5], 14 QUAD $0x0f053e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 5], 15 - QUAD $0x000090249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 144], xmm3 + LONG $0x5c7f0f66; WORD $0x7024 // movdqa oword [rsp + 112], xmm3 LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] QUAD $0x01063e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 6], 1 WORD $0x894c; BYTE $0xe9 // mov rcx, r13 @@ -50033,7 +51510,7 @@ LBB10_192: LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] QUAD $0x080e6c203a0f4466; BYTE $0x0e // pinsrb xmm13, byte [rsi + rcx + 8], 14 LONG $0x640f4566; BYTE $0xd9 // pcmpgtb xmm11, xmm9 - QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] + QUAD $0x000000d024bc8b48 // mov rdi, qword [rsp + 208] QUAD $0x083e6c203a0f4466; BYTE $0x0f // pinsrb xmm13, byte [rsi + rdi + 8], 15 LONG $0x6f0f4566; BYTE $0xcf // movdqa xmm9, xmm15 LONG 
$0x640f4566; BYTE $0xcd // pcmpgtb xmm9, xmm13 @@ -50042,11 +51519,11 @@ LBB10_192: QUAD $0x100e74203a0f4466; BYTE $0x02 // pinsrb xmm14, byte [rsi + rcx + 16], 2 QUAD $0x100e74203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rsi + r9 + 16], 3 WORD $0x894d; BYTE $0xce // mov r14, r9 - QUAD $0x000000b0248c894c // mov qword [rsp + 176], r9 + QUAD $0x000000a0248c894c // mov qword [rsp + 160], r9 QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] QUAD $0x100e74203a0f4466; BYTE $0x04 // pinsrb xmm14, byte [rsi + rcx + 16], 4 QUAD $0x102674203a0f4666; BYTE $0x05 // pinsrb xmm14, byte [rsi + r12 + 16], 5 - QUAD $0x000000f024a4894c // mov qword [rsp + 240], r12 + QUAD $0x000000e024a4894c // mov qword [rsp + 224], r12 QUAD $0x103e74203a0f4666; BYTE $0x06 // pinsrb xmm14, byte [rsi + r15 + 16], 6 QUAD $0x101e74203a0f4466; BYTE $0x07 // pinsrb xmm14, byte [rsi + rbx + 16], 7 QUAD $0x101e74203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r11 + 16], 8 @@ -50054,7 +51531,7 @@ LBB10_192: QUAD $0x100674203a0f4466; BYTE $0x0a // pinsrb xmm14, byte [rsi + rax + 16], 10 WORD $0x8949; BYTE $0xc1 // mov r9, rax QUAD $0x101674203a0f4466; BYTE $0x0b // pinsrb xmm14, byte [rsi + rdx + 16], 11 - QUAD $0x000000c024948b48 // mov rdx, qword [rsp + 192] + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] QUAD $0x101674203a0f4466; BYTE $0x0c // pinsrb xmm14, byte [rsi + rdx + 16], 12 QUAD $0x100674203a0f4666; BYTE $0x0d // pinsrb xmm14, byte [rsi + r8 + 16], 13 LONG $0x246c8b4c; BYTE $0x10 // mov r13, qword [rsp + 16] @@ -50081,13 +51558,13 @@ LBB10_192: QUAD $0x182e6c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rsi + r13 + 24], 14 QUAD $0x0f183e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 24], 15 LONG $0x640f4166; BYTE $0xde // pcmpgtb xmm3, xmm14 - QUAD $0x000130249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 304], xmm3 + QUAD $0x000150249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 336], xmm3 LONG $0x6f0f4166; BYTE $0xdf // movdqa xmm3, xmm15 LONG $0xdd640f66 // pcmpgtb xmm3, xmm5 - QUAD $0x000100249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm3 + QUAD $0x000120249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 288], xmm3 LONG $0x6f0f4166; BYTE $0xef // movdqa xmm5, xmm15 LONG $0xea640f66 // pcmpgtb xmm5, xmm2 - QUAD $0x000000d0248c8b48 // mov rcx, qword [rsp + 208] + QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] LONG $0x0e54b60f; BYTE $0x0d // movzx edx, byte [rsi + rcx + 13] LONG $0xd26e0f66 // movd xmm2, edx QUAD $0x06067c203a0f4266; BYTE $0x0d // pinsrb xmm7, byte [rsi + r8 + 6], 13 @@ -50120,7 +51597,7 @@ LBB10_192: LONG $0xe8f80f66 // psubb xmm5, xmm0 LONG $0xeb0f4466; BYTE $0xf5 // por xmm14, xmm5 LONG $0x6f0f4166; BYTE $0xef // movdqa xmm5, xmm15 - QUAD $0x00009024ac640f66; BYTE $0x00 // pcmpgtb xmm5, oword [rsp + 144] + LONG $0x6c640f66; WORD $0x7024 // pcmpgtb xmm5, oword [rsp + 112] LONG $0x6f0f4566; BYTE $0xef // movdqa xmm13, xmm15 LONG $0x6f0f4566; BYTE $0xdf // movdqa xmm11, xmm15 LONG $0x640f4466; BYTE $0xef // pcmpgtb xmm13, xmm7 @@ -50133,16 +51610,16 @@ LBB10_192: LONG $0xeb0f4466; BYTE $0xed // por xmm13, xmm5 LONG $0x0e54b60f; BYTE $0x13 // movzx edx, byte [rsi + rcx + 19] LONG $0x6e0f4466; BYTE $0xfa // movd xmm15, edx - QUAD $0x00011024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 272] + QUAD $0x00013024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 304] LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] QUAD $0x01073e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 7], 1 LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] QUAD $0x02070644203a0f66 // pinsrb 
xmm0, byte [rsi + rax + 7], 2 - QUAD $0x000000b0248c8b48 // mov rcx, qword [rsp + 176] + QUAD $0x000000a0248c8b48 // mov rcx, qword [rsp + 160] QUAD $0x03070e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 7], 3 QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x04070644203a0f66 // pinsrb xmm0, byte [rsi + rax + 7], 4 - QUAD $0x000000f024bc8b4c // mov r15, qword [rsp + 240] + QUAD $0x000000e024bc8b4c // mov r15, qword [rsp + 224] QUAD $0x073e44203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r15 + 7], 5 QUAD $0x073644203a0f4266; BYTE $0x06 // pinsrb xmm0, byte [rsi + r14 + 7], 6 QUAD $0x07071e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 7], 7 @@ -50151,11 +51628,11 @@ LBB10_192: QUAD $0x070e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r9 + 7], 10 LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] QUAD $0x0b071644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 7], 11 - QUAD $0x000000c024948b48 // mov rdx, qword [rsp + 192] + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] QUAD $0x0c071644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 7], 12 QUAD $0x070644203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r8 + 7], 13 QUAD $0x072e44203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r13 + 7], 14 - QUAD $0x000000e024a48b4c // mov r12, qword [rsp + 224] + QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] QUAD $0x072644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r12 + 7], 15 QUAD $0x093e44203a0f4466; BYTE $0x01 // pinsrb xmm8, byte [rsi + rdi + 9], 1 LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] @@ -50313,7 +51790,7 @@ LBB10_192: QUAD $0x121e7c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r11 + 18], 8 QUAD $0x12167c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r10 + 18], 9 QUAD $0x120e7c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r9 + 18], 10 - QUAD $0x00000150248c894c // mov qword [rsp + 336], r9 + QUAD $0x00000110248c894c // mov qword [rsp + 272], r9 LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] QUAD $0x0b123e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 18], 11 QUAD $0x0c12167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 18], 12 @@ -50327,7 +51804,7 @@ LBB10_192: QUAD $0x132e7c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rsi + r13 + 19], 2 QUAD $0x130e7c203a0f4466; BYTE $0x03 // pinsrb xmm15, byte [rsi + rcx + 19], 3 QUAD $0x13067c203a0f4466; BYTE $0x04 // pinsrb xmm15, byte [rsi + rax + 19], 4 - QUAD $0x000000f0248c8b48 // mov rcx, qword [rsp + 240] + QUAD $0x000000e0248c8b48 // mov rcx, qword [rsp + 224] QUAD $0x130e7c203a0f4466; BYTE $0x05 // pinsrb xmm15, byte [rsi + rcx + 19], 5 QUAD $0x13367c203a0f4666; BYTE $0x06 // pinsrb xmm15, byte [rsi + r14 + 19], 6 QUAD $0x131e7c203a0f4466; BYTE $0x07 // pinsrb xmm15, byte [rsi + rbx + 19], 7 @@ -50340,11 +51817,11 @@ LBB10_192: LONG $0x456f0f66; BYTE $0x60 // movdqa xmm0, oword 96[rbp] /* [rip + .LCPI10_6] */ LONG $0xe8df0f66 // pandn xmm5, xmm0 LONG $0xeb0f4166; BYTE $0xed // por xmm5, xmm13 - QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x000000c024848b48 // mov rax, qword [rsp + 192] LONG $0x0654b60f; BYTE $0x14 // movzx edx, byte [rsi + rax + 20] LONG $0xc26e0f66 // movd xmm0, edx LONG $0xeb0f4166; BYTE $0xee // por xmm5, xmm14 - QUAD $0x00009024ac7f0f66; BYTE $0x00 // movdqa oword [rsp + 144], xmm5 + LONG $0x6c7f0f66; WORD $0x7024 // movdqa oword [rsp + 112], xmm5 LONG $0x6f0f4566; BYTE $0xeb // movdqa xmm13, xmm11 LONG $0x640f4566; BYTE $0xe8 // pcmpgtb xmm13, xmm8 LONG $0x0654b60f; BYTE $0x15 // movzx edx, byte [rsi + rax + 21] @@ -50398,7 +51875,7 
@@ LBB10_192: LONG $0x0654b60f; BYTE $0x1d // movzx edx, byte [rsi + rax + 29] LONG $0xda6e0f66 // movd xmm3, edx QUAD $0x0001009ddf0f4466; BYTE $0x00 // pandn xmm11, oword 256[rbp] /* [rip + .LCPI10_16] */ - QUAD $0x0130249cfc0f4466; WORD $0x0000 // paddb xmm11, oword [rsp + 304] + QUAD $0x0150249cfc0f4466; WORD $0x0000 // paddb xmm11, oword [rsp + 336] LONG $0xcc6f0f66 // movdqa xmm1, xmm4 LONG $0xcf640f66 // pcmpgtb xmm1, xmm7 LONG $0x6f0f4466; BYTE $0xec // movdqa xmm13, xmm4 @@ -50434,7 +51911,7 @@ LBB10_192: QUAD $0x1d2e5c203a0f4266; BYTE $0x02 // pinsrb xmm3, byte [rsi + r13 + 29], 2 QUAD $0x1e2e64203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r13 + 30], 2 QUAD $0x1f2e7c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rsi + r13 + 31], 2 - QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x000000a024948b48 // mov rdx, qword [rsp + 160] QUAD $0x03141644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 20], 3 QUAD $0x151644203a0f4466; BYTE $0x03 // pinsrb xmm8, byte [rsi + rdx + 21], 3 QUAD $0x161654203a0f4466; BYTE $0x03 // pinsrb xmm10, byte [rsi + rdx + 22], 3 @@ -50480,7 +51957,6 @@ LBB10_192: QUAD $0x1d365c203a0f4266; BYTE $0x06 // pinsrb xmm3, byte [rsi + r14 + 29], 6 QUAD $0x1e3664203a0f4266; BYTE $0x06 // pinsrb xmm4, byte [rsi + r14 + 30], 6 QUAD $0x1f367c203a0f4666; BYTE $0x06 // pinsrb xmm15, byte [rsi + r14 + 31], 6 - QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] QUAD $0x07141e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 20], 7 QUAD $0x151e44203a0f4466; BYTE $0x07 // pinsrb xmm8, byte [rsi + rbx + 21], 7 QUAD $0x161e54203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rbx + 22], 7 @@ -50514,7 +51990,7 @@ LBB10_192: QUAD $0x1d165c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r10 + 29], 9 QUAD $0x1e1664203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r10 + 30], 9 QUAD $0x1f167c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r10 + 31], 9 - QUAD $0x0000015024948b4c // mov r10, qword [rsp + 336] + QUAD $0x0000011024948b4c // mov r10, qword [rsp + 272] QUAD $0x141644203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r10 + 20], 10 QUAD $0x151644203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r10 + 21], 10 QUAD $0x161654203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r10 + 22], 10 @@ -50606,7 +52082,7 @@ LBB10_192: LONG $0x6f0f4166; BYTE $0xc8 // movdqa xmm1, xmm8 LONG $0x640f4166; BYTE $0xc9 // pcmpgtb xmm1, xmm9 QUAD $0x000001008ddf0f66 // pandn xmm1, oword 256[rbp] /* [rip + .LCPI10_16] */ - QUAD $0x000100248cfc0f66; BYTE $0x00 // paddb xmm1, oword [rsp + 256] + QUAD $0x000120248cfc0f66; BYTE $0x00 // paddb xmm1, oword [rsp + 288] LONG $0x6f0f4166; BYTE $0xe8 // movdqa xmm5, xmm8 LONG $0x640f4166; BYTE $0xec // pcmpgtb xmm5, xmm12 LONG $0x6f0f4166; BYTE $0xf8 // movdqa xmm7, xmm8 @@ -50626,6 +52102,7 @@ LBB10_192: LONG $0x6f0f4166; BYTE $0xd8 // movdqa xmm3, xmm8 LONG $0xdc640f66 // pcmpgtb xmm3, xmm4 QUAD $0x1f267c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r12 + 31], 15 + QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] LONG $0xdf0f4166; BYTE $0xca // pandn xmm1, xmm10 LONG $0xdf0f4166; BYTE $0xdb // pandn xmm3, xmm11 LONG $0xd9eb0f66 // por xmm3, xmm1 @@ -50636,7 +52113,7 @@ LBB10_192: LONG $0xcaeb0f66 // por xmm1, xmm2 LONG $0xd06f0f66 // movdqa xmm2, xmm0 LONG $0xd1600f66 // punpcklbw xmm2, xmm1 - QUAD $0x00009024ac6f0f66; BYTE $0x00 // movdqa xmm5, oword [rsp + 144] + LONG $0x6c6f0f66; WORD $0x7024 // movdqa xmm5, oword [rsp + 112] LONG $0xdd6f0f66 // movdqa xmm3, xmm5 LONG $0x600f4166; BYTE $0xde // punpcklbw xmm3, xmm14 
LONG $0xe36f0f66 // movdqa xmm4, xmm3 @@ -50647,24 +52124,24 @@ LBB10_192: LONG $0xcd6f0f66 // movdqa xmm1, xmm5 LONG $0xc8610f66 // punpcklwd xmm1, xmm0 LONG $0xe8690f66 // punpckhwd xmm5, xmm0 - QUAD $0x00000120248c8b48 // mov rcx, qword [rsp + 288] + QUAD $0x00000100248c8b48 // mov rcx, qword [rsp + 256] LONG $0x7f0f41f3; WORD $0x8e6c; BYTE $0x30 // movdqu oword [r14 + 4*rcx + 48], xmm5 LONG $0x7f0f41f3; WORD $0x8e4c; BYTE $0x20 // movdqu oword [r14 + 4*rcx + 32], xmm1 LONG $0x7f0f41f3; WORD $0x8e5c; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm3 LONG $0x7f0f41f3; WORD $0x8e24 // movdqu oword [r14 + 4*rcx], xmm4 LONG $0x10c18348 // add rcx, 16 WORD $0x8949; BYTE $0xc8 // mov r8, rcx - QUAD $0x000000a0248c3b48 // cmp rcx, qword [rsp + 160] - JNE LBB10_192 + QUAD $0x000000f0248c3b48 // cmp rcx, qword [rsp + 240] + JNE LBB10_86 QUAD $0x000001b024bc8b4c // mov r15, qword [rsp + 432] - QUAD $0x000000a024bc3b4c // cmp r15, qword [rsp + 160] + QUAD $0x000000f024bc3b4c // cmp r15, qword [rsp + 240] LONG $0x245c8a44; BYTE $0x08 // mov r11b, byte [rsp + 8] QUAD $0x0000017024b48b48 // mov rsi, qword [rsp + 368] LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] - JNE LBB10_42 - JMP LBB10_128 + JNE LBB10_88 + JMP LBB10_91 -LBB10_194: +LBB10_66: LONG $0xf0e78349 // and r15, -16 WORD $0x894c; BYTE $0xf8 // mov rax, r15 LONG $0x05e0c148 // shl rax, 5 @@ -50672,256 +52149,265 @@ LBB10_194: QUAD $0x0000018824848948 // mov qword [rsp + 392], rax QUAD $0x000001a024bc894c // mov qword [rsp + 416], r15 LONG $0xbe048d4b // lea rax, [r14 + 4*r15] - QUAD $0x000000d024848948 // mov qword [rsp + 208], rax + QUAD $0x000000e024848948 // mov qword [rsp + 224], rax LONG $0xc3b60f41 // movzx eax, r11b LONG $0xc86e0f66 // movd xmm1, eax LONG $0xc0ef0f66 // pxor xmm0, xmm0 LONG $0x00380f66; BYTE $0xc8 // pshufb xmm1, xmm0 QUAD $0x000190248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 400], xmm1 - WORD $0xd231 // xor edx, edx + WORD $0xc031 // xor eax, eax QUAD $0x0000016024b4894c // mov qword [rsp + 352], r14 -LBB10_195: - QUAD $0x0000015024948948 // mov qword [rsp + 336], rdx - LONG $0x05e2c148 // shl rdx, 5 - WORD $0x8948; BYTE $0xd3 // mov rbx, rdx - WORD $0x8949; BYTE $0xd3 // mov r11, rdx - WORD $0x8949; BYTE $0xd4 // mov r12, rdx - QUAD $0x0000009024948948 // mov qword [rsp + 144], rdx - WORD $0x8949; BYTE $0xd0 // mov r8, rdx - WORD $0x8949; BYTE $0xd5 // mov r13, rdx - WORD $0x8949; BYTE $0xd1 // mov r9, rdx - WORD $0x8949; BYTE $0xd2 // mov r10, rdx - WORD $0x8949; BYTE $0xd6 // mov r14, rdx - WORD $0x8948; BYTE $0xd7 // mov rdi, rdx - WORD $0x8949; BYTE $0xd7 // mov r15, rdx - LONG $0x160cb60f // movzx ecx, byte [rsi + rdx] +LBB10_67: + WORD $0x8949; BYTE $0xc1 // mov r9, rax + QUAD $0x0000015024848948 // mov qword [rsp + 336], rax + LONG $0x05e1c149 // shl r9, 5 + WORD $0x894c; BYTE $0xcb // mov rbx, r9 + WORD $0x894c; BYTE $0xcf // mov rdi, r9 + WORD $0x894c; BYTE $0xca // mov rdx, r9 + LONG $0x244c894c; BYTE $0x30 // mov qword [rsp + 48], r9 + WORD $0x894d; BYTE $0xce // mov r14, r9 + WORD $0x894d; BYTE $0xc8 // mov r8, r9 + WORD $0x894d; BYTE $0xca // mov r10, r9 + WORD $0x894d; BYTE $0xcc // mov r12, r9 + WORD $0x894d; BYTE $0xcb // mov r11, r9 + WORD $0x894d; BYTE $0xcd // mov r13, r9 + QUAD $0x000000d0248c894c // mov qword [rsp + 208], r9 + LONG $0x0cb60f42; BYTE $0x0e // movzx ecx, byte [rsi + r9] LONG $0xc16e0f66 // movd xmm0, ecx - LONG $0x164cb60f; BYTE $0x01 // movzx ecx, byte [rsi + rdx + 1] + LONG $0x4cb60f42; WORD $0x010e // movzx ecx, byte [rsi + r9 + 1] LONG $0x6e0f4466; BYTE $0xd9 
// movd xmm11, ecx - LONG $0x164cb60f; BYTE $0x02 // movzx ecx, byte [rsi + rdx + 2] + LONG $0x4cb60f42; WORD $0x020e // movzx ecx, byte [rsi + r9 + 2] LONG $0x6e0f4466; BYTE $0xf1 // movd xmm14, ecx - LONG $0x164cb60f; BYTE $0x03 // movzx ecx, byte [rsi + rdx + 3] + LONG $0x4cb60f42; WORD $0x030e // movzx ecx, byte [rsi + r9 + 3] LONG $0xe96e0f66 // movd xmm5, ecx - LONG $0x164cb60f; BYTE $0x04 // movzx ecx, byte [rsi + rdx + 4] + LONG $0x4cb60f42; WORD $0x040e // movzx ecx, byte [rsi + r9 + 4] LONG $0xd96e0f66 // movd xmm3, ecx - LONG $0x164cb60f; BYTE $0x05 // movzx ecx, byte [rsi + rdx + 5] + LONG $0x4cb60f42; WORD $0x050e // movzx ecx, byte [rsi + r9 + 5] LONG $0xc96e0f66 // movd xmm1, ecx - LONG $0x164cb60f; BYTE $0x06 // movzx ecx, byte [rsi + rdx + 6] + LONG $0x4cb60f42; WORD $0x060e // movzx ecx, byte [rsi + r9 + 6] LONG $0xe16e0f66 // movd xmm4, ecx - LONG $0x164cb60f; BYTE $0x07 // movzx ecx, byte [rsi + rdx + 7] + LONG $0x4cb60f42; WORD $0x070e // movzx ecx, byte [rsi + r9 + 7] LONG $0xd16e0f66 // movd xmm2, ecx - QUAD $0x00017024947f0f66; BYTE $0x00 // movdqa oword [rsp + 368], xmm2 - LONG $0x164cb60f; BYTE $0x08 // movzx ecx, byte [rsi + rdx + 8] + QUAD $0x00014024947f0f66; BYTE $0x00 // movdqa oword [rsp + 320], xmm2 + LONG $0x4cb60f42; WORD $0x080e // movzx ecx, byte [rsi + r9 + 8] LONG $0x6e0f4466; BYTE $0xe9 // movd xmm13, ecx - LONG $0x164cb60f; BYTE $0x09 // movzx ecx, byte [rsi + rdx + 9] + LONG $0x4cb60f42; WORD $0x090e // movzx ecx, byte [rsi + r9 + 9] LONG $0xd16e0f66 // movd xmm2, ecx - QUAD $0x0000a024947f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm2 - LONG $0x164cb60f; BYTE $0x0a // movzx ecx, byte [rsi + rdx + 10] + QUAD $0x00011024947f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm2 + LONG $0x4cb60f42; WORD $0x0a0e // movzx ecx, byte [rsi + r9 + 10] LONG $0x6e0f4466; BYTE $0xc1 // movd xmm8, ecx - LONG $0x164cb60f; BYTE $0x0b // movzx ecx, byte [rsi + rdx + 11] + LONG $0x4cb60f42; WORD $0x0b0e // movzx ecx, byte [rsi + r9 + 11] LONG $0x6e0f4466; BYTE $0xd1 // movd xmm10, ecx - LONG $0x164cb60f; BYTE $0x0c // movzx ecx, byte [rsi + rdx + 12] + LONG $0x4cb60f42; WORD $0x0c0e // movzx ecx, byte [rsi + r9 + 12] LONG $0xf16e0f66 // movd xmm6, ecx - LONG $0x164cb60f; BYTE $0x10 // movzx ecx, byte [rsi + rdx + 16] + LONG $0x4cb60f42; WORD $0x100e // movzx ecx, byte [rsi + r9 + 16] LONG $0x6e0f4466; BYTE $0xe1 // movd xmm12, ecx - LONG $0x164cb60f; BYTE $0x18 // movzx ecx, byte [rsi + rdx + 24] + LONG $0x4cb60f42; WORD $0x180e // movzx ecx, byte [rsi + r9 + 24] LONG $0xd16e0f66 // movd xmm2, ecx - QUAD $0x0000008024948948 // mov qword [rsp + 128], rdx - WORD $0x8948; BYTE $0xd0 // mov rax, rdx - LONG $0x20c88348 // or rax, 32 - LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax + QUAD $0x00000090248c894c // mov qword [rsp + 144], r9 + WORD $0x894d; BYTE $0xcf // mov r15, r9 + LONG $0x20cf8349 // or r15, 32 + LONG $0x247c894c; BYTE $0x50 // mov qword [rsp + 80], r15 LONG $0x40cb8348 // or rbx, 64 - LONG $0x60cb8349 // or r11, 96 - QUAD $0x00000120249c894c // mov qword [rsp + 288], r11 - LONG $0x80cc8149; WORD $0x0000; BYTE $0x00 // or r12, 128 - LONG $0x2464894c; BYTE $0x60 // mov qword [rsp + 96], r12 - QUAD $0x00000090248c8148; LONG $0x000000a0 // or qword [rsp + 144], 160 - LONG $0xc0c88149; WORD $0x0000; BYTE $0x00 // or r8, 192 - LONG $0xe0cd8149; WORD $0x0000; BYTE $0x00 // or r13, 224 - QUAD $0x0000013024ac894c // mov qword [rsp + 304], r13 - LONG $0x00c98149; WORD $0x0001; BYTE $0x00 // or r9, 256 - LONG $0x20ca8149; WORD $0x0001; BYTE $0x00 // or 
r10, 288 - QUAD $0x000001102494894c // mov qword [rsp + 272], r10 - LONG $0x40ce8149; WORD $0x0001; BYTE $0x00 // or r14, 320 - LONG $0x60cf8148; WORD $0x0001; BYTE $0x00 // or rdi, 352 - LONG $0x247c8948; BYTE $0x50 // mov qword [rsp + 80], rdi - LONG $0x80cf8149; WORD $0x0001; BYTE $0x00 // or r15, 384 - WORD $0x8948; BYTE $0xd0 // mov rax, rdx + QUAD $0x00000080249c8948 // mov qword [rsp + 128], rbx + LONG $0x60cf8348 // or rdi, 96 + QUAD $0x000000c024bc8948 // mov qword [rsp + 192], rdi + LONG $0x80ca8148; WORD $0x0000; BYTE $0x00 // or rdx, 128 + LONG $0x24548948; BYTE $0x70 // mov qword [rsp + 112], rdx + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + LONG $0xa0c98148; WORD $0x0000; BYTE $0x00 // or rcx, 160 + LONG $0xc0ce8149; WORD $0x0000; BYTE $0x00 // or r14, 192 + LONG $0xe0c88149; WORD $0x0000; BYTE $0x00 // or r8, 224 + LONG $0x00ca8149; WORD $0x0001; BYTE $0x00 // or r10, 256 + LONG $0x20cc8149; WORD $0x0001; BYTE $0x00 // or r12, 288 + LONG $0x40cb8149; WORD $0x0001; BYTE $0x00 // or r11, 320 + LONG $0x60cd8149; WORD $0x0001; BYTE $0x00 // or r13, 352 + QUAD $0x000000a024ac894c // mov qword [rsp + 160], r13 + QUAD $0x000000d024ac8b4c // mov r13, qword [rsp + 208] + LONG $0x80cd8149; WORD $0x0001; BYTE $0x00 // or r13, 384 + QUAD $0x000000d024ac894c // mov qword [rsp + 208], r13 + WORD $0x894c; BYTE $0xc8 // mov rax, r9 LONG $0x01a00d48; WORD $0x0000 // or rax, 416 - WORD $0x8948; BYTE $0xd1 // mov rcx, rdx - LONG $0x24548948; BYTE $0x10 // mov qword [rsp + 16], rdx - QUAD $0x0001c010244c8148; BYTE $0x00 // or qword [rsp + 16], 448 - LONG $0xe0ca8148; WORD $0x0001; BYTE $0x00 // or rdx, 480 - LONG $0x24548948; BYTE $0x30 // mov qword [rsp + 48], rdx - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] - LONG $0x203a0f66; WORD $0x0e04; BYTE $0x01 // pinsrb xmm0, byte [rsi + rcx], 1 + LONG $0x24448948; BYTE $0x60 // mov qword [rsp + 96], rax + WORD $0x894c; BYTE $0xc8 // mov rax, r9 + LONG $0x01c00d48; WORD $0x0000 // or rax, 448 + LONG $0x24448948; BYTE $0x10 // mov qword [rsp + 16], rax + WORD $0x894c; BYTE $0xc8 // mov rax, r9 + LONG $0x01e00d48; WORD $0x0000 // or rax, 480 + LONG $0x24448948; BYTE $0x20 // mov qword [rsp + 32], rax + QUAD $0x013e04203a0f4266 // pinsrb xmm0, byte [rsi + r15], 1 LONG $0x203a0f66; WORD $0x1e04; BYTE $0x02 // pinsrb xmm0, byte [rsi + rbx], 2 - QUAD $0x031e04203a0f4266 // pinsrb xmm0, byte [rsi + r11], 3 - QUAD $0x042604203a0f4266 // pinsrb xmm0, byte [rsi + r12], 4 - QUAD $0x00000090249c8b4c // mov r11, qword [rsp + 144] - QUAD $0x051e04203a0f4266 // pinsrb xmm0, byte [rsi + r11], 5 - WORD $0x894c; BYTE $0xc2 // mov rdx, r8 - QUAD $0x060604203a0f4266 // pinsrb xmm0, byte [rsi + r8], 6 - QUAD $0x072e04203a0f4266 // pinsrb xmm0, byte [rsi + r13], 7 - QUAD $0x080e04203a0f4266 // pinsrb xmm0, byte [rsi + r9], 8 - WORD $0x894d; BYTE $0xc8 // mov r8, r9 - QUAD $0x000000e0248c894c // mov qword [rsp + 224], r9 - QUAD $0x091604203a0f4266 // pinsrb xmm0, byte [rsi + r10], 9 - WORD $0x894d; BYTE $0xf1 // mov r9, r14 - QUAD $0x0a3604203a0f4266 // pinsrb xmm0, byte [rsi + r14], 10 - LONG $0x203a0f66; WORD $0x3e04; BYTE $0x0b // pinsrb xmm0, byte [rsi + rdi], 11 - QUAD $0x0c3e04203a0f4266 // pinsrb xmm0, byte [rsi + r15], 12 - LONG $0x203a0f66; WORD $0x0604; BYTE $0x0d // pinsrb xmm0, byte [rsi + rax], 13 + LONG $0x203a0f66; WORD $0x3e04; BYTE $0x03 // pinsrb xmm0, byte [rsi + rdi], 3 + LONG $0x203a0f66; WORD $0x1604; BYTE $0x04 // pinsrb xmm0, byte [rsi + rdx], 4 + LONG $0x244c8948; BYTE $0x30 // mov qword [rsp + 48], rcx + LONG 
$0x203a0f66; WORD $0x0e04; BYTE $0x05 // pinsrb xmm0, byte [rsi + rcx], 5 + QUAD $0x063604203a0f4266 // pinsrb xmm0, byte [rsi + r14], 6 + QUAD $0x070604203a0f4266 // pinsrb xmm0, byte [rsi + r8], 7 + WORD $0x894d; BYTE $0xd1 // mov r9, r10 + QUAD $0x081604203a0f4266 // pinsrb xmm0, byte [rsi + r10], 8 + QUAD $0x092604203a0f4266 // pinsrb xmm0, byte [rsi + r12], 9 + QUAD $0x0a1e04203a0f4266 // pinsrb xmm0, byte [rsi + r11], 10 + QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] + QUAD $0x0b3e04203a0f4266 // pinsrb xmm0, byte [rsi + r15], 11 + QUAD $0x0c2e04203a0f4266 // pinsrb xmm0, byte [rsi + r13], 12 + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + LONG $0x203a0f66; WORD $0x3e04; BYTE $0x0d // pinsrb xmm0, byte [rsi + rdi], 13 LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] LONG $0x203a0f66; WORD $0x0e04; BYTE $0x0e // pinsrb xmm0, byte [rsi + rcx], 14 - LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] - LONG $0x203a0f66; WORD $0x0e04; BYTE $0x0f // pinsrb xmm0, byte [rsi + rcx], 15 + LONG $0x203a0f66; WORD $0x0604; BYTE $0x0f // pinsrb xmm0, byte [rsi + rax], 15 LONG $0x6f0f4466; BYTE $0xc8 // movdqa xmm9, xmm0 QUAD $0x00019024bc6f0f66; BYTE $0x00 // movdqa xmm7, oword [rsp + 400] LONG $0xde0f4466; BYTE $0xcf // pmaxub xmm9, xmm7 LONG $0x6f0f4466; BYTE $0xff // movdqa xmm15, xmm7 LONG $0x740f4466; BYTE $0xc8 // pcmpeqb xmm9, xmm0 LONG $0x6f0f4166; BYTE $0xc1 // movdqa xmm0, xmm9 - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] - QUAD $0x010e5c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rsi + rcx + 1], 1 + LONG $0x24548b4c; BYTE $0x50 // mov r10, qword [rsp + 80] + QUAD $0x01165c203a0f4666; BYTE $0x01 // pinsrb xmm11, byte [rsi + r10 + 1], 1 QUAD $0x011e5c203a0f4466; BYTE $0x02 // pinsrb xmm11, byte [rsi + rbx + 1], 2 - QUAD $0x0000012024b48b4c // mov r14, qword [rsp + 288] - QUAD $0x01365c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r14 + 1], 3 - QUAD $0x01265c203a0f4666; BYTE $0x04 // pinsrb xmm11, byte [rsi + r12 + 1], 4 - QUAD $0x011e5c203a0f4666; BYTE $0x05 // pinsrb xmm11, byte [rsi + r11 + 1], 5 - QUAD $0x01165c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rdx + 1], 6 - QUAD $0x012e5c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r13 + 1], 7 - QUAD $0x01065c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r8 + 1], 8 - QUAD $0x01165c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r10 + 1], 9 - QUAD $0x010e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r9 + 1], 10 - QUAD $0x013e5c203a0f4466; BYTE $0x0b // pinsrb xmm11, byte [rsi + rdi + 1], 11 - QUAD $0x013e5c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r15 + 1], 12 - QUAD $0x01065c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rax + 1], 13 - LONG $0x24448b4c; BYTE $0x10 // mov r8, qword [rsp + 16] - QUAD $0x01065c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r8 + 1], 14 - LONG $0x24448b4c; BYTE $0x30 // mov r8, qword [rsp + 48] - QUAD $0x01065c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r8 + 1], 15 - QUAD $0x080e6c203a0f4466; BYTE $0x01 // pinsrb xmm13, byte [rsi + rcx + 8], 1 + QUAD $0x000000c024ac8b4c // mov r13, qword [rsp + 192] + QUAD $0x012e5c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r13 + 1], 3 + QUAD $0x01165c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rsi + rdx + 1], 4 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x010e5c203a0f4466; BYTE $0x05 // pinsrb xmm11, byte [rsi + rcx + 1], 5 + QUAD $0x01365c203a0f4666; BYTE $0x06 // pinsrb xmm11, byte [rsi + r14 + 1], 6 + QUAD 
$0x01065c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r8 + 1], 7 + QUAD $0x010e5c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r9 + 1], 8 + WORD $0x894c; BYTE $0xe0 // mov rax, r12 + QUAD $0x0000013024a4894c // mov qword [rsp + 304], r12 + QUAD $0x01265c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r12 + 1], 9 + QUAD $0x011e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r11 + 1], 10 + QUAD $0x013e5c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r15 + 1], 11 + QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] + QUAD $0x01265c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r12 + 1], 12 + QUAD $0x013e5c203a0f4466; BYTE $0x0d // pinsrb xmm11, byte [rsi + rdi + 1], 13 + LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] + QUAD $0x01265c203a0f4666; BYTE $0x0e // pinsrb xmm11, byte [rsi + r12 + 1], 14 + LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] + QUAD $0x013e5c203a0f4466; BYTE $0x0f // pinsrb xmm11, byte [rsi + rdi + 1], 15 + QUAD $0x08166c203a0f4666; BYTE $0x01 // pinsrb xmm13, byte [rsi + r10 + 8], 1 QUAD $0x081e6c203a0f4466; BYTE $0x02 // pinsrb xmm13, byte [rsi + rbx + 8], 2 - QUAD $0x08366c203a0f4666; BYTE $0x03 // pinsrb xmm13, byte [rsi + r14 + 8], 3 - QUAD $0x08266c203a0f4666; BYTE $0x04 // pinsrb xmm13, byte [rsi + r12 + 8], 4 - QUAD $0x081e6c203a0f4666; BYTE $0x05 // pinsrb xmm13, byte [rsi + r11 + 8], 5 - QUAD $0x08166c203a0f4466; BYTE $0x06 // pinsrb xmm13, byte [rsi + rdx + 8], 6 - QUAD $0x082e6c203a0f4666; BYTE $0x07 // pinsrb xmm13, byte [rsi + r13 + 8], 7 - QUAD $0x000000e024ac8b4c // mov r13, qword [rsp + 224] - QUAD $0x082e6c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r13 + 8], 8 - QUAD $0x08166c203a0f4666; BYTE $0x09 // pinsrb xmm13, byte [rsi + r10 + 8], 9 - QUAD $0x080e6c203a0f4666; BYTE $0x0a // pinsrb xmm13, byte [rsi + r9 + 8], 10 - QUAD $0x083e6c203a0f4466; BYTE $0x0b // pinsrb xmm13, byte [rsi + rdi + 8], 11 - QUAD $0x083e6c203a0f4666; BYTE $0x0c // pinsrb xmm13, byte [rsi + r15 + 8], 12 + QUAD $0x082e6c203a0f4666; BYTE $0x03 // pinsrb xmm13, byte [rsi + r13 + 8], 3 + QUAD $0x08166c203a0f4466; BYTE $0x04 // pinsrb xmm13, byte [rsi + rdx + 8], 4 + QUAD $0x080e6c203a0f4466; BYTE $0x05 // pinsrb xmm13, byte [rsi + rcx + 8], 5 + QUAD $0x08366c203a0f4666; BYTE $0x06 // pinsrb xmm13, byte [rsi + r14 + 8], 6 + QUAD $0x08066c203a0f4666; BYTE $0x07 // pinsrb xmm13, byte [rsi + r8 + 8], 7 + QUAD $0x080e6c203a0f4666; BYTE $0x08 // pinsrb xmm13, byte [rsi + r9 + 8], 8 + QUAD $0x08066c203a0f4466; BYTE $0x09 // pinsrb xmm13, byte [rsi + rax + 8], 9 + QUAD $0x081e6c203a0f4666; BYTE $0x0a // pinsrb xmm13, byte [rsi + r11 + 8], 10 + QUAD $0x083e6c203a0f4666; BYTE $0x0b // pinsrb xmm13, byte [rsi + r15 + 8], 11 + QUAD $0x000000d024848b48 // mov rax, qword [rsp + 208] + QUAD $0x08066c203a0f4466; BYTE $0x0c // pinsrb xmm13, byte [rsi + rax + 8], 12 + WORD $0x8948; BYTE $0xc7 // mov rdi, rax + LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] QUAD $0x08066c203a0f4466; BYTE $0x0d // pinsrb xmm13, byte [rsi + rax + 8], 13 - LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] - QUAD $0x080e6c203a0f4466; BYTE $0x0e // pinsrb xmm13, byte [rsi + rcx + 8], 14 - QUAD $0x08066c203a0f4666; BYTE $0x0f // pinsrb xmm13, byte [rsi + r8 + 8], 15 + QUAD $0x08266c203a0f4666; BYTE $0x0e // pinsrb xmm13, byte [rsi + r12 + 8], 14 + LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + QUAD $0x08066c203a0f4466; BYTE $0x0f // pinsrb xmm13, byte [rsi + rax + 8], 15 LONG $0x6f0f4566; BYTE $0xcd // movdqa xmm9, xmm13 LONG 
$0xde0f4466; BYTE $0xcf // pmaxub xmm9, xmm7 LONG $0x740f4566; BYTE $0xcd // pcmpeqb xmm9, xmm13 - LONG $0x247c8b48; BYTE $0x20 // mov rdi, qword [rsp + 32] - QUAD $0x103e64203a0f4466; BYTE $0x01 // pinsrb xmm12, byte [rsi + rdi + 16], 1 + QUAD $0x101664203a0f4666; BYTE $0x01 // pinsrb xmm12, byte [rsi + r10 + 16], 1 QUAD $0x101e64203a0f4466; BYTE $0x02 // pinsrb xmm12, byte [rsi + rbx + 16], 2 - WORD $0x8949; BYTE $0xda // mov r10, rbx - QUAD $0x103664203a0f4666; BYTE $0x03 // pinsrb xmm12, byte [rsi + r14 + 16], 3 - QUAD $0x102664203a0f4666; BYTE $0x04 // pinsrb xmm12, byte [rsi + r12 + 16], 4 - QUAD $0x101e64203a0f4666; BYTE $0x05 // pinsrb xmm12, byte [rsi + r11 + 16], 5 - QUAD $0x101664203a0f4466; BYTE $0x06 // pinsrb xmm12, byte [rsi + rdx + 16], 6 - WORD $0x8948; BYTE $0xd1 // mov rcx, rdx - QUAD $0x000000f024948948 // mov qword [rsp + 240], rdx - QUAD $0x00000130249c8b4c // mov r11, qword [rsp + 304] - QUAD $0x101e64203a0f4666; BYTE $0x07 // pinsrb xmm12, byte [rsi + r11 + 16], 7 - QUAD $0x102e64203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r13 + 16], 8 - WORD $0x894d; BYTE $0xe8 // mov r8, r13 - QUAD $0x0000011024ac8b4c // mov r13, qword [rsp + 272] - QUAD $0x102e64203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r13 + 16], 9 - QUAD $0x100e64203a0f4666; BYTE $0x0a // pinsrb xmm12, byte [rsi + r9 + 16], 10 - LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] - QUAD $0x101664203a0f4466; BYTE $0x0b // pinsrb xmm12, byte [rsi + rdx + 16], 11 - QUAD $0x103e64203a0f4666; BYTE $0x0c // pinsrb xmm12, byte [rsi + r15 + 16], 12 - QUAD $0x100664203a0f4466; BYTE $0x0d // pinsrb xmm12, byte [rsi + rax + 16], 13 - LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] - QUAD $0x101e64203a0f4466; BYTE $0x0e // pinsrb xmm12, byte [rsi + rbx + 16], 14 - LONG $0x24648b4c; BYTE $0x30 // mov r12, qword [rsp + 48] - QUAD $0x102664203a0f4666; BYTE $0x0f // pinsrb xmm12, byte [rsi + r12 + 16], 15 + QUAD $0x102e64203a0f4666; BYTE $0x03 // pinsrb xmm12, byte [rsi + r13 + 16], 3 + QUAD $0x101664203a0f4466; BYTE $0x04 // pinsrb xmm12, byte [rsi + rdx + 16], 4 + QUAD $0x100e64203a0f4466; BYTE $0x05 // pinsrb xmm12, byte [rsi + rcx + 16], 5 + QUAD $0x103664203a0f4666; BYTE $0x06 // pinsrb xmm12, byte [rsi + r14 + 16], 6 + QUAD $0x100664203a0f4666; BYTE $0x07 // pinsrb xmm12, byte [rsi + r8 + 16], 7 + WORD $0x894c; BYTE $0xc2 // mov rdx, r8 + QUAD $0x000001b02484894c // mov qword [rsp + 432], r8 + QUAD $0x100e64203a0f4666; BYTE $0x08 // pinsrb xmm12, byte [rsi + r9 + 16], 8 + QUAD $0x0000013024a48b4c // mov r12, qword [rsp + 304] + QUAD $0x102664203a0f4666; BYTE $0x09 // pinsrb xmm12, byte [rsi + r12 + 16], 9 + QUAD $0x101e64203a0f4666; BYTE $0x0a // pinsrb xmm12, byte [rsi + r11 + 16], 10 + QUAD $0x000000b0249c894c // mov qword [rsp + 176], r11 + QUAD $0x103e64203a0f4666; BYTE $0x0b // pinsrb xmm12, byte [rsi + r15 + 16], 11 + WORD $0x8948; BYTE $0xf9 // mov rcx, rdi + QUAD $0x103e64203a0f4466; BYTE $0x0c // pinsrb xmm12, byte [rsi + rdi + 16], 12 + LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] + QUAD $0x103e64203a0f4466; BYTE $0x0d // pinsrb xmm12, byte [rsi + rdi + 16], 13 + LONG $0x24448b4c; BYTE $0x10 // mov r8, qword [rsp + 16] + QUAD $0x100664203a0f4666; BYTE $0x0e // pinsrb xmm12, byte [rsi + r8 + 16], 14 + QUAD $0x100664203a0f4466; BYTE $0x0f // pinsrb xmm12, byte [rsi + rax + 16], 15 LONG $0x6f0f4166; BYTE $0xfc // movdqa xmm7, xmm12 LONG $0xde0f4166; BYTE $0xff // pmaxub xmm7, xmm15 LONG $0x740f4166; BYTE $0xfc // pcmpeqb xmm7, xmm12 - QUAD $0x0001b024bc7f0f66; BYTE 
$0x00 // movdqa oword [rsp + 432], xmm7 - QUAD $0x01183e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 24], 1 - QUAD $0x181654203a0f4266; BYTE $0x02 // pinsrb xmm2, byte [rsi + r10 + 24], 2 - QUAD $0x183654203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r14 + 24], 3 - LONG $0x247c8b48; BYTE $0x60 // mov rdi, qword [rsp + 96] - QUAD $0x04183e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 24], 4 - QUAD $0x0000009024b48b4c // mov r14, qword [rsp + 144] - QUAD $0x183654203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r14 + 24], 5 - QUAD $0x06180e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 24], 6 - QUAD $0x181e54203a0f4266; BYTE $0x07 // pinsrb xmm2, byte [rsi + r11 + 24], 7 - QUAD $0x180654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r8 + 24], 8 - QUAD $0x182e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r13 + 24], 9 - QUAD $0x180e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r9 + 24], 10 - QUAD $0x0b181654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 24], 11 - QUAD $0x183e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r15 + 24], 12 - QUAD $0x0d180654203a0f66 // pinsrb xmm2, byte [rsi + rax + 24], 13 - QUAD $0x0e181e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 24], 14 - QUAD $0x182654203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r12 + 24], 15 + QUAD $0x00017024bc7f0f66; BYTE $0x00 // movdqa oword [rsp + 368], xmm7 + QUAD $0x181654203a0f4266; BYTE $0x01 // pinsrb xmm2, byte [rsi + r10 + 24], 1 + QUAD $0x02181e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 24], 2 + QUAD $0x182e54203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r13 + 24], 3 + LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] + QUAD $0x04181e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 24], 4 + LONG $0x24548b4c; BYTE $0x30 // mov r10, qword [rsp + 48] + QUAD $0x181654203a0f4266; BYTE $0x05 // pinsrb xmm2, byte [rsi + r10 + 24], 5 + QUAD $0x183654203a0f4266; BYTE $0x06 // pinsrb xmm2, byte [rsi + r14 + 24], 6 + QUAD $0x07181654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 24], 7 + QUAD $0x180e54203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r9 + 24], 8 + QUAD $0x182654203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r12 + 24], 9 + QUAD $0x181e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r11 + 24], 10 + QUAD $0x183e54203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r15 + 24], 11 + WORD $0x894d; BYTE $0xfc // mov r12, r15 + QUAD $0x0c180e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 24], 12 + QUAD $0x0d183e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 24], 13 + QUAD $0x180654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r8 + 24], 14 + WORD $0x894d; BYTE $0xc5 // mov r13, r8 + QUAD $0x0f180654203a0f66 // pinsrb xmm2, byte [rsi + rax + 24], 15 LONG $0xfa6f0f66 // movdqa xmm7, xmm2 LONG $0xde0f4166; BYTE $0xff // pmaxub xmm7, xmm15 LONG $0xfa740f66 // pcmpeqb xmm7, xmm2 - QUAD $0x00014024bc7f0f66; BYTE $0x00 // movdqa oword [rsp + 320], xmm7 + QUAD $0x0000f024bc7f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm7 LONG $0x6f0f4566; BYTE $0xe3 // movdqa xmm12, xmm11 LONG $0x6f0f4566; BYTE $0xef // movdqa xmm13, xmm15 LONG $0xde0f4566; BYTE $0xe7 // pmaxub xmm12, xmm15 LONG $0x740f4566; BYTE $0xe3 // pcmpeqb xmm12, xmm11 - QUAD $0x0000008024948b48 // mov rdx, qword [rsp + 128] - LONG $0x1654b60f; BYTE $0x0d // movzx edx, byte [rsi + rdx + 13] + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + LONG $0x0654b60f; BYTE $0x0d // movzx edx, byte [rsi + rax + 13] LONG $0x6e0f4466; BYTE $0xfa // movd xmm15, edx - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] - QUAD 
$0x020e74203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rsi + rcx + 2], 1 - WORD $0x894c; BYTE $0xd3 // mov rbx, r10 - QUAD $0x021674203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rsi + r10 + 2], 2 - QUAD $0x0000012024948b4c // mov r10, qword [rsp + 288] - QUAD $0x021674203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rsi + r10 + 2], 3 - WORD $0x8948; BYTE $0xfa // mov rdx, rdi - QUAD $0x023e74203a0f4466; BYTE $0x04 // pinsrb xmm14, byte [rsi + rdi + 2], 4 - WORD $0x894c; BYTE $0xf1 // mov rcx, r14 - QUAD $0x023674203a0f4666; BYTE $0x05 // pinsrb xmm14, byte [rsi + r14 + 2], 5 - QUAD $0x000000f024bc8b48 // mov rdi, qword [rsp + 240] - QUAD $0x023e74203a0f4466; BYTE $0x06 // pinsrb xmm14, byte [rsi + rdi + 2], 6 - QUAD $0x021e74203a0f4666; BYTE $0x07 // pinsrb xmm14, byte [rsi + r11 + 2], 7 - QUAD $0x020674203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r8 + 2], 8 - QUAD $0x022e74203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r13 + 2], 9 - QUAD $0x020e74203a0f4666; BYTE $0x0a // pinsrb xmm14, byte [rsi + r9 + 2], 10 - LONG $0x24748b4c; BYTE $0x50 // mov r14, qword [rsp + 80] - QUAD $0x023674203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r14 + 2], 11 - QUAD $0x023e74203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rsi + r15 + 2], 12 - LONG $0x24448948; BYTE $0x70 // mov qword [rsp + 112], rax - QUAD $0x020674203a0f4466; BYTE $0x0d // pinsrb xmm14, byte [rsi + rax + 2], 13 + LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] + QUAD $0x020674203a0f4666; BYTE $0x01 // pinsrb xmm14, byte [rsi + r8 + 2], 1 + QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] + QUAD $0x023e74203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rsi + r15 + 2], 2 + QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] + QUAD $0x023e74203a0f4466; BYTE $0x03 // pinsrb xmm14, byte [rsi + rdi + 2], 3 + WORD $0x8949; BYTE $0xdb // mov r11, rbx + QUAD $0x021e74203a0f4466; BYTE $0x04 // pinsrb xmm14, byte [rsi + rbx + 2], 4 + QUAD $0x021674203a0f4666; BYTE $0x05 // pinsrb xmm14, byte [rsi + r10 + 2], 5 + QUAD $0x023674203a0f4666; BYTE $0x06 // pinsrb xmm14, byte [rsi + r14 + 2], 6 + QUAD $0x000001b0249c8b48 // mov rbx, qword [rsp + 432] + QUAD $0x021e74203a0f4466; BYTE $0x07 // pinsrb xmm14, byte [rsi + rbx + 2], 7 + WORD $0x894c; BYTE $0xca // mov rdx, r9 + QUAD $0x00000100248c894c // mov qword [rsp + 256], r9 + QUAD $0x020e74203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r9 + 2], 8 + QUAD $0x00000130248c8b4c // mov r9, qword [rsp + 304] + QUAD $0x020e74203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r9 + 2], 9 + QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x020674203a0f4466; BYTE $0x0a // pinsrb xmm14, byte [rsi + rax + 2], 10 + QUAD $0x022674203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r12 + 2], 11 + WORD $0x8949; BYTE $0xcc // mov r12, rcx + QUAD $0x020e74203a0f4466; BYTE $0x0c // pinsrb xmm14, byte [rsi + rcx + 2], 12 + LONG $0x244c8b48; BYTE $0x60 // mov rcx, qword [rsp + 96] + QUAD $0x020e74203a0f4466; BYTE $0x0d // pinsrb xmm14, byte [rsi + rcx + 2], 13 + QUAD $0x022e74203a0f4666; BYTE $0x0e // pinsrb xmm14, byte [rsi + r13 + 2], 14 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x022e74203a0f4666; BYTE $0x0f // pinsrb xmm14, byte [rsi + r13 + 2], 15 + QUAD $0x03066c203a0f4266; BYTE $0x01 // pinsrb xmm5, byte [rsi + r8 + 3], 1 + QUAD $0x033e6c203a0f4266; BYTE $0x02 // pinsrb xmm5, byte [rsi + r15 + 3], 2 + QUAD $0x03033e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 3], 3 + WORD $0x8949; BYTE $0xf8 // mov r8, rdi + QUAD $0x031e6c203a0f4266; BYTE $0x04 
// pinsrb xmm5, byte [rsi + r11 + 3], 4 + QUAD $0x03166c203a0f4266; BYTE $0x05 // pinsrb xmm5, byte [rsi + r10 + 3], 5 + QUAD $0x03366c203a0f4266; BYTE $0x06 // pinsrb xmm5, byte [rsi + r14 + 3], 6 + QUAD $0x07031e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 3], 7 + QUAD $0x0803166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 3], 8 + QUAD $0x030e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r9 + 3], 9 + QUAD $0x0a03066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 3], 10 + QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] + QUAD $0x033e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r15 + 3], 11 + QUAD $0x03266c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r12 + 3], 12 + QUAD $0x0d030e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 3], 13 LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] - QUAD $0x020674203a0f4466; BYTE $0x0e // pinsrb xmm14, byte [rsi + rax + 2], 14 - QUAD $0x022674203a0f4666; BYTE $0x0f // pinsrb xmm14, byte [rsi + r12 + 2], 15 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0103066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 3], 1 - QUAD $0x02031e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 3], 2 - QUAD $0x03166c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r10 + 3], 3 - QUAD $0x0403166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 3], 4 - WORD $0x8948; BYTE $0xd0 // mov rax, rdx - QUAD $0x05030e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 3], 5 - QUAD $0x06033e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 3], 6 - QUAD $0x031e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r11 + 3], 7 - QUAD $0x03066c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r8 + 3], 8 - QUAD $0x032e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r13 + 3], 9 - QUAD $0x030e6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r9 + 3], 10 - QUAD $0x03366c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r14 + 3], 11 - QUAD $0x033e6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r15 + 3], 12 - LONG $0x24748b4c; BYTE $0x70 // mov r14, qword [rsp + 112] - QUAD $0x03366c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r14 + 3], 13 - LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] - QUAD $0x0e03166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 3], 14 - QUAD $0x03266c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r12 + 3], 15 + QUAD $0x0e03066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 3], 14 + QUAD $0x032e6c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r13 + 3], 15 QUAD $0x00000100956f0f66 // movdqa xmm2, oword 256[rbp] /* [rip + .LCPI10_16] */ LONG $0xdb0f4466; BYTE $0xe2 // pand xmm12, xmm2 LONG $0xf80f4466; BYTE $0xe0 // psubb xmm12, xmm0 @@ -50931,54 +52417,60 @@ LBB10_195: LONG $0xd56f0f66 // movdqa xmm2, xmm5 LONG $0xde0f4166; BYTE $0xd5 // pmaxub xmm2, xmm13 LONG $0xd5740f66 // pcmpeqb xmm2, xmm5 - QUAD $0x0000008024948b48 // mov rdx, qword [rsp + 128] - LONG $0x1654b60f; BYTE $0x0e // movzx edx, byte [rsi + rdx + 14] + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + LONG $0x0654b60f; BYTE $0x0e // movzx edx, byte [rsi + rax + 14] LONG $0x6e0f4466; BYTE $0xf2 // movd xmm14, edx - LONG $0x24648b4c; BYTE $0x20 // mov r12, qword [rsp + 32] - QUAD $0x04265c203a0f4266; BYTE $0x01 // pinsrb xmm3, byte [rsi + r12 + 4], 1 - QUAD $0x02041e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 4], 2 - QUAD $0x04165c203a0f4266; BYTE $0x03 // pinsrb xmm3, byte [rsi + r10 + 4], 3 - QUAD $0x0404065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 4], 4 - QUAD $0x05040e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 4], 5 - QUAD 
$0x06043e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 4], 6 - QUAD $0x041e5c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r11 + 4], 7 - QUAD $0x04065c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r8 + 4], 8 - QUAD $0x042e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r13 + 4], 9 - QUAD $0x040e5c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r9 + 4], 10 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0b04065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 4], 11 - QUAD $0x000000c024bc894c // mov qword [rsp + 192], r15 - QUAD $0x043e5c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r15 + 4], 12 - QUAD $0x04365c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r14 + 4], 13 + LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] + QUAD $0x01043e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 4], 1 + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + QUAD $0x0204065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 4], 2 + QUAD $0x04065c203a0f4266; BYTE $0x03 // pinsrb xmm3, byte [rsi + r8 + 4], 3 + QUAD $0x041e5c203a0f4266; BYTE $0x04 // pinsrb xmm3, byte [rsi + r11 + 4], 4 + WORD $0x894c; BYTE $0xd1 // mov rcx, r10 + QUAD $0x04165c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r10 + 4], 5 + QUAD $0x04365c203a0f4266; BYTE $0x06 // pinsrb xmm3, byte [rsi + r14 + 4], 6 + QUAD $0x07041e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 4], 7 + QUAD $0x0000010024948b4c // mov r10, qword [rsp + 256] + QUAD $0x04165c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r10 + 4], 8 + QUAD $0x040e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r9 + 4], 9 + QUAD $0x000000b024848b4c // mov r8, qword [rsp + 176] + QUAD $0x04065c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r8 + 4], 10 + QUAD $0x043e5c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r15 + 4], 11 + QUAD $0x04265c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r12 + 4], 12 + LONG $0x245c8b4c; BYTE $0x60 // mov r11, qword [rsp + 96] + QUAD $0x041e5c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r11 + 4], 13 LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] QUAD $0x0e04165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 4], 14 - LONG $0x24748b4c; BYTE $0x30 // mov r14, qword [rsp + 48] - QUAD $0x04365c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r14 + 4], 15 - QUAD $0x05264c203a0f4266; BYTE $0x01 // pinsrb xmm1, byte [rsi + r12 + 5], 1 - QUAD $0x02051e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 5], 2 - QUAD $0x05164c203a0f4266; BYTE $0x03 // pinsrb xmm1, byte [rsi + r10 + 5], 3 - LONG $0x24648b4c; BYTE $0x60 // mov r12, qword [rsp + 96] - QUAD $0x05264c203a0f4266; BYTE $0x04 // pinsrb xmm1, byte [rsi + r12 + 5], 4 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x042e5c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r13 + 4], 15 + QUAD $0x01053e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 5], 1 + QUAD $0x0205064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 5], 2 + QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] + QUAD $0x03053e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 5], 3 + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x04053e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 5], 4 QUAD $0x05050e4c203a0f66 // pinsrb xmm1, byte [rsi + rcx + 5], 5 - QUAD $0x06053e4c203a0f66 // pinsrb xmm1, byte [rsi + rdi + 5], 6 - QUAD $0x051e4c203a0f4266; BYTE $0x07 // pinsrb xmm1, byte [rsi + r11 + 5], 7 - QUAD $0x05064c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r8 + 5], 8 - QUAD $0x052e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + 
r13 + 5], 9 - QUAD $0x050e4c203a0f4266; BYTE $0x0a // pinsrb xmm1, byte [rsi + r9 + 5], 10 - QUAD $0x0b05064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 5], 11 - QUAD $0x053e4c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r15 + 5], 12 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x0d05064c203a0f66 // pinsrb xmm1, byte [rsi + rax + 5], 13 + QUAD $0x05364c203a0f4266; BYTE $0x06 // pinsrb xmm1, byte [rsi + r14 + 5], 6 + QUAD $0x07051e4c203a0f66 // pinsrb xmm1, byte [rsi + rbx + 5], 7 + QUAD $0x05164c203a0f4266; BYTE $0x08 // pinsrb xmm1, byte [rsi + r10 + 5], 8 + WORD $0x894c; BYTE $0xd1 // mov rcx, r10 + QUAD $0x050e4c203a0f4266; BYTE $0x09 // pinsrb xmm1, byte [rsi + r9 + 5], 9 + QUAD $0x05064c203a0f4266; BYTE $0x0a // pinsrb xmm1, byte [rsi + r8 + 5], 10 + QUAD $0x053e4c203a0f4266; BYTE $0x0b // pinsrb xmm1, byte [rsi + r15 + 5], 11 + WORD $0x894d; BYTE $0xfa // mov r10, r15 + QUAD $0x05264c203a0f4266; BYTE $0x0c // pinsrb xmm1, byte [rsi + r12 + 5], 12 + QUAD $0x051e4c203a0f4266; BYTE $0x0d // pinsrb xmm1, byte [rsi + r11 + 5], 13 QUAD $0x0e05164c203a0f66 // pinsrb xmm1, byte [rsi + rdx + 5], 14 QUAD $0x00000110ad6f0f66 // movdqa xmm5, oword 272[rbp] /* [rip + .LCPI10_17] */ LONG $0xdb0f4466; BYTE $0xdd // pand xmm11, xmm5 QUAD $0x00000120ad6f0f66 // movdqa xmm5, oword 288[rbp] /* [rip + .LCPI10_18] */ LONG $0xd5db0f66 // pand xmm2, xmm5 LONG $0xeb0f4166; BYTE $0xd3 // por xmm2, xmm11 - QUAD $0x0000008024bc8b4c // mov r15, qword [rsp + 128] - LONG $0x54b60f42; WORD $0x0f3e // movzx edx, byte [rsi + r15 + 15] + QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] + LONG $0x3e54b60f; BYTE $0x0f // movzx edx, byte [rsi + rdi + 15] LONG $0x6e0f4466; BYTE $0xda // movd xmm11, edx - QUAD $0x05364c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r14 + 5], 15 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x052e4c203a0f4266; BYTE $0x0f // pinsrb xmm1, byte [rsi + r13 + 5], 15 LONG $0xeb0f4166; BYTE $0xd4 // por xmm2, xmm12 LONG $0x6f0f4466; BYTE $0xe3 // movdqa xmm12, xmm3 LONG $0xde0f4566; BYTE $0xe5 // pmaxub xmm12, xmm13 @@ -50986,50 +52478,50 @@ LBB10_195: LONG $0xe96f0f66 // movdqa xmm5, xmm1 LONG $0xde0f4166; BYTE $0xed // pmaxub xmm5, xmm13 LONG $0xe9740f66 // pcmpeqb xmm5, xmm1 - LONG $0x54b60f42; WORD $0x113e // movzx edx, byte [rsi + r15 + 17] + LONG $0x3e54b60f; BYTE $0x11 // movzx edx, byte [rsi + rdi + 17] LONG $0xc26e0f66 // movd xmm0, edx - LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] - QUAD $0x01061664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 6], 1 - QUAD $0x000000b0249c8948 // mov qword [rsp + 176], rbx - QUAD $0x02061e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 6], 2 - QUAD $0x061664203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r10 + 6], 3 - QUAD $0x062664203a0f4266; BYTE $0x04 // pinsrb xmm4, byte [rsi + r12 + 6], 4 - QUAD $0x05060e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 6], 5 - QUAD $0x06063e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 6], 6 - QUAD $0x061e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r11 + 6], 7 - QUAD $0x060664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r8 + 6], 8 - QUAD $0x062e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r13 + 6], 9 - QUAD $0x00000100248c894c // mov qword [rsp + 256], r9 - QUAD $0x060e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r9 + 6], 10 - LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] - QUAD $0x062664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r12 + 6], 11 - QUAD $0x000000c024b48b4c // mov r14, qword [rsp + 192] - 
QUAD $0x063664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r14 + 6], 12 - QUAD $0x0d060664203a0f66 // pinsrb xmm4, byte [rsi + rax + 6], 13 - LONG $0x247c8b4c; BYTE $0x10 // mov r15, qword [rsp + 16] - QUAD $0x063e64203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r15 + 6], 14 - LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] - QUAD $0x063e64203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r15 + 6], 15 - QUAD $0x000170249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 368] - QUAD $0x0107165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 7], 1 - QUAD $0x02071e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 7], 2 - QUAD $0x07165c203a0f4266; BYTE $0x03 // pinsrb xmm3, byte [rsi + r10 + 7], 3 - WORD $0x894c; BYTE $0xd3 // mov rbx, r10 + LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] + QUAD $0x060664203a0f4266; BYTE $0x01 // pinsrb xmm4, byte [rsi + r8 + 6], 1 + QUAD $0x02060664203a0f66 // pinsrb xmm4, byte [rsi + rax + 6], 2 + QUAD $0x000000c024bc8b48 // mov rdi, qword [rsp + 192] + QUAD $0x03063e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 6], 3 + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] + QUAD $0x04060664203a0f66 // pinsrb xmm4, byte [rsi + rax + 6], 4 + LONG $0x245c8b4c; BYTE $0x30 // mov r11, qword [rsp + 48] + QUAD $0x061e64203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r11 + 6], 5 + QUAD $0x063664203a0f4266; BYTE $0x06 // pinsrb xmm4, byte [rsi + r14 + 6], 6 + QUAD $0x07061e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 6], 7 + QUAD $0x08060e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 6], 8 + QUAD $0x060e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r9 + 6], 9 + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x063e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r15 + 6], 10 + QUAD $0x061664203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r10 + 6], 11 + QUAD $0x062664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 6], 12 LONG $0x24548b48; BYTE $0x60 // mov rdx, qword [rsp + 96] - QUAD $0x0407165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 7], 4 - QUAD $0x05070e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 7], 5 - QUAD $0x06073e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 7], 6 - QUAD $0x071e5c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r11 + 7], 7 - QUAD $0x07065c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r8 + 7], 8 - QUAD $0x072e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r13 + 7], 9 - QUAD $0x070e5c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r9 + 7], 10 - QUAD $0x07265c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r12 + 7], 11 - QUAD $0x07365c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r14 + 7], 12 - QUAD $0x0d07065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 7], 13 - LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] - QUAD $0x0e073e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 7], 14 - WORD $0x894c; BYTE $0xf9 // mov rcx, r15 - QUAD $0x073e5c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r15 + 7], 15 + QUAD $0x0d061664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 6], 13 + LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] + QUAD $0x0e060e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 6], 14 + QUAD $0x062e64203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r13 + 6], 15 + QUAD $0x000140249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 320] + QUAD $0x07065c203a0f4266; BYTE $0x01 // pinsrb xmm3, byte [rsi + r8 + 7], 1 + QUAD $0x0000008024848b4c // mov r8, qword [rsp + 128] + QUAD $0x07065c203a0f4266; BYTE $0x02 // pinsrb xmm3, byte [rsi + r8 + 7], 2 + 
QUAD $0x03073e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 7], 3 + QUAD $0x0407065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 7], 4 + WORD $0x894c; BYTE $0xdf // mov rdi, r11 + QUAD $0x071e5c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r11 + 7], 5 + QUAD $0x07365c203a0f4266; BYTE $0x06 // pinsrb xmm3, byte [rsi + r14 + 7], 6 + QUAD $0x0000012024b4894c // mov qword [rsp + 288], r14 + QUAD $0x07071e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 7], 7 + QUAD $0x00000100249c8b4c // mov r11, qword [rsp + 256] + QUAD $0x071e5c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r11 + 7], 8 + QUAD $0x070e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r9 + 7], 9 + QUAD $0x073e5c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r15 + 7], 10 + QUAD $0x07165c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r10 + 7], 11 + QUAD $0x07265c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r12 + 7], 12 + QUAD $0x0d07165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 7], 13 + QUAD $0x0e070e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 7], 14 + QUAD $0x072e5c203a0f4266; BYTE $0x0f // pinsrb xmm3, byte [rsi + r13 + 7], 15 QUAD $0x000001308d6f0f66 // movdqa xmm1, oword 304[rbp] /* [rip + .LCPI10_19] */ LONG $0xdb0f4466; BYTE $0xe1 // pand xmm12, xmm1 QUAD $0x000001408d6f0f66 // movdqa xmm1, oword 320[rbp] /* [rip + .LCPI10_20] */ @@ -51038,102 +52530,97 @@ LBB10_195: LONG $0xcc6f0f66 // movdqa xmm1, xmm4 LONG $0xde0f4166; BYTE $0xcd // pmaxub xmm1, xmm13 LONG $0xcc740f66 // pcmpeqb xmm1, xmm4 - QUAD $0x0000008024848b4c // mov r8, qword [rsp + 128] - LONG $0x54b60f42; WORD $0x1206 // movzx edx, byte [rsi + r8 + 18] + QUAD $0x0000009024948b4c // mov r10, qword [rsp + 144] + LONG $0x54b60f42; WORD $0x1216 // movzx edx, byte [rsi + r10 + 18] LONG $0xe26e0f66 // movd xmm4, edx QUAD $0x00000150bd6f0f66 // movdqa xmm7, oword 336[rbp] /* [rip + .LCPI10_21] */ LONG $0xcfdb0f66 // pand xmm1, xmm7 LONG $0xcdeb0f66 // por xmm1, xmm5 - LONG $0x54b60f42; WORD $0x1306 // movzx edx, byte [rsi + r8 + 19] + LONG $0x54b60f42; WORD $0x1316 // movzx edx, byte [rsi + r10 + 19] LONG $0xea6e0f66 // movd xmm5, edx LONG $0xcaeb0f66 // por xmm1, xmm2 LONG $0xd36f0f66 // movdqa xmm2, xmm3 LONG $0xde0f4166; BYTE $0xd5 // pmaxub xmm2, xmm13 LONG $0xd3740f66 // pcmpeqb xmm2, xmm3 LONG $0x6f0f4466; BYTE $0xe2 // movdqa xmm12, xmm2 - LONG $0x54b60f42; WORD $0x1406 // movzx edx, byte [rsi + r8 + 20] + LONG $0x54b60f42; WORD $0x1416 // movzx edx, byte [rsi + r10 + 20] LONG $0xd26e0f66 // movd xmm2, edx - QUAD $0x0000a0249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 160] - LONG $0x24748b4c; BYTE $0x20 // mov r14, qword [rsp + 32] - QUAD $0x09365c203a0f4266; BYTE $0x01 // pinsrb xmm3, byte [rsi + r14 + 9], 1 - QUAD $0x000000b024948b4c // mov r10, qword [rsp + 176] - QUAD $0x09165c203a0f4266; BYTE $0x02 // pinsrb xmm3, byte [rsi + r10 + 9], 2 - QUAD $0x03091e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 9], 3 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x0409065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 9], 4 - QUAD $0x00000090248c8b4c // mov r9, qword [rsp + 144] - QUAD $0x090e5c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r9 + 9], 5 - QUAD $0x000000f0249c8b4c // mov r11, qword [rsp + 240] - QUAD $0x091e5c203a0f4266; BYTE $0x06 // pinsrb xmm3, byte [rsi + r11 + 9], 6 - QUAD $0x00000130249c8b48 // mov rbx, qword [rsp + 304] + QUAD $0x000110249c6f0f66; BYTE $0x00 // movdqa xmm3, oword [rsp + 272] + LONG $0x247c8b48; BYTE $0x50 // mov rdi, qword [rsp + 80] + QUAD $0x01093e5c203a0f66 // pinsrb xmm3, byte [rsi 
+ rdi + 9], 1 + WORD $0x894c; BYTE $0xc0 // mov rax, r8 + QUAD $0x09065c203a0f4266; BYTE $0x02 // pinsrb xmm3, byte [rsi + r8 + 9], 2 + QUAD $0x000000c0248c8b48 // mov rcx, qword [rsp + 192] + QUAD $0x03090e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 9], 3 + LONG $0x244c8b48; BYTE $0x70 // mov rcx, qword [rsp + 112] + QUAD $0x04090e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 9], 4 + LONG $0x24448b4c; BYTE $0x30 // mov r8, qword [rsp + 48] + QUAD $0x09065c203a0f4266; BYTE $0x05 // pinsrb xmm3, byte [rsi + r8 + 9], 5 + QUAD $0x09365c203a0f4266; BYTE $0x06 // pinsrb xmm3, byte [rsi + r14 + 9], 6 QUAD $0x07091e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 9], 7 - QUAD $0x000000e024bc8b4c // mov r15, qword [rsp + 224] - QUAD $0x093e5c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r15 + 9], 8 - QUAD $0x092e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r13 + 9], 9 - QUAD $0x0000010024a48b4c // mov r12, qword [rsp + 256] - QUAD $0x09265c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r12 + 9], 10 - LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] - QUAD $0x0b09165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 9], 11 - QUAD $0x000000c024948b48 // mov rdx, qword [rsp + 192] + QUAD $0x091e5c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r11 + 9], 8 + QUAD $0x090e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r9 + 9], 9 + QUAD $0x093e5c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r15 + 9], 10 + QUAD $0x000000a024a48b4c // mov r12, qword [rsp + 160] + QUAD $0x09265c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r12 + 9], 11 + QUAD $0x000000d024948b48 // mov rdx, qword [rsp + 208] QUAD $0x0c09165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 9], 12 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] - QUAD $0x0d09165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 9], 13 - QUAD $0x0e093e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 9], 14 - QUAD $0x0f090e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 9], 15 + LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] + QUAD $0x092e5c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r13 + 9], 13 + LONG $0x24548b48; BYTE $0x10 // mov rdx, qword [rsp + 16] + QUAD $0x0e09165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 9], 14 + LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + QUAD $0x0f09165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 9], 15 LONG $0x7d6f0f66; BYTE $0x60 // movdqa xmm7, oword 96[rbp] /* [rip + .LCPI10_6] */ LONG $0xdb0f4466; BYTE $0xe7 // pand xmm12, xmm7 LONG $0xeb0f4466; BYTE $0xe1 // por xmm12, xmm1 - QUAD $0x00a024a47f0f4466; WORD $0x0000 // movdqa oword [rsp + 160], xmm12 + QUAD $0x011024a47f0f4466; WORD $0x0000 // movdqa oword [rsp + 272], xmm12 LONG $0xfb6f0f66 // movdqa xmm7, xmm3 LONG $0xde0f4166; BYTE $0xfd // pmaxub xmm7, xmm13 LONG $0xfb740f66 // pcmpeqb xmm7, xmm3 - LONG $0x54b60f42; WORD $0x1506 // movzx edx, byte [rsi + r8 + 21] + LONG $0x54b60f42; WORD $0x1516 // movzx edx, byte [rsi + r10 + 21] LONG $0xda6e0f66 // movd xmm3, edx - QUAD $0x0a3644203a0f4666; BYTE $0x01 // pinsrb xmm8, byte [rsi + r14 + 10], 1 - WORD $0x894d; BYTE $0xd6 // mov r14, r10 - QUAD $0x0a1644203a0f4666; BYTE $0x02 // pinsrb xmm8, byte [rsi + r10 + 10], 2 - QUAD $0x0000012024948b4c // mov r10, qword [rsp + 288] + QUAD $0x0a3e44203a0f4466; BYTE $0x01 // pinsrb xmm8, byte [rsi + rdi + 10], 1 + QUAD $0x0a0644203a0f4466; BYTE $0x02 // pinsrb xmm8, byte [rsi + rax + 10], 2 + WORD $0x8949; BYTE $0xc6 // mov r14, rax + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] QUAD $0x0a1644203a0f4666; BYTE $0x03 // 
pinsrb xmm8, byte [rsi + r10 + 10], 3 - QUAD $0x0a0644203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rsi + rax + 10], 4 - WORD $0x894c; BYTE $0xc9 // mov rcx, r9 - QUAD $0x0a0e44203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r9 + 10], 5 - WORD $0x894c; BYTE $0xdf // mov rdi, r11 - QUAD $0x0a1e44203a0f4666; BYTE $0x06 // pinsrb xmm8, byte [rsi + r11 + 10], 6 - WORD $0x8949; BYTE $0xdb // mov r11, rbx + QUAD $0x0a0e44203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rsi + rcx + 10], 4 + QUAD $0x0a0644203a0f4666; BYTE $0x05 // pinsrb xmm8, byte [rsi + r8 + 10], 5 + QUAD $0x0000012024848b48 // mov rax, qword [rsp + 288] + QUAD $0x0a0644203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rax + 10], 6 QUAD $0x0a1e44203a0f4466; BYTE $0x07 // pinsrb xmm8, byte [rsi + rbx + 10], 7 - WORD $0x894d; BYTE $0xf8 // mov r8, r15 - QUAD $0x0a3e44203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r15 + 10], 8 - QUAD $0x0a2e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r13 + 10], 9 - WORD $0x894d; BYTE $0xe1 // mov r9, r12 - QUAD $0x0a2644203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r12 + 10], 10 - LONG $0x24648b4c; BYTE $0x50 // mov r12, qword [rsp + 80] + QUAD $0x0a1e44203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r11 + 10], 8 + QUAD $0x0a0e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r9 + 10], 9 + WORD $0x894c; BYTE $0xf8 // mov rax, r15 + QUAD $0x0a3e44203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r15 + 10], 10 + WORD $0x894c; BYTE $0xe2 // mov rdx, r12 QUAD $0x0a2644203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r12 + 10], 11 - QUAD $0x000000c024bc8b4c // mov r15, qword [rsp + 192] - QUAD $0x0a3e44203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r15 + 10], 12 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x0a0644203a0f4466; BYTE $0x0d // pinsrb xmm8, byte [rsi + rax + 10], 13 - LONG $0x245c8b48; BYTE $0x10 // mov rbx, qword [rsp + 16] - QUAD $0x0a1e44203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rbx + 10], 14 - LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] - QUAD $0x0a1644203a0f4466; BYTE $0x0f // pinsrb xmm8, byte [rsi + rdx + 10], 15 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0b0654203a0f4466; BYTE $0x01 // pinsrb xmm10, byte [rsi + rax + 11], 1 + QUAD $0x000000d024a48b4c // mov r12, qword [rsp + 208] + QUAD $0x0a2644203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r12 + 10], 12 + WORD $0x894d; BYTE $0xef // mov r15, r13 + QUAD $0x0a2e44203a0f4666; BYTE $0x0d // pinsrb xmm8, byte [rsi + r13 + 10], 13 + LONG $0x244c8b48; BYTE $0x10 // mov rcx, qword [rsp + 16] + QUAD $0x0a0e44203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rcx + 10], 14 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x0a2e44203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r13 + 10], 15 + QUAD $0x0b3e54203a0f4466; BYTE $0x01 // pinsrb xmm10, byte [rsi + rdi + 11], 1 QUAD $0x0b3654203a0f4666; BYTE $0x02 // pinsrb xmm10, byte [rsi + r14 + 11], 2 QUAD $0x0b1654203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r10 + 11], 3 - LONG $0x24748b4c; BYTE $0x60 // mov r14, qword [rsp + 96] - QUAD $0x0b3654203a0f4666; BYTE $0x04 // pinsrb xmm10, byte [rsi + r14 + 11], 4 - QUAD $0x0b0e54203a0f4466; BYTE $0x05 // pinsrb xmm10, byte [rsi + rcx + 11], 5 - QUAD $0x0b3e54203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rdi + 11], 6 - QUAD $0x0b1e54203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r11 + 11], 7 - QUAD $0x0b0654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r8 + 11], 8 - QUAD $0x0b2e54203a0f4666; BYTE 
$0x09 // pinsrb xmm10, byte [rsi + r13 + 11], 9 - QUAD $0x0b0e54203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r9 + 11], 10 - QUAD $0x0b2654203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r12 + 11], 11 - QUAD $0x0b3e54203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r15 + 11], 12 - LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] - QUAD $0x0b0654203a0f4466; BYTE $0x0d // pinsrb xmm10, byte [rsi + rax + 11], 13 - QUAD $0x0b1e54203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rbx + 11], 14 - QUAD $0x0b1654203a0f4466; BYTE $0x0f // pinsrb xmm10, byte [rsi + rdx + 11], 15 + LONG $0x247c8b48; BYTE $0x70 // mov rdi, qword [rsp + 112] + QUAD $0x0b3e54203a0f4466; BYTE $0x04 // pinsrb xmm10, byte [rsi + rdi + 11], 4 + QUAD $0x0b0654203a0f4666; BYTE $0x05 // pinsrb xmm10, byte [rsi + r8 + 11], 5 + QUAD $0x0000012024b48b4c // mov r14, qword [rsp + 288] + QUAD $0x0b3654203a0f4666; BYTE $0x06 // pinsrb xmm10, byte [rsi + r14 + 11], 6 + QUAD $0x0b1e54203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rbx + 11], 7 + QUAD $0x0b1e54203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r11 + 11], 8 + QUAD $0x0b0e54203a0f4666; BYTE $0x09 // pinsrb xmm10, byte [rsi + r9 + 11], 9 + QUAD $0x0b0654203a0f4466; BYTE $0x0a // pinsrb xmm10, byte [rsi + rax + 11], 10 + QUAD $0x0b1654203a0f4466; BYTE $0x0b // pinsrb xmm10, byte [rsi + rdx + 11], 11 + QUAD $0x0b2654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r12 + 11], 12 + QUAD $0x0b3e54203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r15 + 11], 13 + QUAD $0x0b0e54203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rcx + 11], 14 + QUAD $0x0b2e54203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r13 + 11], 15 QUAD $0x00000100bddb0f66 // pand xmm7, oword 256[rbp] /* [rip + .LCPI10_16] */ LONG $0xf80f4166; BYTE $0xf9 // psubb xmm7, xmm9 LONG $0x6f0f4166; BYTE $0xc8 // movdqa xmm1, xmm8 @@ -51142,56 +52629,60 @@ LBB10_195: LONG $0x6f0f4566; BYTE $0xca // movdqa xmm9, xmm10 LONG $0xde0f4566; BYTE $0xcd // pmaxub xmm9, xmm13 LONG $0x740f4566; BYTE $0xca // pcmpeqb xmm9, xmm10 - QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] LONG $0x0654b60f; BYTE $0x16 // movzx edx, byte [rsi + rax + 22] LONG $0x6e0f4466; BYTE $0xd2 // movd xmm10, edx - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x010c0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 12], 1 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] - QUAD $0x0c2674203a0f4266; BYTE $0x02 // pinsrb xmm6, byte [rsi + r12 + 12], 2 + LONG $0x24448b4c; BYTE $0x50 // mov r8, qword [rsp + 80] + QUAD $0x0c0674203a0f4266; BYTE $0x01 // pinsrb xmm6, byte [rsi + r8 + 12], 1 + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + QUAD $0x020c0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 12], 2 QUAD $0x0c1674203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r10 + 12], 3 - WORD $0x894c; BYTE $0xf2 // mov rdx, r14 - QUAD $0x0c3674203a0f4266; BYTE $0x04 // pinsrb xmm6, byte [rsi + r14 + 12], 4 - QUAD $0x050c0e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 12], 5 - QUAD $0x060c3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 12], 6 - QUAD $0x0c1e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r11 + 12], 7 - QUAD $0x0c0674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r8 + 12], 8 - QUAD $0x0c2e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r13 + 12], 9 - QUAD $0x0c0e74203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r9 + 12], 10 - LONG $0x24748b4c; BYTE $0x50 // mov r14, qword [rsp + 80] - QUAD 
$0x0c3674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r14 + 12], 11 - QUAD $0x0c3e74203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r15 + 12], 12 - LONG $0x245c8b48; BYTE $0x70 // mov rbx, qword [rsp + 112] - QUAD $0x0d0c1e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 12], 13 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] - QUAD $0x0e0c0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 12], 14 - LONG $0x24448b48; BYTE $0x30 // mov rax, qword [rsp + 48] - QUAD $0x0f0c0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 12], 15 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] - QUAD $0x0d067c203a0f4466; BYTE $0x01 // pinsrb xmm15, byte [rsi + rax + 13], 1 - QUAD $0x0d267c203a0f4666; BYTE $0x02 // pinsrb xmm15, byte [rsi + r12 + 13], 2 - QUAD $0x0d167c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rsi + r10 + 13], 3 - QUAD $0x0d167c203a0f4466; BYTE $0x04 // pinsrb xmm15, byte [rsi + rdx + 13], 4 - QUAD $0x0d0e7c203a0f4466; BYTE $0x05 // pinsrb xmm15, byte [rsi + rcx + 13], 5 - QUAD $0x0d3e7c203a0f4466; BYTE $0x06 // pinsrb xmm15, byte [rsi + rdi + 13], 6 - QUAD $0x0d1e7c203a0f4666; BYTE $0x07 // pinsrb xmm15, byte [rsi + r11 + 13], 7 - QUAD $0x0d067c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r8 + 13], 8 - QUAD $0x0d2e7c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r13 + 13], 9 - QUAD $0x0d0e7c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r9 + 13], 10 - QUAD $0x0d367c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r14 + 13], 11 - QUAD $0x0d3e7c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r15 + 13], 12 - WORD $0x894c; BYTE $0xf8 // mov rax, r15 - QUAD $0x0d1e7c203a0f4466; BYTE $0x0d // pinsrb xmm15, byte [rsi + rbx + 13], 13 - LONG $0x247c8b4c; BYTE $0x10 // mov r15, qword [rsp + 16] - QUAD $0x0d3e7c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r15 + 13], 14 + WORD $0x894d; BYTE $0xd7 // mov r15, r10 + WORD $0x8948; BYTE $0xf9 // mov rcx, rdi + QUAD $0x040c3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 12], 4 + LONG $0x247c8b48; BYTE $0x30 // mov rdi, qword [rsp + 48] + QUAD $0x050c3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 12], 5 + QUAD $0x0c3674203a0f4266; BYTE $0x06 // pinsrb xmm6, byte [rsi + r14 + 12], 6 + QUAD $0x070c1e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 12], 7 + QUAD $0x0c1e74203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r11 + 12], 8 + QUAD $0x0c0e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r9 + 12], 9 + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x0a0c1674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 12], 10 + QUAD $0x000000a0249c8b4c // mov r11, qword [rsp + 160] + QUAD $0x0c1e74203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r11 + 12], 11 + QUAD $0x0c2674203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r12 + 12], 12 + LONG $0x24548b48; BYTE $0x60 // mov rdx, qword [rsp + 96] + QUAD $0x0d0c1674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 12], 13 + LONG $0x24548b4c; BYTE $0x10 // mov r10, qword [rsp + 16] + QUAD $0x0c1674203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r10 + 12], 14 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x0c2e74203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r13 + 12], 15 + QUAD $0x0d067c203a0f4666; BYTE $0x01 // pinsrb xmm15, byte [rsi + r8 + 13], 1 + QUAD $0x0d067c203a0f4466; BYTE $0x02 // pinsrb xmm15, byte [rsi + rax + 13], 2 + QUAD $0x0d3e7c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rsi + r15 + 13], 3 + QUAD $0x0d0e7c203a0f4466; BYTE $0x04 // pinsrb xmm15, byte [rsi + rcx + 13], 4 + QUAD $0x0d3e7c203a0f4466; BYTE $0x05 // 
pinsrb xmm15, byte [rsi + rdi + 13], 5 + WORD $0x8949; BYTE $0xf8 // mov r8, rdi + QUAD $0x0d367c203a0f4666; BYTE $0x06 // pinsrb xmm15, byte [rsi + r14 + 13], 6 + QUAD $0x0d1e7c203a0f4466; BYTE $0x07 // pinsrb xmm15, byte [rsi + rbx + 13], 7 + QUAD $0x0000010024bc8b48 // mov rdi, qword [rsp + 256] + QUAD $0x0d3e7c203a0f4466; BYTE $0x08 // pinsrb xmm15, byte [rsi + rdi + 13], 8 + QUAD $0x0d0e7c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r9 + 13], 9 + QUAD $0x000000b024bc8b4c // mov r15, qword [rsp + 176] + QUAD $0x0d3e7c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r15 + 13], 10 + QUAD $0x0d1e7c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r11 + 13], 11 + QUAD $0x0d267c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r12 + 13], 12 + QUAD $0x0d167c203a0f4466; BYTE $0x0d // pinsrb xmm15, byte [rsi + rdx + 13], 13 + WORD $0x8949; BYTE $0xd3 // mov r11, rdx + QUAD $0x0d167c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r10 + 13], 14 QUAD $0x000001108ddb0f66 // pand xmm1, oword 272[rbp] /* [rip + .LCPI10_17] */ QUAD $0x0001208ddb0f4466; BYTE $0x00 // pand xmm9, oword 288[rbp] /* [rip + .LCPI10_18] */ LONG $0xeb0f4466; BYTE $0xc9 // por xmm9, xmm1 - QUAD $0x00000080249c8b48 // mov rbx, qword [rsp + 128] - LONG $0x1e54b60f; BYTE $0x17 // movzx edx, byte [rsi + rbx + 23] + QUAD $0x0000009024848b48 // mov rax, qword [rsp + 144] + LONG $0x0654b60f; BYTE $0x17 // movzx edx, byte [rsi + rax + 23] LONG $0x6e0f4466; BYTE $0xc2 // movd xmm8, edx - LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] - QUAD $0x0d167c203a0f4466; BYTE $0x0f // pinsrb xmm15, byte [rsi + rdx + 13], 15 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] + QUAD $0x0d2e7c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r13 + 13], 15 LONG $0xeb0f4466; BYTE $0xcf // por xmm9, xmm7 LONG $0xce6f0f66 // movdqa xmm1, xmm6 LONG $0xde0f4166; BYTE $0xcd // pmaxub xmm1, xmm13 @@ -51199,155 +52690,156 @@ LBB10_195: LONG $0x6f0f4166; BYTE $0xff // movdqa xmm7, xmm15 LONG $0xde0f4166; BYTE $0xfd // pmaxub xmm7, xmm13 LONG $0x740f4166; BYTE $0xff // pcmpeqb xmm7, xmm15 - LONG $0x1e54b60f; BYTE $0x19 // movzx edx, byte [rsi + rbx + 25] + LONG $0x0654b60f; BYTE $0x19 // movzx edx, byte [rsi + rax + 25] LONG $0x6e0f4466; BYTE $0xfa // movd xmm15, edx - LONG $0x245c8b48; BYTE $0x20 // mov rbx, qword [rsp + 32] - QUAD $0x0e1e74203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rsi + rbx + 14], 1 - QUAD $0x0e2674203a0f4666; BYTE $0x02 // pinsrb xmm14, byte [rsi + r12 + 14], 2 + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] + QUAD $0x0e0674203a0f4466; BYTE $0x01 // pinsrb xmm14, byte [rsi + rax + 14], 1 + QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] + QUAD $0x0e0e74203a0f4466; BYTE $0x02 // pinsrb xmm14, byte [rsi + rcx + 14], 2 + QUAD $0x000000c024948b4c // mov r10, qword [rsp + 192] QUAD $0x0e1674203a0f4666; BYTE $0x03 // pinsrb xmm14, byte [rsi + r10 + 14], 3 - LONG $0x24648b4c; BYTE $0x60 // mov r12, qword [rsp + 96] - QUAD $0x0e2674203a0f4666; BYTE $0x04 // pinsrb xmm14, byte [rsi + r12 + 14], 4 - QUAD $0x0e0e74203a0f4466; BYTE $0x05 // pinsrb xmm14, byte [rsi + rcx + 14], 5 - QUAD $0x0e3e74203a0f4466; BYTE $0x06 // pinsrb xmm14, byte [rsi + rdi + 14], 6 - QUAD $0x0e1e74203a0f4666; BYTE $0x07 // pinsrb xmm14, byte [rsi + r11 + 14], 7 - QUAD $0x0e0674203a0f4666; BYTE $0x08 // pinsrb xmm14, byte [rsi + r8 + 14], 8 - WORD $0x894c; BYTE $0xea // mov rdx, r13 - QUAD $0x0e2e74203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r13 + 14], 9 - QUAD $0x0e0e74203a0f4666; BYTE $0x0a // pinsrb 
xmm14, byte [rsi + r9 + 14], 10 - QUAD $0x0e3674203a0f4666; BYTE $0x0b // pinsrb xmm14, byte [rsi + r14 + 14], 11 - QUAD $0x0e0674203a0f4466; BYTE $0x0c // pinsrb xmm14, byte [rsi + rax + 14], 12 - LONG $0x246c8b4c; BYTE $0x70 // mov r13, qword [rsp + 112] - QUAD $0x0e2e74203a0f4666; BYTE $0x0d // pinsrb xmm14, byte [rsi + r13 + 14], 13 - QUAD $0x0e3e74203a0f4666; BYTE $0x0e // pinsrb xmm14, byte [rsi + r15 + 14], 14 - LONG $0x247c8b4c; BYTE $0x30 // mov r15, qword [rsp + 48] - QUAD $0x0e3e74203a0f4666; BYTE $0x0f // pinsrb xmm14, byte [rsi + r15 + 14], 15 - QUAD $0x0f1e5c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rsi + rbx + 15], 1 - QUAD $0x000000b0249c8b48 // mov rbx, qword [rsp + 176] - QUAD $0x0f1e5c203a0f4466; BYTE $0x02 // pinsrb xmm11, byte [rsi + rbx + 15], 2 - QUAD $0x0f165c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r10 + 15], 3 - QUAD $0x0f265c203a0f4666; BYTE $0x04 // pinsrb xmm11, byte [rsi + r12 + 15], 4 - QUAD $0x0f0e5c203a0f4466; BYTE $0x05 // pinsrb xmm11, byte [rsi + rcx + 15], 5 - QUAD $0x0f3e5c203a0f4466; BYTE $0x06 // pinsrb xmm11, byte [rsi + rdi + 15], 6 - QUAD $0x0f1e5c203a0f4666; BYTE $0x07 // pinsrb xmm11, byte [rsi + r11 + 15], 7 - QUAD $0x0f065c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r8 + 15], 8 - QUAD $0x0f165c203a0f4466; BYTE $0x09 // pinsrb xmm11, byte [rsi + rdx + 15], 9 - QUAD $0x0f0e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r9 + 15], 10 - QUAD $0x0f365c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r14 + 15], 11 - QUAD $0x0f065c203a0f4466; BYTE $0x0c // pinsrb xmm11, byte [rsi + rax + 15], 12 - QUAD $0x0f2e5c203a0f4666; BYTE $0x0d // pinsrb xmm11, byte [rsi + r13 + 15], 13 + LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] + QUAD $0x0e1674203a0f4466; BYTE $0x04 // pinsrb xmm14, byte [rsi + rdx + 14], 4 + QUAD $0x0e0674203a0f4666; BYTE $0x05 // pinsrb xmm14, byte [rsi + r8 + 14], 5 + WORD $0x894d; BYTE $0xf0 // mov r8, r14 + QUAD $0x0e3674203a0f4666; BYTE $0x06 // pinsrb xmm14, byte [rsi + r14 + 14], 6 + QUAD $0x0e1e74203a0f4466; BYTE $0x07 // pinsrb xmm14, byte [rsi + rbx + 14], 7 + QUAD $0x0e3e74203a0f4466; BYTE $0x08 // pinsrb xmm14, byte [rsi + rdi + 14], 8 + WORD $0x8949; BYTE $0xfe // mov r14, rdi + QUAD $0x0e0e74203a0f4666; BYTE $0x09 // pinsrb xmm14, byte [rsi + r9 + 14], 9 + QUAD $0x0e3e74203a0f4666; BYTE $0x0a // pinsrb xmm14, byte [rsi + r15 + 14], 10 + QUAD $0x000000a024bc8b48 // mov rdi, qword [rsp + 160] + QUAD $0x0e3e74203a0f4466; BYTE $0x0b // pinsrb xmm14, byte [rsi + rdi + 14], 11 + QUAD $0x0e2674203a0f4666; BYTE $0x0c // pinsrb xmm14, byte [rsi + r12 + 14], 12 + QUAD $0x0e1e74203a0f4666; BYTE $0x0d // pinsrb xmm14, byte [rsi + r11 + 14], 13 LONG $0x247c8b48; BYTE $0x10 // mov rdi, qword [rsp + 16] + QUAD $0x0e3e74203a0f4466; BYTE $0x0e // pinsrb xmm14, byte [rsi + rdi + 14], 14 + QUAD $0x0e2e74203a0f4666; BYTE $0x0f // pinsrb xmm14, byte [rsi + r13 + 14], 15 + QUAD $0x0f065c203a0f4466; BYTE $0x01 // pinsrb xmm11, byte [rsi + rax + 15], 1 + QUAD $0x0f0e5c203a0f4466; BYTE $0x02 // pinsrb xmm11, byte [rsi + rcx + 15], 2 + QUAD $0x0f165c203a0f4666; BYTE $0x03 // pinsrb xmm11, byte [rsi + r10 + 15], 3 + QUAD $0x0f165c203a0f4466; BYTE $0x04 // pinsrb xmm11, byte [rsi + rdx + 15], 4 + WORD $0x8948; BYTE $0xd1 // mov rcx, rdx + LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] + QUAD $0x0f165c203a0f4466; BYTE $0x05 // pinsrb xmm11, byte [rsi + rdx + 15], 5 + QUAD $0x0f065c203a0f4666; BYTE $0x06 // pinsrb xmm11, byte [rsi + r8 + 15], 6 + QUAD $0x0f1e5c203a0f4466; BYTE $0x07 // pinsrb 
xmm11, byte [rsi + rbx + 15], 7 + QUAD $0x0f365c203a0f4666; BYTE $0x08 // pinsrb xmm11, byte [rsi + r14 + 15], 8 + QUAD $0x0f0e5c203a0f4666; BYTE $0x09 // pinsrb xmm11, byte [rsi + r9 + 15], 9 + QUAD $0x0f3e5c203a0f4666; BYTE $0x0a // pinsrb xmm11, byte [rsi + r15 + 15], 10 + QUAD $0x000000a024848b4c // mov r8, qword [rsp + 160] + QUAD $0x0f065c203a0f4666; BYTE $0x0b // pinsrb xmm11, byte [rsi + r8 + 15], 11 + QUAD $0x0f265c203a0f4666; BYTE $0x0c // pinsrb xmm11, byte [rsi + r12 + 15], 12 + QUAD $0x0f1e5c203a0f4666; BYTE $0x0d // pinsrb xmm11, byte [rsi + r11 + 15], 13 QUAD $0x0f3e5c203a0f4466; BYTE $0x0e // pinsrb xmm11, byte [rsi + rdi + 15], 14 - QUAD $0x0f3e5c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r15 + 15], 15 - WORD $0x894d; BYTE $0xfc // mov r12, r15 + QUAD $0x0f2e5c203a0f4666; BYTE $0x0f // pinsrb xmm11, byte [rsi + r13 + 15], 15 QUAD $0x000001308ddb0f66 // pand xmm1, oword 304[rbp] /* [rip + .LCPI10_19] */ QUAD $0x00000140bddb0f66 // pand xmm7, oword 320[rbp] /* [rip + .LCPI10_20] */ LONG $0xf9eb0f66 // por xmm7, xmm1 LONG $0x6f0f4166; BYTE $0xce // movdqa xmm1, xmm14 LONG $0xde0f4166; BYTE $0xcd // pmaxub xmm1, xmm13 LONG $0x740f4166; BYTE $0xce // pcmpeqb xmm1, xmm14 - QUAD $0x0000008024ac8b4c // mov r13, qword [rsp + 128] - LONG $0x54b60f42; WORD $0x1a2e // movzx edx, byte [rsi + r13 + 26] + QUAD $0x0000009024bc8b48 // mov rdi, qword [rsp + 144] + LONG $0x3e54b60f; BYTE $0x1a // movzx edx, byte [rsi + rdi + 26] LONG $0xf26e0f66 // movd xmm6, edx QUAD $0x000001508ddb0f66 // pand xmm1, oword 336[rbp] /* [rip + .LCPI10_21] */ LONG $0xcfeb0f66 // por xmm1, xmm7 - LONG $0x54b60f42; WORD $0x1b2e // movzx edx, byte [rsi + r13 + 27] + LONG $0x3e54b60f; BYTE $0x1b // movzx edx, byte [rsi + rdi + 27] LONG $0xfa6e0f66 // movd xmm7, edx LONG $0xeb0f4166; BYTE $0xc9 // por xmm1, xmm9 LONG $0x6f0f4566; BYTE $0xf3 // movdqa xmm14, xmm11 LONG $0xde0f4566; BYTE $0xf5 // pmaxub xmm14, xmm13 LONG $0x740f4566; BYTE $0xf3 // pcmpeqb xmm14, xmm11 - LONG $0x54b60f42; WORD $0x1c2e // movzx edx, byte [rsi + r13 + 28] + LONG $0x3e54b60f; BYTE $0x1c // movzx edx, byte [rsi + rdi + 28] LONG $0x6e0f4466; BYTE $0xca // movd xmm9, edx - LONG $0x244c8b48; BYTE $0x20 // mov rcx, qword [rsp + 32] - QUAD $0x01110e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 17], 1 - QUAD $0x02111e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 17], 2 + QUAD $0x01110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 1 + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + QUAD $0x02110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 2 QUAD $0x111644203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r10 + 17], 3 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] - QUAD $0x04110644203a0f66 // pinsrb xmm0, byte [rsi + rax + 17], 4 - QUAD $0x0000009024848b4c // mov r8, qword [rsp + 144] - QUAD $0x110644203a0f4266; BYTE $0x05 // pinsrb xmm0, byte [rsi + r8 + 17], 5 - QUAD $0x000000f0248c8b4c // mov r9, qword [rsp + 240] + QUAD $0x04110e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 17], 4 + LONG $0x24548b48; BYTE $0x30 // mov rdx, qword [rsp + 48] + QUAD $0x05111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 5 + QUAD $0x00000120248c8b4c // mov r9, qword [rsp + 288] QUAD $0x110e44203a0f4266; BYTE $0x06 // pinsrb xmm0, byte [rsi + r9 + 17], 6 - QUAD $0x111e44203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r11 + 17], 7 - QUAD $0x000000e0249c8b48 // mov rbx, qword [rsp + 224] - QUAD $0x08111e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 17], 8 - QUAD $0x0000011024948b48 // mov rdx, qword [rsp + 272] - QUAD 
$0x09111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 9 - QUAD $0x0000010024b48b4c // mov r14, qword [rsp + 256] - QUAD $0x113644203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r14 + 17], 10 - LONG $0x247c8b4c; BYTE $0x50 // mov r15, qword [rsp + 80] - QUAD $0x113e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r15 + 17], 11 - QUAD $0x000000c024948b48 // mov rdx, qword [rsp + 192] - QUAD $0x0c111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 12 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] - QUAD $0x0d111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 13 - QUAD $0x0e113e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 17], 14 - QUAD $0x112644203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r12 + 17], 15 + QUAD $0x07111e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 17], 7 + QUAD $0x00000100249c8b4c // mov r11, qword [rsp + 256] + QUAD $0x111e44203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r11 + 17], 8 + QUAD $0x0000013024bc8b4c // mov r15, qword [rsp + 304] + QUAD $0x113e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r15 + 17], 9 + QUAD $0x000000b024948b48 // mov rdx, qword [rsp + 176] + QUAD $0x0a111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 10 + QUAD $0x110644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r8 + 17], 11 + QUAD $0x112644203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r12 + 17], 12 + LONG $0x246c8b4c; BYTE $0x60 // mov r13, qword [rsp + 96] + QUAD $0x112e44203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r13 + 17], 13 + LONG $0x24748b4c; BYTE $0x10 // mov r14, qword [rsp + 16] + QUAD $0x113644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r14 + 17], 14 + LONG $0x24548b48; BYTE $0x20 // mov rdx, qword [rsp + 32] + QUAD $0x0f111644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 17], 15 LONG $0xdb0f4466; WORD $0x6075 // pand xmm14, oword 96[rbp] /* [rip + .LCPI10_6] */ LONG $0xeb0f4466; BYTE $0xf1 // por xmm14, xmm1 LONG $0xc86f0f66 // movdqa xmm1, xmm0 LONG $0x6f0f4566; BYTE $0xe5 // movdqa xmm12, xmm13 LONG $0xde0f4166; BYTE $0xcd // pmaxub xmm1, xmm13 LONG $0xc8740f66 // pcmpeqb xmm1, xmm0 - LONG $0x54b60f42; WORD $0x1d2e // movzx edx, byte [rsi + r13 + 29] + LONG $0x3e54b60f; BYTE $0x1d // movzx edx, byte [rsi + rdi + 29] LONG $0xc26e0f66 // movd xmm0, edx - QUAD $0x01120e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 18], 1 - QUAD $0x000000b024a48b4c // mov r12, qword [rsp + 176] - QUAD $0x122664203a0f4266; BYTE $0x02 // pinsrb xmm4, byte [rsi + r12 + 18], 2 + LONG $0x24548b48; BYTE $0x50 // mov rdx, qword [rsp + 80] + QUAD $0x01121664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 18], 1 + QUAD $0x02120664203a0f66 // pinsrb xmm4, byte [rsi + rax + 18], 2 QUAD $0x121664203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r10 + 18], 3 - QUAD $0x04120664203a0f66 // pinsrb xmm4, byte [rsi + rax + 18], 4 - WORD $0x894c; BYTE $0xc1 // mov rcx, r8 - QUAD $0x120664203a0f4266; BYTE $0x05 // pinsrb xmm4, byte [rsi + r8 + 18], 5 + QUAD $0x04120e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 18], 4 + LONG $0x244c8b48; BYTE $0x30 // mov rcx, qword [rsp + 48] + QUAD $0x05120e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 18], 5 WORD $0x894c; BYTE $0xcf // mov rdi, r9 QUAD $0x120e64203a0f4266; BYTE $0x06 // pinsrb xmm4, byte [rsi + r9 + 18], 6 - QUAD $0x121e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r11 + 18], 7 - WORD $0x8949; BYTE $0xd8 // mov r8, rbx - QUAD $0x08121e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 18], 8 - QUAD $0x00000110249c8b48 // mov rbx, qword [rsp + 272] - QUAD $0x09121e64203a0f66 // pinsrb xmm4, byte [rsi + 
rbx + 18], 9 - WORD $0x894d; BYTE $0xf1 // mov r9, r14 - QUAD $0x123664203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r14 + 18], 10 - WORD $0x894d; BYTE $0xfe // mov r14, r15 + QUAD $0x07121e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 18], 7 + WORD $0x894d; BYTE $0xd8 // mov r8, r11 + QUAD $0x121e64203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r11 + 18], 8 + WORD $0x894d; BYTE $0xf9 // mov r9, r15 + QUAD $0x123e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r15 + 18], 9 + QUAD $0x000000b0249c8b4c // mov r11, qword [rsp + 176] + QUAD $0x121e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r11 + 18], 10 + QUAD $0x000000a024bc8b4c // mov r15, qword [rsp + 160] QUAD $0x123e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 18], 11 - QUAD $0x000000c024bc8b4c // mov r15, qword [rsp + 192] - QUAD $0x123e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r15 + 18], 12 - LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] - QUAD $0x0d121664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 18], 13 - LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] - QUAD $0x0e120664203a0f66 // pinsrb xmm4, byte [rsi + rax + 18], 14 - LONG $0x246c8b4c; BYTE $0x30 // mov r13, qword [rsp + 48] + QUAD $0x122664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 18], 12 + WORD $0x894c; BYTE $0xea // mov rdx, r13 + QUAD $0x122e64203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r13 + 18], 13 + QUAD $0x123664203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r14 + 18], 14 + LONG $0x246c8b4c; BYTE $0x20 // mov r13, qword [rsp + 32] QUAD $0x122e64203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r13 + 18], 15 - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x0113066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 19], 1 - QUAD $0x13266c203a0f4266; BYTE $0x02 // pinsrb xmm5, byte [rsi + r12 + 19], 2 + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] + QUAD $0x0213066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 19], 2 QUAD $0x13166c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r10 + 19], 3 - LONG $0x24448b48; BYTE $0x60 // mov rax, qword [rsp + 96] + LONG $0x24448b48; BYTE $0x70 // mov rax, qword [rsp + 112] QUAD $0x0413066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 19], 4 QUAD $0x05130e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 19], 5 QUAD $0x06133e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 19], 6 - QUAD $0x131e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r11 + 19], 7 + QUAD $0x07131e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 19], 7 QUAD $0x13066c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r8 + 19], 8 - QUAD $0x09131e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 19], 9 - QUAD $0x130e6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r9 + 19], 10 - QUAD $0x13366c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r14 + 19], 11 - QUAD $0x133e6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r15 + 19], 12 + WORD $0x894d; BYTE $0xc6 // mov r14, r8 + QUAD $0x130e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r9 + 19], 9 + QUAD $0x131e6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r11 + 19], 10 + QUAD $0x133e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r15 + 19], 11 + QUAD $0x13266c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r12 + 19], 12 QUAD $0x0d13166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 19], 13 - WORD $0x8948; BYTE $0xd7 // mov rdi, rdx - LONG $0x24648b4c; BYTE $0x10 // mov r12, qword [rsp + 16] - QUAD $0x13266c203a0f4266; BYTE $0x0e // pinsrb 
xmm5, byte [rsi + r12 + 19], 14 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + QUAD $0x0e13066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 19], 14 QUAD $0x132e6c203a0f4266; BYTE $0x0f // pinsrb xmm5, byte [rsi + r13 + 19], 15 QUAD $0x000001008ddb0f66 // pand xmm1, oword 256[rbp] /* [rip + .LCPI10_16] */ - QUAD $0x0001b0248cf80f66; BYTE $0x00 // psubb xmm1, oword [rsp + 432] + QUAD $0x000170248cf80f66; BYTE $0x00 // psubb xmm1, oword [rsp + 368] LONG $0x6f0f4466; BYTE $0xec // movdqa xmm13, xmm4 LONG $0xde0f4566; BYTE $0xec // pmaxub xmm13, xmm12 LONG $0x740f4466; BYTE $0xec // pcmpeqb xmm13, xmm4 LONG $0x6f0f4466; BYTE $0xdd // movdqa xmm11, xmm5 LONG $0xde0f4566; BYTE $0xdc // pmaxub xmm11, xmm12 LONG $0x740f4466; BYTE $0xdd // pcmpeqb xmm11, xmm5 - QUAD $0x00000080248c8b48 // mov rcx, qword [rsp + 128] - LONG $0x0e54b60f; BYTE $0x1e // movzx edx, byte [rsi + rcx + 30] + QUAD $0x0000009024848b4c // mov r8, qword [rsp + 144] + LONG $0x54b60f42; WORD $0x1e06 // movzx edx, byte [rsi + r8 + 30] LONG $0xe26e0f66 // movd xmm4, edx - LONG $0x24448b48; BYTE $0x20 // mov rax, qword [rsp + 32] + LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] QUAD $0x01140654203a0f66 // pinsrb xmm2, byte [rsi + rax + 20], 1 QUAD $0x0115065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 21], 1 QUAD $0x160654203a0f4466; BYTE $0x01 // pinsrb xmm10, byte [rsi + rax + 22], 1 @@ -51357,11 +52849,11 @@ LBB10_195: QUAD $0x011b067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 27], 1 QUAD $0x1c064c203a0f4466; BYTE $0x01 // pinsrb xmm9, byte [rsi + rax + 28], 1 QUAD $0x011d0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 29], 1 - LONG $0x0e54b60f; BYTE $0x1f // movzx edx, byte [rsi + rcx + 31] + LONG $0x54b60f42; WORD $0x1f06 // movzx edx, byte [rsi + r8 + 31] QUAD $0x011e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 30], 1 LONG $0xea6e0f66 // movd xmm5, edx QUAD $0x011f066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 31], 1 - QUAD $0x000000b024848b48 // mov rax, qword [rsp + 176] + QUAD $0x0000008024848b48 // mov rax, qword [rsp + 128] QUAD $0x02140654203a0f66 // pinsrb xmm2, byte [rsi + rax + 20], 2 QUAD $0x0215065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 21], 2 QUAD $0x160654203a0f4466; BYTE $0x02 // pinsrb xmm10, byte [rsi + rax + 22], 2 @@ -51374,36 +52866,33 @@ LBB10_195: QUAD $0x021e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 30], 2 QUAD $0x021f066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 31], 2 QUAD $0x141654203a0f4266; BYTE $0x03 // pinsrb xmm2, byte [rsi + r10 + 20], 3 - LONG $0x24548b48; BYTE $0x60 // mov rdx, qword [rsp + 96] + LONG $0x24548b48; BYTE $0x70 // mov rdx, qword [rsp + 112] QUAD $0x04141654203a0f66 // pinsrb xmm2, byte [rsi + rdx + 20], 4 - QUAD $0x00000090248c8b48 // mov rcx, qword [rsp + 144] QUAD $0x05140e54203a0f66 // pinsrb xmm2, byte [rsi + rcx + 20], 5 - QUAD $0x000000f024848b48 // mov rax, qword [rsp + 240] - QUAD $0x06140654203a0f66 // pinsrb xmm2, byte [rsi + rax + 20], 6 - QUAD $0x141e54203a0f4266; BYTE $0x07 // pinsrb xmm2, byte [rsi + r11 + 20], 7 - QUAD $0x140654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r8 + 20], 8 - QUAD $0x09141e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 20], 9 - QUAD $0x140e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r9 + 20], 10 - QUAD $0x143654203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r14 + 20], 11 - QUAD $0x143e54203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r15 + 20], 12 - QUAD $0x0d143e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 20], 13 - QUAD $0x142654203a0f4266; BYTE $0x0e // pinsrb xmm2, byte [rsi + r12 + 
20], 14 + QUAD $0x06143e54203a0f66 // pinsrb xmm2, byte [rsi + rdi + 20], 6 + QUAD $0x07141e54203a0f66 // pinsrb xmm2, byte [rsi + rbx + 20], 7 + QUAD $0x143654203a0f4266; BYTE $0x08 // pinsrb xmm2, byte [rsi + r14 + 20], 8 + QUAD $0x140e54203a0f4266; BYTE $0x09 // pinsrb xmm2, byte [rsi + r9 + 20], 9 + QUAD $0x141e54203a0f4266; BYTE $0x0a // pinsrb xmm2, byte [rsi + r11 + 20], 10 + QUAD $0x143e54203a0f4266; BYTE $0x0b // pinsrb xmm2, byte [rsi + r15 + 20], 11 + QUAD $0x142654203a0f4266; BYTE $0x0c // pinsrb xmm2, byte [rsi + r12 + 20], 12 + LONG $0x24448b4c; BYTE $0x60 // mov r8, qword [rsp + 96] + QUAD $0x140654203a0f4266; BYTE $0x0d // pinsrb xmm2, byte [rsi + r8 + 20], 13 + LONG $0x24448b48; BYTE $0x10 // mov rax, qword [rsp + 16] + QUAD $0x0e140654203a0f66 // pinsrb xmm2, byte [rsi + rax + 20], 14 QUAD $0x142e54203a0f4266; BYTE $0x0f // pinsrb xmm2, byte [rsi + r13 + 20], 15 QUAD $0x15165c203a0f4266; BYTE $0x03 // pinsrb xmm3, byte [rsi + r10 + 21], 3 - WORD $0x894d; BYTE $0xd1 // mov r9, r10 QUAD $0x0415165c203a0f66 // pinsrb xmm3, byte [rsi + rdx + 21], 4 QUAD $0x05150e5c203a0f66 // pinsrb xmm3, byte [rsi + rcx + 21], 5 - QUAD $0x0615065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 21], 6 - QUAD $0x151e5c203a0f4266; BYTE $0x07 // pinsrb xmm3, byte [rsi + r11 + 21], 7 - QUAD $0x15065c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r8 + 21], 8 - QUAD $0x09151e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 21], 9 - QUAD $0x0000010024848b4c // mov r8, qword [rsp + 256] - QUAD $0x15065c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r8 + 21], 10 - QUAD $0x15365c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r14 + 21], 11 - QUAD $0x153e5c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r15 + 21], 12 - WORD $0x8949; BYTE $0xfa // mov r10, rdi - QUAD $0x0d153e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 21], 13 - QUAD $0x15265c203a0f4266; BYTE $0x0e // pinsrb xmm3, byte [rsi + r12 + 21], 14 + QUAD $0x06153e5c203a0f66 // pinsrb xmm3, byte [rsi + rdi + 21], 6 + QUAD $0x07151e5c203a0f66 // pinsrb xmm3, byte [rsi + rbx + 21], 7 + QUAD $0x15365c203a0f4266; BYTE $0x08 // pinsrb xmm3, byte [rsi + r14 + 21], 8 + QUAD $0x150e5c203a0f4266; BYTE $0x09 // pinsrb xmm3, byte [rsi + r9 + 21], 9 + QUAD $0x151e5c203a0f4266; BYTE $0x0a // pinsrb xmm3, byte [rsi + r11 + 21], 10 + QUAD $0x153e5c203a0f4266; BYTE $0x0b // pinsrb xmm3, byte [rsi + r15 + 21], 11 + QUAD $0x15265c203a0f4266; BYTE $0x0c // pinsrb xmm3, byte [rsi + r12 + 21], 12 + QUAD $0x15065c203a0f4266; BYTE $0x0d // pinsrb xmm3, byte [rsi + r8 + 21], 13 + QUAD $0x0e15065c203a0f66 // pinsrb xmm3, byte [rsi + rax + 21], 14 QUAD $0x000110addb0f4466; BYTE $0x00 // pand xmm13, oword 272[rbp] /* [rip + .LCPI10_17] */ QUAD $0x0001209ddb0f4466; BYTE $0x00 // pand xmm11, oword 288[rbp] /* [rip + .LCPI10_18] */ LONG $0xeb0f4566; BYTE $0xdd // por xmm11, xmm13 @@ -51415,38 +52904,37 @@ LBB10_195: LONG $0xd36f0f66 // movdqa xmm2, xmm3 LONG $0xde0f4166; BYTE $0xd4 // pmaxub xmm2, xmm12 LONG $0xd3740f66 // pcmpeqb xmm2, xmm3 - QUAD $0x160e54203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r9 + 22], 3 + QUAD $0x161654203a0f4666; BYTE $0x03 // pinsrb xmm10, byte [rsi + r10 + 22], 3 QUAD $0x161654203a0f4466; BYTE $0x04 // pinsrb xmm10, byte [rsi + rdx + 22], 4 QUAD $0x160e54203a0f4466; BYTE $0x05 // pinsrb xmm10, byte [rsi + rcx + 22], 5 - QUAD $0x160654203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rax + 22], 6 - QUAD $0x161e54203a0f4666; BYTE $0x07 // pinsrb xmm10, byte [rsi + r11 + 22], 7 - QUAD $0x000000e024bc8b48 // mov rdi, qword [rsp + 224] 
- QUAD $0x163e54203a0f4466; BYTE $0x08 // pinsrb xmm10, byte [rsi + rdi + 22], 8 - QUAD $0x161e54203a0f4466; BYTE $0x09 // pinsrb xmm10, byte [rsi + rbx + 22], 9 - QUAD $0x160654203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r8 + 22], 10 - QUAD $0x163654203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r14 + 22], 11 - QUAD $0x163e54203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r15 + 22], 12 - QUAD $0x161654203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r10 + 22], 13 - QUAD $0x162654203a0f4666; BYTE $0x0e // pinsrb xmm10, byte [rsi + r12 + 22], 14 + QUAD $0x163e54203a0f4466; BYTE $0x06 // pinsrb xmm10, byte [rsi + rdi + 22], 6 + QUAD $0x161e54203a0f4466; BYTE $0x07 // pinsrb xmm10, byte [rsi + rbx + 22], 7 + QUAD $0x163654203a0f4666; BYTE $0x08 // pinsrb xmm10, byte [rsi + r14 + 22], 8 + QUAD $0x160e54203a0f4666; BYTE $0x09 // pinsrb xmm10, byte [rsi + r9 + 22], 9 + QUAD $0x161e54203a0f4666; BYTE $0x0a // pinsrb xmm10, byte [rsi + r11 + 22], 10 + QUAD $0x163e54203a0f4666; BYTE $0x0b // pinsrb xmm10, byte [rsi + r15 + 22], 11 + QUAD $0x162654203a0f4666; BYTE $0x0c // pinsrb xmm10, byte [rsi + r12 + 22], 12 + QUAD $0x160654203a0f4666; BYTE $0x0d // pinsrb xmm10, byte [rsi + r8 + 22], 13 + QUAD $0x160654203a0f4466; BYTE $0x0e // pinsrb xmm10, byte [rsi + rax + 22], 14 QUAD $0x162e54203a0f4666; BYTE $0x0f // pinsrb xmm10, byte [rsi + r13 + 22], 15 - QUAD $0x170e44203a0f4666; BYTE $0x03 // pinsrb xmm8, byte [rsi + r9 + 23], 3 + QUAD $0x171644203a0f4666; BYTE $0x03 // pinsrb xmm8, byte [rsi + r10 + 23], 3 QUAD $0x171644203a0f4466; BYTE $0x04 // pinsrb xmm8, byte [rsi + rdx + 23], 4 QUAD $0x170e44203a0f4466; BYTE $0x05 // pinsrb xmm8, byte [rsi + rcx + 23], 5 - QUAD $0x170644203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rax + 23], 6 - QUAD $0x171e44203a0f4666; BYTE $0x07 // pinsrb xmm8, byte [rsi + r11 + 23], 7 - QUAD $0x173e44203a0f4466; BYTE $0x08 // pinsrb xmm8, byte [rsi + rdi + 23], 8 - QUAD $0x171e44203a0f4466; BYTE $0x09 // pinsrb xmm8, byte [rsi + rbx + 23], 9 - QUAD $0x170644203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r8 + 23], 10 - QUAD $0x173644203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r14 + 23], 11 - QUAD $0x173e44203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r15 + 23], 12 - QUAD $0x171644203a0f4666; BYTE $0x0d // pinsrb xmm8, byte [rsi + r10 + 23], 13 + QUAD $0x173e44203a0f4466; BYTE $0x06 // pinsrb xmm8, byte [rsi + rdi + 23], 6 + QUAD $0x171e44203a0f4466; BYTE $0x07 // pinsrb xmm8, byte [rsi + rbx + 23], 7 + QUAD $0x173644203a0f4666; BYTE $0x08 // pinsrb xmm8, byte [rsi + r14 + 23], 8 + QUAD $0x170e44203a0f4666; BYTE $0x09 // pinsrb xmm8, byte [rsi + r9 + 23], 9 + QUAD $0x171e44203a0f4666; BYTE $0x0a // pinsrb xmm8, byte [rsi + r11 + 23], 10 + QUAD $0x173e44203a0f4666; BYTE $0x0b // pinsrb xmm8, byte [rsi + r15 + 23], 11 + QUAD $0x172644203a0f4666; BYTE $0x0c // pinsrb xmm8, byte [rsi + r12 + 23], 12 + QUAD $0x170644203a0f4666; BYTE $0x0d // pinsrb xmm8, byte [rsi + r8 + 23], 13 QUAD $0x000001308ddb0f66 // pand xmm1, oword 304[rbp] /* [rip + .LCPI10_19] */ QUAD $0x0000014095db0f66 // pand xmm2, oword 320[rbp] /* [rip + .LCPI10_20] */ LONG $0xd1eb0f66 // por xmm2, xmm1 LONG $0x6f0f4166; BYTE $0xca // movdqa xmm1, xmm10 LONG $0xde0f4166; BYTE $0xcc // pmaxub xmm1, xmm12 LONG $0x740f4166; BYTE $0xca // pcmpeqb xmm1, xmm10 - QUAD $0x172644203a0f4666; BYTE $0x0e // pinsrb xmm8, byte [rsi + r12 + 23], 14 + QUAD $0x170644203a0f4466; BYTE $0x0e // pinsrb xmm8, byte [rsi + rax + 23], 14 QUAD $0x000001508ddb0f66 // pand xmm1, oword 336[rbp] /* [rip 
+ .LCPI10_21] */ LONG $0xcaeb0f66 // por xmm1, xmm2 QUAD $0x172e44203a0f4666; BYTE $0x0f // pinsrb xmm8, byte [rsi + r13 + 23], 15 @@ -51454,18 +52942,18 @@ LBB10_195: LONG $0x6f0f4566; BYTE $0xd0 // movdqa xmm10, xmm8 LONG $0xde0f4566; BYTE $0xd4 // pmaxub xmm10, xmm12 LONG $0x740f4566; BYTE $0xd0 // pcmpeqb xmm10, xmm8 - QUAD $0x190e7c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rsi + r9 + 25], 3 + QUAD $0x19167c203a0f4666; BYTE $0x03 // pinsrb xmm15, byte [rsi + r10 + 25], 3 QUAD $0x19167c203a0f4466; BYTE $0x04 // pinsrb xmm15, byte [rsi + rdx + 25], 4 QUAD $0x190e7c203a0f4466; BYTE $0x05 // pinsrb xmm15, byte [rsi + rcx + 25], 5 - QUAD $0x19067c203a0f4466; BYTE $0x06 // pinsrb xmm15, byte [rsi + rax + 25], 6 - QUAD $0x191e7c203a0f4666; BYTE $0x07 // pinsrb xmm15, byte [rsi + r11 + 25], 7 - QUAD $0x193e7c203a0f4466; BYTE $0x08 // pinsrb xmm15, byte [rsi + rdi + 25], 8 - QUAD $0x191e7c203a0f4466; BYTE $0x09 // pinsrb xmm15, byte [rsi + rbx + 25], 9 - QUAD $0x19067c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r8 + 25], 10 - QUAD $0x19367c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r14 + 25], 11 - QUAD $0x193e7c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r15 + 25], 12 - QUAD $0x19167c203a0f4666; BYTE $0x0d // pinsrb xmm15, byte [rsi + r10 + 25], 13 - QUAD $0x19267c203a0f4666; BYTE $0x0e // pinsrb xmm15, byte [rsi + r12 + 25], 14 + QUAD $0x193e7c203a0f4466; BYTE $0x06 // pinsrb xmm15, byte [rsi + rdi + 25], 6 + QUAD $0x191e7c203a0f4466; BYTE $0x07 // pinsrb xmm15, byte [rsi + rbx + 25], 7 + QUAD $0x19367c203a0f4666; BYTE $0x08 // pinsrb xmm15, byte [rsi + r14 + 25], 8 + QUAD $0x190e7c203a0f4666; BYTE $0x09 // pinsrb xmm15, byte [rsi + r9 + 25], 9 + QUAD $0x191e7c203a0f4666; BYTE $0x0a // pinsrb xmm15, byte [rsi + r11 + 25], 10 + QUAD $0x193e7c203a0f4666; BYTE $0x0b // pinsrb xmm15, byte [rsi + r15 + 25], 11 + QUAD $0x19267c203a0f4666; BYTE $0x0c // pinsrb xmm15, byte [rsi + r12 + 25], 12 + QUAD $0x19067c203a0f4666; BYTE $0x0d // pinsrb xmm15, byte [rsi + r8 + 25], 13 + QUAD $0x19067c203a0f4466; BYTE $0x0e // pinsrb xmm15, byte [rsi + rax + 25], 14 QUAD $0x192e7c203a0f4666; BYTE $0x0f // pinsrb xmm15, byte [rsi + r13 + 25], 15 LONG $0x6f0f4466; WORD $0x605d // movdqa xmm11, oword 96[rbp] /* [rip + .LCPI10_6] */ LONG $0xdb0f4566; BYTE $0xd3 // pand xmm10, xmm11 @@ -51473,65 +52961,65 @@ LBB10_195: LONG $0x6f0f4166; BYTE $0xdf // movdqa xmm3, xmm15 LONG $0xde0f4166; BYTE $0xdc // pmaxub xmm3, xmm12 LONG $0x740f4166; BYTE $0xdf // pcmpeqb xmm3, xmm15 - QUAD $0x1a0e74203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r9 + 26], 3 + QUAD $0x1a1674203a0f4266; BYTE $0x03 // pinsrb xmm6, byte [rsi + r10 + 26], 3 QUAD $0x041a1674203a0f66 // pinsrb xmm6, byte [rsi + rdx + 26], 4 QUAD $0x051a0e74203a0f66 // pinsrb xmm6, byte [rsi + rcx + 26], 5 - QUAD $0x061a0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 26], 6 - QUAD $0x1a1e74203a0f4266; BYTE $0x07 // pinsrb xmm6, byte [rsi + r11 + 26], 7 - QUAD $0x081a3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 26], 8 - QUAD $0x091a1e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 26], 9 - QUAD $0x1a0674203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r8 + 26], 10 - QUAD $0x1a3674203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r14 + 26], 11 - QUAD $0x1a3e74203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r15 + 26], 12 - QUAD $0x1a1674203a0f4266; BYTE $0x0d // pinsrb xmm6, byte [rsi + r10 + 26], 13 - QUAD $0x1a2674203a0f4266; BYTE $0x0e // pinsrb xmm6, byte [rsi + r12 + 26], 14 + QUAD $0x061a3e74203a0f66 // pinsrb xmm6, byte [rsi + rdi + 
26], 6 + QUAD $0x071a1e74203a0f66 // pinsrb xmm6, byte [rsi + rbx + 26], 7 + QUAD $0x1a3674203a0f4266; BYTE $0x08 // pinsrb xmm6, byte [rsi + r14 + 26], 8 + QUAD $0x1a0e74203a0f4266; BYTE $0x09 // pinsrb xmm6, byte [rsi + r9 + 26], 9 + QUAD $0x1a1e74203a0f4266; BYTE $0x0a // pinsrb xmm6, byte [rsi + r11 + 26], 10 + QUAD $0x1a3e74203a0f4266; BYTE $0x0b // pinsrb xmm6, byte [rsi + r15 + 26], 11 + QUAD $0x1a2674203a0f4266; BYTE $0x0c // pinsrb xmm6, byte [rsi + r12 + 26], 12 + QUAD $0x1a0674203a0f4266; BYTE $0x0d // pinsrb xmm6, byte [rsi + r8 + 26], 13 + QUAD $0x0e1a0674203a0f66 // pinsrb xmm6, byte [rsi + rax + 26], 14 QUAD $0x1a2e74203a0f4266; BYTE $0x0f // pinsrb xmm6, byte [rsi + r13 + 26], 15 - QUAD $0x1b0e7c203a0f4266; BYTE $0x03 // pinsrb xmm7, byte [rsi + r9 + 27], 3 + QUAD $0x1b167c203a0f4266; BYTE $0x03 // pinsrb xmm7, byte [rsi + r10 + 27], 3 QUAD $0x041b167c203a0f66 // pinsrb xmm7, byte [rsi + rdx + 27], 4 QUAD $0x051b0e7c203a0f66 // pinsrb xmm7, byte [rsi + rcx + 27], 5 - QUAD $0x061b067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 27], 6 - QUAD $0x1b1e7c203a0f4266; BYTE $0x07 // pinsrb xmm7, byte [rsi + r11 + 27], 7 - QUAD $0x081b3e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 27], 8 - QUAD $0x091b1e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 27], 9 - QUAD $0x1b067c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r8 + 27], 10 - QUAD $0x1b367c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r14 + 27], 11 - QUAD $0x1b3e7c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r15 + 27], 12 - QUAD $0x1b167c203a0f4266; BYTE $0x0d // pinsrb xmm7, byte [rsi + r10 + 27], 13 - QUAD $0x1b267c203a0f4266; BYTE $0x0e // pinsrb xmm7, byte [rsi + r12 + 27], 14 + QUAD $0x061b3e7c203a0f66 // pinsrb xmm7, byte [rsi + rdi + 27], 6 + QUAD $0x071b1e7c203a0f66 // pinsrb xmm7, byte [rsi + rbx + 27], 7 + QUAD $0x1b367c203a0f4266; BYTE $0x08 // pinsrb xmm7, byte [rsi + r14 + 27], 8 + QUAD $0x1b0e7c203a0f4266; BYTE $0x09 // pinsrb xmm7, byte [rsi + r9 + 27], 9 + QUAD $0x1b1e7c203a0f4266; BYTE $0x0a // pinsrb xmm7, byte [rsi + r11 + 27], 10 + QUAD $0x1b3e7c203a0f4266; BYTE $0x0b // pinsrb xmm7, byte [rsi + r15 + 27], 11 + QUAD $0x1b267c203a0f4266; BYTE $0x0c // pinsrb xmm7, byte [rsi + r12 + 27], 12 + QUAD $0x1b067c203a0f4266; BYTE $0x0d // pinsrb xmm7, byte [rsi + r8 + 27], 13 + QUAD $0x0e1b067c203a0f66 // pinsrb xmm7, byte [rsi + rax + 27], 14 QUAD $0x1b2e7c203a0f4266; BYTE $0x0f // pinsrb xmm7, byte [rsi + r13 + 27], 15 QUAD $0x000001009ddb0f66 // pand xmm3, oword 256[rbp] /* [rip + .LCPI10_16] */ - QUAD $0x000140249cf80f66; BYTE $0x00 // psubb xmm3, oword [rsp + 320] + QUAD $0x0000f0249cf80f66; BYTE $0x00 // psubb xmm3, oword [rsp + 240] LONG $0xd66f0f66 // movdqa xmm2, xmm6 LONG $0xde0f4166; BYTE $0xd4 // pmaxub xmm2, xmm12 LONG $0xd6740f66 // pcmpeqb xmm2, xmm6 LONG $0xcf6f0f66 // movdqa xmm1, xmm7 LONG $0xde0f4166; BYTE $0xcc // pmaxub xmm1, xmm12 LONG $0xcf740f66 // pcmpeqb xmm1, xmm7 - QUAD $0x1c0e4c203a0f4666; BYTE $0x03 // pinsrb xmm9, byte [rsi + r9 + 28], 3 + QUAD $0x1c164c203a0f4666; BYTE $0x03 // pinsrb xmm9, byte [rsi + r10 + 28], 3 QUAD $0x1c164c203a0f4466; BYTE $0x04 // pinsrb xmm9, byte [rsi + rdx + 28], 4 QUAD $0x1c0e4c203a0f4466; BYTE $0x05 // pinsrb xmm9, byte [rsi + rcx + 28], 5 - QUAD $0x1c064c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rax + 28], 6 - QUAD $0x1c1e4c203a0f4666; BYTE $0x07 // pinsrb xmm9, byte [rsi + r11 + 28], 7 - QUAD $0x1c3e4c203a0f4466; BYTE $0x08 // pinsrb xmm9, byte [rsi + rdi + 28], 8 - QUAD $0x1c1e4c203a0f4466; BYTE $0x09 // pinsrb xmm9, byte [rsi + rbx 
+ 28], 9 - QUAD $0x1c064c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r8 + 28], 10 - QUAD $0x1c364c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r14 + 28], 11 - QUAD $0x1c3e4c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r15 + 28], 12 - QUAD $0x1c164c203a0f4666; BYTE $0x0d // pinsrb xmm9, byte [rsi + r10 + 28], 13 - QUAD $0x1c264c203a0f4666; BYTE $0x0e // pinsrb xmm9, byte [rsi + r12 + 28], 14 + QUAD $0x1c3e4c203a0f4466; BYTE $0x06 // pinsrb xmm9, byte [rsi + rdi + 28], 6 + QUAD $0x1c1e4c203a0f4466; BYTE $0x07 // pinsrb xmm9, byte [rsi + rbx + 28], 7 + QUAD $0x1c364c203a0f4666; BYTE $0x08 // pinsrb xmm9, byte [rsi + r14 + 28], 8 + QUAD $0x1c0e4c203a0f4666; BYTE $0x09 // pinsrb xmm9, byte [rsi + r9 + 28], 9 + QUAD $0x1c1e4c203a0f4666; BYTE $0x0a // pinsrb xmm9, byte [rsi + r11 + 28], 10 + QUAD $0x1c3e4c203a0f4666; BYTE $0x0b // pinsrb xmm9, byte [rsi + r15 + 28], 11 + QUAD $0x1c264c203a0f4666; BYTE $0x0c // pinsrb xmm9, byte [rsi + r12 + 28], 12 + QUAD $0x1c064c203a0f4666; BYTE $0x0d // pinsrb xmm9, byte [rsi + r8 + 28], 13 + QUAD $0x1c064c203a0f4466; BYTE $0x0e // pinsrb xmm9, byte [rsi + rax + 28], 14 QUAD $0x1c2e4c203a0f4666; BYTE $0x0f // pinsrb xmm9, byte [rsi + r13 + 28], 15 - QUAD $0x1d0e44203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r9 + 29], 3 + QUAD $0x1d1644203a0f4266; BYTE $0x03 // pinsrb xmm0, byte [rsi + r10 + 29], 3 QUAD $0x041d1644203a0f66 // pinsrb xmm0, byte [rsi + rdx + 29], 4 QUAD $0x051d0e44203a0f66 // pinsrb xmm0, byte [rsi + rcx + 29], 5 - QUAD $0x061d0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 29], 6 - QUAD $0x1d1e44203a0f4266; BYTE $0x07 // pinsrb xmm0, byte [rsi + r11 + 29], 7 - QUAD $0x081d3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 29], 8 - QUAD $0x091d1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 29], 9 - QUAD $0x1d0644203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r8 + 29], 10 - QUAD $0x1d3644203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r14 + 29], 11 - QUAD $0x1d3e44203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r15 + 29], 12 - QUAD $0x1d1644203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r10 + 29], 13 - QUAD $0x1d2644203a0f4266; BYTE $0x0e // pinsrb xmm0, byte [rsi + r12 + 29], 14 + QUAD $0x061d3e44203a0f66 // pinsrb xmm0, byte [rsi + rdi + 29], 6 + QUAD $0x071d1e44203a0f66 // pinsrb xmm0, byte [rsi + rbx + 29], 7 + QUAD $0x1d3644203a0f4266; BYTE $0x08 // pinsrb xmm0, byte [rsi + r14 + 29], 8 + QUAD $0x1d0e44203a0f4266; BYTE $0x09 // pinsrb xmm0, byte [rsi + r9 + 29], 9 + QUAD $0x1d1e44203a0f4266; BYTE $0x0a // pinsrb xmm0, byte [rsi + r11 + 29], 10 + QUAD $0x1d3e44203a0f4266; BYTE $0x0b // pinsrb xmm0, byte [rsi + r15 + 29], 11 + QUAD $0x1d2644203a0f4266; BYTE $0x0c // pinsrb xmm0, byte [rsi + r12 + 29], 12 + QUAD $0x1d0644203a0f4266; BYTE $0x0d // pinsrb xmm0, byte [rsi + r8 + 29], 13 + QUAD $0x0e1d0644203a0f66 // pinsrb xmm0, byte [rsi + rax + 29], 14 QUAD $0x1d2e44203a0f4266; BYTE $0x0f // pinsrb xmm0, byte [rsi + r13 + 29], 15 QUAD $0x0000011095db0f66 // pand xmm2, oword 272[rbp] /* [rip + .LCPI10_17] */ QUAD $0x000001208ddb0f66 // pand xmm1, oword 288[rbp] /* [rip + .LCPI10_18] */ @@ -51543,34 +53031,31 @@ LBB10_195: LONG $0xd86f0f66 // movdqa xmm3, xmm0 LONG $0xde0f4166; BYTE $0xdc // pmaxub xmm3, xmm12 LONG $0xd8740f66 // pcmpeqb xmm3, xmm0 - QUAD $0x1e0e64203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r9 + 30], 3 - QUAD $0x1f0e6c203a0f4266; BYTE $0x03 // pinsrb xmm5, byte [rsi + r9 + 31], 3 + QUAD $0x1e1664203a0f4266; BYTE $0x03 // pinsrb xmm4, byte [rsi + r10 + 30], 3 + QUAD $0x1f166c203a0f4266; BYTE 
$0x03 // pinsrb xmm5, byte [rsi + r10 + 31], 3 QUAD $0x041e1664203a0f66 // pinsrb xmm4, byte [rsi + rdx + 30], 4 QUAD $0x041f166c203a0f66 // pinsrb xmm5, byte [rsi + rdx + 31], 4 QUAD $0x051e0e64203a0f66 // pinsrb xmm4, byte [rsi + rcx + 30], 5 QUAD $0x051f0e6c203a0f66 // pinsrb xmm5, byte [rsi + rcx + 31], 5 - QUAD $0x061e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 30], 6 - QUAD $0x061f066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 31], 6 - QUAD $0x1e1e64203a0f4266; BYTE $0x07 // pinsrb xmm4, byte [rsi + r11 + 30], 7 - QUAD $0x1f1e6c203a0f4266; BYTE $0x07 // pinsrb xmm5, byte [rsi + r11 + 31], 7 - WORD $0x8948; BYTE $0xf8 // mov rax, rdi - QUAD $0x081e3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 30], 8 - QUAD $0x081f3e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 31], 8 - QUAD $0x091e1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 30], 9 - QUAD $0x091f1e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 31], 9 + QUAD $0x061e3e64203a0f66 // pinsrb xmm4, byte [rsi + rdi + 30], 6 + QUAD $0x061f3e6c203a0f66 // pinsrb xmm5, byte [rsi + rdi + 31], 6 + QUAD $0x071e1e64203a0f66 // pinsrb xmm4, byte [rsi + rbx + 30], 7 + QUAD $0x071f1e6c203a0f66 // pinsrb xmm5, byte [rsi + rbx + 31], 7 + QUAD $0x1e3664203a0f4266; BYTE $0x08 // pinsrb xmm4, byte [rsi + r14 + 30], 8 + QUAD $0x1f366c203a0f4266; BYTE $0x08 // pinsrb xmm5, byte [rsi + r14 + 31], 8 + QUAD $0x1e0e64203a0f4266; BYTE $0x09 // pinsrb xmm4, byte [rsi + r9 + 30], 9 + QUAD $0x1f0e6c203a0f4266; BYTE $0x09 // pinsrb xmm5, byte [rsi + r9 + 31], 9 + QUAD $0x1e1e64203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r11 + 30], 10 + QUAD $0x1f1e6c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r11 + 31], 10 QUAD $0x0000016024b48b4c // mov r14, qword [rsp + 352] - WORD $0x894c; BYTE $0xc0 // mov rax, r8 - QUAD $0x1e0664203a0f4266; BYTE $0x0a // pinsrb xmm4, byte [rsi + r8 + 30], 10 - QUAD $0x1f066c203a0f4266; BYTE $0x0a // pinsrb xmm5, byte [rsi + r8 + 31], 10 - LONG $0x24448b48; BYTE $0x50 // mov rax, qword [rsp + 80] - QUAD $0x0b1e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 30], 11 - QUAD $0x0b1f066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 31], 11 - QUAD $0x1e3e64203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r15 + 30], 12 - QUAD $0x1f3e6c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r15 + 31], 12 - QUAD $0x1e1664203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r10 + 30], 13 - QUAD $0x1f166c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r10 + 31], 13 - QUAD $0x1e2664203a0f4266; BYTE $0x0e // pinsrb xmm4, byte [rsi + r12 + 30], 14 - QUAD $0x1f266c203a0f4266; BYTE $0x0e // pinsrb xmm5, byte [rsi + r12 + 31], 14 + QUAD $0x1e3e64203a0f4266; BYTE $0x0b // pinsrb xmm4, byte [rsi + r15 + 30], 11 + QUAD $0x1f3e6c203a0f4266; BYTE $0x0b // pinsrb xmm5, byte [rsi + r15 + 31], 11 + QUAD $0x1e2664203a0f4266; BYTE $0x0c // pinsrb xmm4, byte [rsi + r12 + 30], 12 + QUAD $0x1f266c203a0f4266; BYTE $0x0c // pinsrb xmm5, byte [rsi + r12 + 31], 12 + QUAD $0x1e0664203a0f4266; BYTE $0x0d // pinsrb xmm4, byte [rsi + r8 + 30], 13 + QUAD $0x1f066c203a0f4266; BYTE $0x0d // pinsrb xmm5, byte [rsi + r8 + 31], 13 + QUAD $0x0e1e0664203a0f66 // pinsrb xmm4, byte [rsi + rax + 30], 14 + QUAD $0x0e1f066c203a0f66 // pinsrb xmm5, byte [rsi + rax + 31], 14 QUAD $0x1e2e64203a0f4266; BYTE $0x0f // pinsrb xmm4, byte [rsi + r13 + 30], 15 QUAD $0x0000013095db0f66 // pand xmm2, oword 304[rbp] /* [rip + .LCPI10_19] */ QUAD $0x000001409ddb0f66 // pand xmm3, oword 320[rbp] /* [rip + .LCPI10_20] */ @@ -51589,7 +53074,7 @@ LBB10_195: LONG $0xc8eb0f66 // por xmm1, xmm0 LONG 
$0x6f0f4166; BYTE $0xc2 // movdqa xmm0, xmm10 LONG $0xc1600f66 // punpcklbw xmm0, xmm1 - QUAD $0x0000a024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 160] + QUAD $0x00011024a46f0f66; BYTE $0x00 // movdqa xmm4, oword [rsp + 272] LONG $0xd46f0f66 // movdqa xmm2, xmm4 LONG $0x600f4166; BYTE $0xd6 // punpcklbw xmm2, xmm14 LONG $0xda6f0f66 // movdqa xmm3, xmm2 @@ -51606,25 +53091,24 @@ LBB10_195: LONG $0x7f0f41f3; WORD $0x8e54; BYTE $0x10 // movdqu oword [r14 + 4*rcx + 16], xmm2 LONG $0x7f0f41f3; WORD $0x8e1c // movdqu oword [r14 + 4*rcx], xmm3 LONG $0x10c18348 // add rcx, 16 - WORD $0x8948; BYTE $0xca // mov rdx, rcx + WORD $0x8948; BYTE $0xc8 // mov rax, rcx QUAD $0x000001a0248c3b48 // cmp rcx, qword [rsp + 416] - JNE LBB10_195 + JNE LBB10_67 QUAD $0x000001d024bc8b4c // mov r15, qword [rsp + 464] QUAD $0x000001a024bc3b4c // cmp r15, qword [rsp + 416] LONG $0x245c8a44; BYTE $0x08 // mov r11b, byte [rsp + 8] QUAD $0x0000018824b48b48 // mov rsi, qword [rsp + 392] LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] - JNE LBB10_67 - JMP LBB10_132 + JNE LBB10_69 + JMP LBB10_72 -LBB10_197: - WORD $0x894c; BYTE $0xf8 // mov rax, r15 - LONG $0xf8e08348 // and rax, -8 - WORD $0x8949; BYTE $0xc3 // mov r11, rax +LBB10_124: + LONG $0xf8e78349 // and r15, -8 + WORD $0x894d; BYTE $0xfb // mov r11, r15 LONG $0x06e3c149 // shl r11, 6 WORD $0x0149; BYTE $0xf3 // add r11, rsi - QUAD $0x0000019024848948 // mov qword [rsp + 400], rax - LONG $0x86048d49 // lea rax, [r14 + 4*rax] + QUAD $0x0000019024bc894c // mov qword [rsp + 400], r15 + LONG $0xbe048d4b // lea rax, [r14 + 4*r15] LONG $0x24448948; BYTE $0x08 // mov qword [rsp + 8], rax QUAD $0x00018824846e0f66; BYTE $0x00 // movd xmm0, dword [rsp + 392] LONG $0xc0700ff2; BYTE $0xe0 // pshuflw xmm0, xmm0, 224 @@ -51632,7 +53116,7 @@ LBB10_197: QUAD $0x0001d024847f0f66; BYTE $0x00 // movdqa oword [rsp + 464], xmm0 WORD $0x3145; BYTE $0xd2 // xor r10d, r10d -LBB10_198: +LBB10_125: WORD $0x894d; BYTE $0xd1 // mov r9, r10 LONG $0x06e1c149 // shl r9, 6 WORD $0x894d; BYTE $0xc8 // mov r8, r9 @@ -51728,7 +53212,7 @@ LBB10_198: QUAD $0x06082664c40f4666 // pinsrw xmm12, word [rsi + r12 + 8], 6 QUAD $0x07082e64c40f4666 // pinsrw xmm12, word [rsi + r13 + 8], 7 LONG $0x650f4166; BYTE $0xfa // pcmpgtw xmm7, xmm10 - LONG $0x7c7f0f66; WORD $0x7024 // movdqa oword [rsp + 112], xmm7 + QUAD $0x00009024bc7f0f66; BYTE $0x00 // movdqa oword [rsp + 144], xmm7 LONG $0xf86f0f66 // movdqa xmm7, xmm0 LONG $0x650f4166; BYTE $0xfc // pcmpgtw xmm7, xmm12 LONG $0x7c7f0f66; WORD $0x2024 // movdqa oword [rsp + 32], xmm7 @@ -51787,7 +53271,7 @@ LBB10_198: QUAD $0x05143e5cc40f4266 // pinsrw xmm3, word [rsi + r15 + 20], 5 QUAD $0x0614265cc40f4266 // pinsrw xmm3, word [rsi + r12 + 20], 6 LONG $0xca650f66 // pcmpgtw xmm1, xmm2 - QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 + QUAD $0x0000a0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm1 QUAD $0x07142e5cc40f4266 // pinsrw xmm3, word [rsi + r13 + 20], 7 LONG $0xc86f0f66 // movdqa xmm1, xmm0 LONG $0xcb650f66 // pcmpgtw xmm1, xmm3 @@ -51808,7 +53292,7 @@ LBB10_198: QUAD $0x0618266cc40f4266 // pinsrw xmm5, word [rsi + r12 + 24], 6 QUAD $0x07182e6cc40f4266 // pinsrw xmm5, word [rsi + r13 + 24], 7 LONG $0xcc650f66 // pcmpgtw xmm1, xmm4 - QUAD $0x0000c0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm1 + QUAD $0x0000b0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 176], xmm1 LONG $0xc86f0f66 // movdqa xmm1, xmm0 LONG $0xcd650f66 // pcmpgtw xmm1, xmm5 LONG $0x4c7f0f66; WORD $0x1024 // movdqa oword [rsp + 16], 
xmm1 @@ -51827,7 +53311,7 @@ LBB10_198: QUAD $0x051c3e7cc40f4666 // pinsrw xmm15, word [rsi + r15 + 28], 5 QUAD $0x061c267cc40f4666 // pinsrw xmm15, word [rsi + r12 + 28], 6 LONG $0xce650f66 // pcmpgtw xmm1, xmm6 - QUAD $0x0000d0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm1 + QUAD $0x0000c0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 192], xmm1 QUAD $0x071c2e7cc40f4666 // pinsrw xmm15, word [rsi + r13 + 28], 7 LONG $0xca6e0f66 // movd xmm1, edx LONG $0x4cc40f66; WORD $0x1e0e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 30], 1 @@ -51838,11 +53322,11 @@ LBB10_198: QUAD $0x061e264cc40f4266 // pinsrw xmm1, word [rsi + r12 + 30], 6 LONG $0xd06f0f66 // movdqa xmm2, xmm0 LONG $0x650f4166; BYTE $0xd7 // pcmpgtw xmm2, xmm15 - QUAD $0x0000f024947f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm2 + QUAD $0x0000e024947f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm2 QUAD $0x071e2e4cc40f4266 // pinsrw xmm1, word [rsi + r13 + 30], 7 LONG $0xd06f0f66 // movdqa xmm2, xmm0 LONG $0xd1650f66 // pcmpgtw xmm2, xmm1 - QUAD $0x00009024947f0f66; BYTE $0x00 // movdqa oword [rsp + 144], xmm2 + LONG $0x547f0f66; WORD $0x7024 // movdqa oword [rsp + 112], xmm2 LONG $0x44b70f42; WORD $0x200e // movzx eax, word [rsi + r9 + 32] LONG $0xc86e0f66 // movd xmm1, eax LONG $0x4cc40f66; WORD $0x200e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 32], 1 @@ -51863,10 +53347,10 @@ LBB10_198: QUAD $0x07222e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 34], 7 LONG $0xd86f0f66 // movdqa xmm3, xmm0 LONG $0xd9650f66 // pcmpgtw xmm3, xmm1 - QUAD $0x0000e0249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 224], xmm3 + QUAD $0x0000d0249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 208], xmm3 LONG $0xc86f0f66 // movdqa xmm1, xmm0 LONG $0xca650f66 // pcmpgtw xmm1, xmm2 - QUAD $0x000120248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 288], xmm1 + QUAD $0x000100248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm1 LONG $0x44b70f42; WORD $0x240e // movzx eax, word [rsi + r9 + 36] LONG $0xc86e0f66 // movd xmm1, eax LONG $0x4cc40f66; WORD $0x240e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 36], 1 @@ -51886,11 +53370,11 @@ LBB10_198: QUAD $0x06262654c40f4266 // pinsrw xmm2, word [rsi + r12 + 38], 6 LONG $0xd86f0f66 // movdqa xmm3, xmm0 LONG $0xd9650f66 // pcmpgtw xmm3, xmm1 - QUAD $0x000100249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 256], xmm3 + QUAD $0x000120249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 288], xmm3 QUAD $0x07262e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 38], 7 LONG $0xc86f0f66 // movdqa xmm1, xmm0 LONG $0xca650f66 // pcmpgtw xmm1, xmm2 - QUAD $0x000110248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm1 + QUAD $0x000130248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 304], xmm1 LONG $0x44b70f42; WORD $0x280e // movzx eax, word [rsi + r9 + 40] LONG $0xc86e0f66 // movd xmm1, eax LONG $0x4cc40f66; WORD $0x280e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 40], 1 @@ -51911,10 +53395,10 @@ LBB10_198: QUAD $0x072a2e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 42], 7 LONG $0xd86f0f66 // movdqa xmm3, xmm0 LONG $0xd9650f66 // pcmpgtw xmm3, xmm1 - QUAD $0x000130249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 304], xmm3 + QUAD $0x000150249c7f0f66; BYTE $0x00 // movdqa oword [rsp + 336], xmm3 LONG $0xc86f0f66 // movdqa xmm1, xmm0 LONG $0xca650f66 // pcmpgtw xmm1, xmm2 - QUAD $0x000150248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 336], xmm1 + QUAD $0x000110248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 272], xmm1 LONG $0x44b70f42; WORD $0x2c0e // movzx eax, word [rsi + r9 + 44] LONG $0xc86e0f66 // movd xmm1, eax LONG $0x4cc40f66; 
WORD $0x2c0e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 44], 1 @@ -51938,7 +53422,7 @@ LBB10_198: QUAD $0x072e2e54c40f4266 // pinsrw xmm2, word [rsi + r13 + 46], 7 LONG $0xc86f0f66 // movdqa xmm1, xmm0 LONG $0xca650f66 // pcmpgtw xmm1, xmm2 - QUAD $0x0000a0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 160], xmm1 + QUAD $0x0000f0248c7f0f66; BYTE $0x00 // movdqa oword [rsp + 240], xmm1 LONG $0x44b70f42; WORD $0x300e // movzx eax, word [rsi + r9 + 48] LONG $0xc86e0f66 // movd xmm1, eax LONG $0x4cc40f66; WORD $0x300e; BYTE $0x01 // pinsrw xmm1, word [rsi + rcx + 48], 1 @@ -52047,7 +53531,7 @@ LBB10_198: LONG $0x760f4566; BYTE $0xc0 // pcmpeqd xmm8, xmm8 LONG $0xc0630f66 // packsswb xmm0, xmm0 LONG $0xf8f80f66 // psubb xmm7, xmm0 - LONG $0x546f0f66; WORD $0x7024 // movdqa xmm2, oword [rsp + 112] + QUAD $0x00009024946f0f66; BYTE $0x00 // movdqa xmm2, oword [rsp + 144] LONG $0xd2630f66 // packsswb xmm2, xmm2 QUAD $0x0000a09d6f0f4466; BYTE $0x00 // movdqa xmm11, oword 160[rbp] /* [rip + .LCPI10_10] */ LONG $0xc26f0f66 // movdqa xmm0, xmm2 @@ -52077,7 +53561,7 @@ LBB10_198: LONG $0xc26f0f66 // movdqa xmm0, xmm2 LONG $0x380f4466; WORD $0xf410 // pblendvb xmm14, xmm4, xmm0 LONG $0xeb0f4166; BYTE $0xcb // por xmm1, xmm11 - QUAD $0x0000b024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 176] + QUAD $0x0000a024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 160] LONG $0xc0630f66 // packsswb xmm0, xmm0 LONG $0x6f0f4466; BYTE $0xdb // movdqa xmm11, xmm3 LONG $0xfb6f0f66 // movdqa xmm7, xmm3 @@ -52092,7 +53576,7 @@ LBB10_198: QUAD $0x000000909d6f0f66 // movdqa xmm3, oword 144[rbp] /* [rip + .LCPI10_9] */ LONG $0xcb6f0f66 // movdqa xmm1, xmm3 LONG $0x10380f66; BYTE $0xcc // pblendvb xmm1, xmm4, xmm0 - QUAD $0x0000c024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 192] + QUAD $0x0000b024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 176] LONG $0xc0630f66 // packsswb xmm0, xmm0 QUAD $0x0000a0856f0f4466; BYTE $0x00 // movdqa xmm8, oword 160[rbp] /* [rip + .LCPI10_10] */ LONG $0x6f0f4166; BYTE $0xd0 // movdqa xmm2, xmm8 @@ -52103,42 +53587,42 @@ LBB10_198: LONG $0xc0630f66 // packsswb xmm0, xmm0 QUAD $0x000000b08d6f0f66 // movdqa xmm1, oword 176[rbp] /* [rip + .LCPI10_11] */ LONG $0x10380f66; BYTE $0xcc // pblendvb xmm1, xmm4, xmm0 - QUAD $0x0000d024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 208] + QUAD $0x0000c024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 192] LONG $0xc0630f66 // packsswb xmm0, xmm0 LONG $0x380f4466; WORD $0xd410 // pblendvb xmm10, xmm4, xmm0 LONG $0xeb0f4466; BYTE $0xd1 // por xmm10, xmm1 - QUAD $0x0000f024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 240] + QUAD $0x0000e024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 224] LONG $0xc0630f66 // packsswb xmm0, xmm0 QUAD $0x000000d08d6f0f66 // movdqa xmm1, oword 208[rbp] /* [rip + .LCPI10_13] */ LONG $0x10380f66; BYTE $0xcc // pblendvb xmm1, xmm4, xmm0 LONG $0xeb0f4166; BYTE $0xca // por xmm1, xmm10 LONG $0xcaeb0f66 // por xmm1, xmm2 - QUAD $0x00009024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 144] + LONG $0x446f0f66; WORD $0x7024 // movdqa xmm0, oword [rsp + 112] LONG $0xc0630f66 // packsswb xmm0, xmm0 QUAD $0x0000e0956f0f4466; BYTE $0x00 // movdqa xmm10, oword 224[rbp] /* [rip + .LCPI10_14] */ LONG $0x380f4466; WORD $0xd410 // pblendvb xmm10, xmm4, xmm0 LONG $0xeb0f4466; BYTE $0xd1 // por xmm10, xmm1 - QUAD $0x00012024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 288] + QUAD $0x00010024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 256] LONG $0xc0630f66 // packsswb xmm0, xmm0 LONG $0x6f0f4166; 
BYTE $0xcb // movdqa xmm1, xmm11 LONG $0x10380f66; BYTE $0xcc // pblendvb xmm1, xmm4, xmm0 - QUAD $0x00010024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 256] + QUAD $0x00012024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 288] LONG $0xc0630f66 // packsswb xmm0, xmm0 LONG $0xd36f0f66 // movdqa xmm2, xmm3 LONG $0x10380f66; BYTE $0xd4 // pblendvb xmm2, xmm4, xmm0 - QUAD $0x0000e024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 224] + QUAD $0x0000d024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 208] QUAD $0x0000016085ef0f66 // pxor xmm0, oword 352[rbp] /* [rip + .LCPI10_22] */ LONG $0xdb760f66 // pcmpeqd xmm3, xmm3 LONG $0xc0630f66 // packsswb xmm0, xmm0 LONG $0xc8f80f66 // psubb xmm1, xmm0 - QUAD $0x00011024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 272] + QUAD $0x00013024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 304] LONG $0xc0630f66 // packsswb xmm0, xmm0 LONG $0x6f0f4166; BYTE $0xf8 // movdqa xmm7, xmm8 LONG $0x10380f66; BYTE $0xfc // pblendvb xmm7, xmm4, xmm0 LONG $0xfaeb0f66 // por xmm7, xmm2 - QUAD $0x00013024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 304] + QUAD $0x00015024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 336] LONG $0xc0630f66 // packsswb xmm0, xmm0 - QUAD $0x0150249c6f0f4466; WORD $0x0000 // movdqa xmm11, oword [rsp + 336] + QUAD $0x0110249c6f0f4466; WORD $0x0000 // movdqa xmm11, oword [rsp + 272] LONG $0x630f4566; BYTE $0xdb // packsswb xmm11, xmm11 LONG $0xf9eb0f66 // por xmm7, xmm1 QUAD $0x000000b08d6f0f66 // movdqa xmm1, oword 176[rbp] /* [rip + .LCPI10_11] */ @@ -52152,7 +53636,7 @@ LBB10_198: QUAD $0x000000d08d6f0f66 // movdqa xmm1, oword 208[rbp] /* [rip + .LCPI10_13] */ LONG $0x10380f66; BYTE $0xcc // pblendvb xmm1, xmm4, xmm0 LONG $0xcaeb0f66 // por xmm1, xmm2 - QUAD $0x0000a024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 160] + QUAD $0x0000f024846f0f66; BYTE $0x00 // movdqa xmm0, oword [rsp + 240] LONG $0xc0630f66 // packsswb xmm0, xmm0 QUAD $0x0170249c6f0f4466; WORD $0x0000 // movdqa xmm11, oword [rsp + 368] LONG $0x630f4566; BYTE $0xdb // packsswb xmm11, xmm11 @@ -52210,28 +53694,28 @@ LBB10_198: LONG $0x7f0f43f3; WORD $0x9644; BYTE $0x10 // movdqu oword [r14 + 4*r10 + 16], xmm0 LONG $0x08c28349 // add r10, 8 QUAD $0x0000019024943b4c // cmp r10, qword [rsp + 400] - JNE LBB10_198 + JNE LBB10_125 QUAD $0x000001c824bc8b4c // mov r15, qword [rsp + 456] QUAD $0x0000019024bc3b4c // cmp r15, qword [rsp + 400] LONG $0x24548b4c; BYTE $0x48 // mov r10, qword [rsp + 72] LONG $0x24648b4c; BYTE $0x08 // mov r12, qword [rsp + 8] - JNE LBB10_101 - JMP LBB10_136 + JNE LBB10_127 + JMP LBB10_130 -LBB10_200: - WORD $0x894d; BYTE $0xd8 // mov r8, r11 - LONG $0xfce08349 // and r8, -4 - WORD $0x894c; BYTE $0xc3 // mov rbx, r8 - LONG $0x07e3c148 // shl rbx, 7 - WORD $0x0148; BYTE $0xf3 // add rbx, rsi - LONG $0x863c8d4f // lea r15, [r14 + 4*r8] +LBB10_182: + WORD $0x894c; BYTE $0xd8 // mov rax, r11 + LONG $0xfce08348 // and rax, -4 + WORD $0x8948; BYTE $0xc2 // mov rdx, rax + LONG $0x07e2c148 // shl rdx, 7 + WORD $0x0148; BYTE $0xf2 // add rdx, rsi + LONG $0x863c8d4d // lea r15, [r14 + 4*rax] LONG $0xeb280f45 // movaps xmm13, xmm11 LONG $0xebc60f45; BYTE $0x00 // shufps xmm13, xmm11, 0 LONG $0xfcc68148; WORD $0x0001; BYTE $0x00 // add rsi, 508 WORD $0xc931 // xor ecx, ecx LONG $0x6f0f4466; WORD $0x007d // movdqa xmm15, oword 0[rbp] /* [rip + .LCPI10_0] */ -LBB10_201: +LBB10_183: QUAD $0xfffffe049e100ff3 // movss xmm3, dword [rsi - 508] QUAD $0xfffe0896100f44f3; BYTE $0xff // movss xmm10, dword [rsi - 504] QUAD $0xfffe0c8e100f44f3; 
BYTE $0xff // movss xmm9, dword [rsi - 500] @@ -52618,11 +54102,11 @@ LBB10_201: LONG $0x7f0f45f3; WORD $0x8e04 // movdqu oword [r14 + 4*rcx], xmm8 LONG $0x04c18348 // add rcx, 4 LONG $0x00c68148; WORD $0x0002; BYTE $0x00 // add rsi, 512 - WORD $0x3949; BYTE $0xc8 // cmp r8, rcx - JNE LBB10_201 - WORD $0x394d; BYTE $0xc3 // cmp r11, r8 - JNE LBB10_124 - JMP LBB10_140 + WORD $0x3948; BYTE $0xc8 // cmp rax, rcx + JNE LBB10_183 + WORD $0x3949; BYTE $0xc3 // cmp r11, rax + JNE LBB10_185 + JMP LBB10_188 DATA LCDATA8<>+0x000(SB)/8, $0x0000000001010101 DATA LCDATA8<>+0x008(SB)/8, $0x0000000000000000 diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparisons.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparisons.go similarity index 79% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparisons.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparisons.go index 8a957eaf..e4a50540 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparisons.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/scalar_comparisons.go @@ -23,34 +23,35 @@ import ( "fmt" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/scalar" - "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/scalar" + "github.com/apache/arrow-go/v18/internal/bitutils" ) type binaryKernel func(left, right, out []byte, offset int) -type cmpFn[LeftT, RightT exec.FixedWidthTypes] func([]LeftT, []RightT, []uint32) -type cmpScalarLeft[LeftT, RightT exec.FixedWidthTypes] func(LeftT, []RightT, []uint32) -type cmpScalarRight[LeftT, RightT exec.FixedWidthTypes] func([]LeftT, RightT, []uint32) +type cmpFn[LeftT, RightT arrow.FixedWidthType] func([]LeftT, []RightT, []uint32) +type cmpScalarLeft[LeftT, RightT arrow.FixedWidthType] func(LeftT, []RightT, []uint32) +type cmpScalarRight[LeftT, RightT arrow.FixedWidthType] func([]LeftT, RightT, []uint32) -type cmpOp[T exec.FixedWidthTypes] struct { +type cmpOp[T arrow.FixedWidthType] struct { arrArr cmpFn[T, T] arrScalar cmpScalarRight[T, T] scalarArr cmpScalarLeft[T, T] } -func comparePrimitiveArrayArray[T exec.FixedWidthTypes](op cmpFn[T, T]) binaryKernel { +func comparePrimitiveArrayArray[T arrow.FixedWidthType](op cmpFn[T, T]) binaryKernel { return func(leftBytes, rightBytes, out []byte, offset int) { const batchSize = 32 var ( - left = exec.GetData[T](leftBytes) - right = exec.GetData[T](rightBytes) + left = arrow.GetData[T](leftBytes) + right = arrow.GetData[T](rightBytes) nvals = len(left) nbatches = nvals / batchSize tmpOutput [batchSize]uint32 @@ -83,11 +84,11 @@ func comparePrimitiveArrayArray[T exec.FixedWidthTypes](op cmpFn[T, T]) binaryKe } } -func comparePrimitiveArrayScalar[T 
exec.FixedWidthTypes](op cmpScalarRight[T, T]) binaryKernel { +func comparePrimitiveArrayScalar[T arrow.FixedWidthType](op cmpScalarRight[T, T]) binaryKernel { return func(leftBytes, rightBytes, out []byte, offset int) { const batchSize = 32 var ( - left = exec.GetData[T](leftBytes) + left = arrow.GetData[T](leftBytes) rightVal = *(*T)(unsafe.Pointer(&rightBytes[0])) nvals = len(left) nbatches = nvals / batchSize @@ -121,12 +122,12 @@ func comparePrimitiveArrayScalar[T exec.FixedWidthTypes](op cmpScalarRight[T, T] } } -func comparePrimitiveScalarArray[T exec.FixedWidthTypes](op cmpScalarLeft[T, T]) binaryKernel { +func comparePrimitiveScalarArray[T arrow.FixedWidthType](op cmpScalarLeft[T, T]) binaryKernel { return func(leftBytes, rightBytes, out []byte, offset int) { const batchSize = 32 var ( leftVal = *(*T)(unsafe.Pointer(&leftBytes[0])) - right = exec.GetData[T](rightBytes) + right = arrow.GetData[T](rightBytes) nvals = len(right) nbatches = nvals / batchSize @@ -181,7 +182,7 @@ func getOffsetSpanBytes(span *exec.ArraySpan) []byte { return buf[start : start+(span.Len*byteWidth)] } -func compareKernel[T exec.FixedWidthTypes](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { +func compareKernel[T arrow.FixedWidthType](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { kn := ctx.Kernel.(*exec.ScalarKernel) knData := kn.Data.(CompareFuncData).Funcs() @@ -202,7 +203,7 @@ func compareKernel[T exec.FixedWidthTypes](ctx *exec.KernelCtx, batch *exec.Exec return nil } -func genGoCompareKernel[T exec.FixedWidthTypes](op *cmpOp[T]) *CompareData { +func genGoCompareKernel[T arrow.FixedWidthType](op *cmpOp[T]) *CompareData { return &CompareData{ funcAA: comparePrimitiveArrayArray(op.arrArr), funcAS: comparePrimitiveArrayScalar(op.arrScalar), @@ -376,7 +377,7 @@ func genDecimalCompareKernel[T decimal128.Num | decimal256.Num](op CompareOperat return } -func getCmpOp[T exec.NumericTypes](op CompareOperator) *cmpOp[T] { +func getCmpOp[T arrow.NumericType](op CompareOperator) *cmpOp[T] { switch op { case CmpEQ: return &cmpOp[T]{ @@ -524,7 +525,7 @@ func getBinaryCmp(op CompareOperator) binaryBinOp[bool] { return nil } -func numericCompareKernel[T exec.NumericTypes](ty exec.InputType, op CompareOperator) (kn exec.ScalarKernel) { +func numericCompareKernel[T arrow.NumericType](ty exec.InputType, op CompareOperator) (kn exec.ScalarKernel) { ex := compareKernel[T] kn = exec.NewScalarKernelWithSig(&exec.KernelSignature{ InputTypes: []exec.InputType{ty, ty}, @@ -699,3 +700,100 @@ func CompareKernels(op CompareOperator) []exec.ScalarKernel { return kns } + +func isNullExec(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { + out.Release() + input := batch.Values[0].Array + + validityBuf := input.GetBuffer(0) + out.Buffers[1].WrapBuffer(ctx.AllocateBitmap(input.Len)) + if validityBuf != nil { + bitutil.InvertBitmap(validityBuf.Bytes(), int(input.Offset), int(input.Len), + out.Buffers[1].Buf, 0) + } + + return nil +} + +func isNotNullExec(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { + out.Release() + input := batch.Values[0].Array + + validityBuf := input.GetBuffer(0) + if validityBuf == nil { + out.Buffers[1].WrapBuffer(ctx.AllocateBitmap(input.Len)) + memory.Set(out.Buffers[1].Buf, 0xFF) + } else { + out.Buffers[1].SetBuffer(validityBuf) + } + + return nil +} + +func IsNullNotNullKernels() []exec.ScalarKernel { + in := exec.InputType{Kind: exec.InputAny} + out := exec.NewOutputType(arrow.FixedWidthTypes.Boolean) + + 
results := make([]exec.ScalarKernel, 2) + results[0] = exec.NewScalarKernel([]exec.InputType{in}, out, isNullExec, nil) + results[0].NullHandling = exec.NullComputedNoPrealloc + results[0].MemAlloc = exec.MemNoPrealloc + + results[1] = exec.NewScalarKernel([]exec.InputType{in}, out, isNotNullExec, nil) + results[1].NullHandling = exec.NullComputedNoPrealloc + results[1].MemAlloc = exec.MemNoPrealloc + + return results +} + +func ConstBoolExec(val bool) func(*exec.KernelCtx, *exec.ExecSpan, *exec.ExecResult) error { + return func(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { + bitutil.SetBitsTo(out.Buffers[1].Buf, out.Offset, batch.Len, val) + return nil + } +} + +func isNanKernelExec[T float32 | float64](ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { + kn := ctx.Kernel.(*exec.ScalarKernel) + knData := kn.Data.(CompareFuncData).Funcs() + + outPrefix := int(out.Offset % 8) + outBuf := out.Buffers[1].Buf[out.Offset/8:] + + inputBytes := getOffsetSpanBytes(&batch.Values[0].Array) + knData.funcAA(inputBytes, inputBytes, outBuf, outPrefix) + return nil +} + +func IsNaNKernels() []exec.ScalarKernel { + outputType := exec.NewOutputType(arrow.FixedWidthTypes.Boolean) + + knFloat32 := exec.NewScalarKernel([]exec.InputType{exec.NewExactInput(arrow.PrimitiveTypes.Float32)}, + outputType, isNanKernelExec[float32], nil) + knFloat32.Data = genCompareKernel[float32](CmpNE) + knFloat32.NullHandling = exec.NullNoOutput + knFloat64 := exec.NewScalarKernel([]exec.InputType{exec.NewExactInput(arrow.PrimitiveTypes.Float64)}, + outputType, isNanKernelExec[float64], nil) + knFloat64.Data = genCompareKernel[float64](CmpNE) + knFloat64.NullHandling = exec.NullNoOutput + + kernels := []exec.ScalarKernel{knFloat32, knFloat64} + + for _, dt := range intTypes { + kn := exec.NewScalarKernel( + []exec.InputType{exec.NewExactInput(dt)}, + outputType, ConstBoolExec(false), nil) + kn.NullHandling = exec.NullNoOutput + kernels = append(kernels, kn) + } + + for _, id := range []arrow.Type{arrow.NULL, arrow.DURATION, arrow.DECIMAL32, arrow.DECIMAL64, arrow.DECIMAL128, arrow.DECIMAL256} { + kn := exec.NewScalarKernel( + []exec.InputType{exec.NewIDInput(id)}, + outputType, ConstBoolExec(false), nil) + kn.NullHandling = exec.NullNoOutput + kernels = append(kernels, kn) + } + + return kernels +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/string_casts.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/string_casts.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/string_casts.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/string_casts.go index 30705146..1f4b2218 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/string_casts.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/string_casts.go @@ -23,12 +23,12 @@ import ( "strconv" "unicode/utf8" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/float16" + 
"github.com/apache/arrow-go/v18/internal/bitutils" ) func validateUtf8Fsb(input *exec.ArraySpan) error { @@ -116,7 +116,7 @@ func CastBinaryToBinary[InOffsetsT, OutOffsetsT int32 | int64](ctx *exec.KernelC outOffsets := exec.GetSpanOffsets[OutOffsetsT](out, 1) castNumericUnsafe(arrow.INT64, arrow.INT32, - exec.GetBytes(inputOffsets), exec.GetBytes(outOffsets), len(inputOffsets)) + arrow.GetBytes(inputOffsets), arrow.GetBytes(outOffsets), len(inputOffsets)) return nil default: // upcast from int32 -> int64 @@ -127,7 +127,7 @@ func CastBinaryToBinary[InOffsetsT, OutOffsetsT int32 | int64](ctx *exec.KernelC outOffsets := exec.GetSpanOffsets[OutOffsetsT](out, 1) castNumericUnsafe(arrow.INT32, arrow.INT64, - exec.GetBytes(inputOffsets), exec.GetBytes(outOffsets), len(inputOffsets)) + arrow.GetBytes(inputOffsets), arrow.GetBytes(outOffsets), len(inputOffsets)) return nil } } @@ -201,8 +201,8 @@ func GetFsbCastKernels() []exec.ScalarKernel { func float16Formatter(v float16.Num) string { return v.String() } func date32Formatter(v arrow.Date32) string { return v.FormattedString() } func date64Formatter(v arrow.Date64) string { return v.FormattedString() } -func numericFormatterSigned[T exec.IntTypes](v T) string { return strconv.FormatInt(int64(v), 10) } -func numericFormatterUnsigned[T exec.UintTypes](v T) string { return strconv.FormatUint(uint64(v), 10) } +func numericFormatterSigned[T arrow.IntType](v T) string { return strconv.FormatInt(int64(v), 10) } +func numericFormatterUnsigned[T arrow.UintType](v T) string { return strconv.FormatUint(uint64(v), 10) } func float32Formatter(v float32) string { return strconv.FormatFloat(float64(v), 'g', -1, 32) } func float64Formatter(v float64) string { return strconv.FormatFloat(v, 'g', -1, 64) } @@ -247,7 +247,7 @@ func timeToStringCastExec[T timeIntrinsic](ctx *exec.KernelCtx, batch *exec.Exec return nil } -func numericToStringCastExec[T exec.IntTypes | exec.UintTypes | exec.FloatTypes](formatter func(T) string) exec.ArrayKernelExec { +func numericToStringCastExec[T arrow.IntType | arrow.UintType | arrow.FloatType](formatter func(T) string) exec.ArrayKernelExec { return func(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { var ( input = &batch.Values[0].Array diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/types.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/types.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/types.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/types.go index 2788fb70..29cb6f54 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/types.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/types.go @@ -21,10 +21,10 @@ package kernels import ( "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/scalar" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_hash.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_hash.go similarity index 97% rename from 
vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_hash.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_hash.go index e0ede826..51968f79 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_hash.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_hash.go @@ -21,13 +21,13 @@ package kernels import ( "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/bitutils" - "github.com/apache/arrow/go/v14/internal/hashing" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/bitutils" + "github.com/apache/arrow-go/v18/internal/hashing" ) type HashState interface { @@ -178,7 +178,7 @@ func doAppendFixedSize(action Action, memo hashing.MemoTable, arr *exec.ArraySpa }) } -func doAppendNumeric[T exec.IntTypes | exec.UintTypes | exec.FloatTypes](action Action, memo hashing.MemoTable, arr *exec.ArraySpan) error { +func doAppendNumeric[T arrow.IntType | arrow.UintType | arrow.FloatType](action Action, memo hashing.MemoTable, arr *exec.ArraySpan) error { arrData := exec.GetSpanValues[T](arr, 1) shouldEncodeNulls := action.ShouldEncodeNulls() return bitutils.VisitBitBlocksShort(arr.Buffers[0].Buf, arr.Offset, arr.Len, diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_run_end_encode.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_run_end_encode.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_run_end_encode.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_run_end_encode.go index e073ff1f..faedcfba 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_run_end_encode.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_run_end_encode.go @@ -24,14 +24,14 @@ import ( "sort" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/float16" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" ) type RunEndEncodeState struct { @@ -46,18 +46,18 @@ type RunEndsType interface { int16 | int32 | int64 } -func readFixedWidthVal[V exec.FixedWidthTypes](inputValidity, inputValues []byte, offset int64, out *V) bool { +func readFixedWidthVal[V arrow.FixedWidthType](inputValidity, inputValues []byte, offset 
int64, out *V) bool { sz := int64(unsafe.Sizeof(*out)) *out = *(*V)(unsafe.Pointer(&inputValues[offset*sz])) return bitutil.BitIsSet(inputValidity, int(offset)) } -func writeFixedWidthVal[V exec.FixedWidthTypes](result *exec.ExecResult, offset int64, valid bool, value V) { +func writeFixedWidthVal[V arrow.FixedWidthType](result *exec.ExecResult, offset int64, valid bool, value V) { if len(result.Buffers[0].Buf) != 0 { bitutil.SetBitTo(result.Buffers[0].Buf, int(offset), valid) } - arr := exec.GetData[V](result.Buffers[1].Buf) + arr := arrow.GetData[V](result.Buffers[1].Buf) arr[offset] = value } @@ -73,7 +73,7 @@ func writeBoolVal(result *exec.ExecResult, offset int64, valid bool, value bool) bitutil.SetBitTo(result.Buffers[1].Buf, int(offset), value) } -type runEndEncodeLoopFixedWidth[R RunEndsType, V exec.FixedWidthTypes | bool] struct { +type runEndEncodeLoopFixedWidth[R RunEndsType, V arrow.FixedWidthType | bool] struct { inputLen, inputOffset int64 inputValidity []byte inputValues []byte @@ -84,7 +84,7 @@ type runEndEncodeLoopFixedWidth[R RunEndsType, V exec.FixedWidthTypes | bool] st } func (re *runEndEncodeLoopFixedWidth[R, V]) WriteEncodedRuns(out *exec.ExecResult) int64 { - outputRunEnds := exec.GetData[R](out.Children[0].Buffers[1].Buf) + outputRunEnds := arrow.GetData[R](out.Children[0].Buffers[1].Buf) readOffset := re.inputOffset var currentRun V @@ -155,7 +155,7 @@ func (re *runEndEncodeLoopFixedWidth[R, V]) PreallocOutput(ctx *exec.KernelCtx, valueBuffer = ctx.Allocate(int(numOutput) * bufSpec.ByteWidth) } - reeType := arrow.RunEndEncodedOf(exec.GetDataType[R](), re.valueType) + reeType := arrow.RunEndEncodedOf(arrow.GetDataType[R](), re.valueType) out.Release() *out = exec.ExecResult{ @@ -230,7 +230,7 @@ func (re *runEndEncodeFSB[R]) PreallocOutput(ctx *exec.KernelCtx, numOutput int6 } valueBuffer := ctx.Allocate(re.width * int(numOutput)) - reeType := arrow.RunEndEncodedOf(exec.GetDataType[R](), re.valueType) + reeType := arrow.RunEndEncodedOf(arrow.GetDataType[R](), re.valueType) out.Release() *out = exec.ExecResult{ @@ -258,7 +258,7 @@ func (re *runEndEncodeFSB[R]) PreallocOutput(ctx *exec.KernelCtx, numOutput int6 } func (re *runEndEncodeFSB[R]) WriteEncodedRuns(out *exec.ExecResult) int64 { - outputRunEnds := exec.GetData[R](out.Children[0].Buffers[1].Buf) + outputRunEnds := arrow.GetData[R](out.Children[0].Buffers[1].Buf) outputValues := out.Children[1].Buffers[1].Buf readOffset := re.inputOffset @@ -362,7 +362,7 @@ func (re *runEndEncodeLoopBinary[R, O]) PreallocOutput(ctx *exec.KernelCtx, numO valueBuffer := ctx.Allocate(int(re.estimatedValuesLen)) offsetsBuffer := ctx.Allocate(int(numOutput+1) * int(SizeOf[O]())) - reeType := arrow.RunEndEncodedOf(exec.GetDataType[R](), re.valueType) + reeType := arrow.RunEndEncodedOf(arrow.GetDataType[R](), re.valueType) *out = exec.ExecResult{ Type: reeType, Len: re.inputLen, @@ -389,12 +389,12 @@ func (re *runEndEncodeLoopBinary[R, O]) PreallocOutput(ctx *exec.KernelCtx, numO } func (re *runEndEncodeLoopBinary[R, O]) WriteEncodedRuns(out *exec.ExecResult) int64 { - outputRunEnds := exec.GetData[R](out.Children[0].Buffers[1].Buf) + outputRunEnds := arrow.GetData[R](out.Children[0].Buffers[1].Buf) outputOffsets := exec.GetSpanOffsets[O](&out.Children[1], 1) outputValues := out.Children[1].Buffers[2].Buf // re.offsetValues already accounts for the input.offset so we don't - // need to initalize readOffset to re.inputOffset + // need to initialize readOffset to re.inputOffset var readOffset int64 currentRun, curRunValid := 
re.readValue(readOffset) readOffset++ @@ -443,7 +443,7 @@ func validateRunEndType[R RunEndsType](length int64) error { return nil } -func createEncoder[R RunEndsType, V exec.FixedWidthTypes](input *exec.ArraySpan) *runEndEncodeLoopFixedWidth[R, V] { +func createEncoder[R RunEndsType, V arrow.FixedWidthType](input *exec.ArraySpan) *runEndEncodeLoopFixedWidth[R, V] { return &runEndEncodeLoopFixedWidth[R, V]{ inputLen: input.Len, inputOffset: input.Offset, @@ -539,7 +539,7 @@ func runEndEncodeImpl[R RunEndsType](ctx *exec.KernelCtx, batch *exec.ExecSpan, ) if inputLen == 0 { - reeType := arrow.RunEndEncodedOf(exec.GetDataType[R](), inputArr.Type) + reeType := arrow.RunEndEncodedOf(arrow.GetDataType[R](), inputArr.Type) *out = exec.ExecResult{ Type: reeType, Children: []exec.ArraySpan{ diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_selection.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_selection.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_selection.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_selection.go index c7a902bd..4a619406 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_selection.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/internal/kernels/vector_selection.go @@ -22,13 +22,13 @@ import ( "fmt" "math" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/bitutils" ) type NullSelectionBehavior int8 @@ -99,12 +99,12 @@ type builder[T any] interface { UnsafeAppendBoolToBitmap(bool) } -func getTakeIndices[T exec.IntTypes | exec.UintTypes](mem memory.Allocator, filter *exec.ArraySpan, nullSelect NullSelectionBehavior) arrow.ArrayData { +func getTakeIndices[T arrow.IntType | arrow.UintType](mem memory.Allocator, filter *exec.ArraySpan, nullSelect NullSelectionBehavior) arrow.ArrayData { var ( filterData = filter.Buffers[1].Buf haveFilterNulls = filter.MayHaveNulls() filterIsValid = filter.Buffers[0].Buf - idxType = exec.GetDataType[T]() + idxType = arrow.GetDataType[T]() ) if haveFilterNulls && nullSelect == EmitNulls { @@ -394,7 +394,7 @@ func primitiveFilterImpl(wr writeFiltered, values *exec.ArraySpan, filter *exec. 
} } -type filterWriter[T exec.UintTypes] struct { +type filterWriter[T arrow.UintType] struct { outPosition int outOffset int valuesOffset int @@ -519,7 +519,7 @@ func PrimitiveFilter(ctx *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecRe return nil } -type primitiveGetter[T exec.IntTypes | bool] interface { +type primitiveGetter[T arrow.IntType | bool] interface { IsValid(int64) bool GetValue(int64) T NullCount() int64 @@ -542,7 +542,7 @@ func (b *boolGetter) GetValue(i int64) bool { func (b *boolGetter) NullCount() int64 { return b.inner.Nulls } func (b *boolGetter) Len() int64 { return b.inner.Len } -type primitiveGetterImpl[T exec.IntTypes] struct { +type primitiveGetterImpl[T arrow.IntType] struct { inner *exec.ArraySpan values []T } @@ -608,7 +608,7 @@ func (c *chunkedBoolGetter) GetValue(i int64) bool { func (c *chunkedBoolGetter) NullCount() int64 { return c.nulls } func (c *chunkedBoolGetter) Len() int64 { return c.len } -type chunkedPrimitiveGetter[T exec.IntTypes] struct { +type chunkedPrimitiveGetter[T arrow.IntType] struct { inner *arrow.Chunked resolver *exec.ChunkResolver nulls int64 @@ -619,7 +619,7 @@ type chunkedPrimitiveGetter[T exec.IntTypes] struct { valuesOffset []int64 } -func newChunkedPrimitiveGetter[T exec.IntTypes](arr *arrow.Chunked) *chunkedPrimitiveGetter[T] { +func newChunkedPrimitiveGetter[T arrow.IntType](arr *arrow.Chunked) *chunkedPrimitiveGetter[T] { nchunks := len(arr.Chunks()) lengths := make([]int64, nchunks) valuesData := make([][]T, nchunks) @@ -630,7 +630,7 @@ func newChunkedPrimitiveGetter[T exec.IntTypes](arr *arrow.Chunked) *chunkedPrim lengths[i] = int64(c.Len()) valuesOffset[i] = int64(c.Data().Offset()) valuesIsValid[i] = c.NullBitmapBytes() - valuesData[i] = exec.GetValues[T](c.Data(), 1) + valuesData[i] = arrow.GetValues[T](c.Data(), 1) } return &chunkedPrimitiveGetter[T]{ @@ -662,7 +662,7 @@ func (c *chunkedPrimitiveGetter[T]) GetValue(i int64) T { func (c *chunkedPrimitiveGetter[T]) NullCount() int64 { return c.nulls } func (c *chunkedPrimitiveGetter[T]) Len() int64 { return c.len } -func primitiveTakeImpl[IdxT exec.UintTypes, ValT exec.IntTypes](values primitiveGetter[ValT], indices *exec.ArraySpan, out *exec.ExecResult) { +func primitiveTakeImpl[IdxT arrow.UintType, ValT arrow.IntType](values primitiveGetter[ValT], indices *exec.ArraySpan, out *exec.ExecResult) { var ( indicesData = exec.GetSpanValues[IdxT](indices, 1) indicesIsValid = indices.Buffers[0].Buf @@ -747,7 +747,7 @@ func primitiveTakeImpl[IdxT exec.UintTypes, ValT exec.IntTypes](values primitive out.Nulls = out.Len - validCount } -func booleanTakeImpl[IdxT exec.UintTypes](values primitiveGetter[bool], indices *exec.ArraySpan, out *exec.ExecResult) { +func booleanTakeImpl[IdxT arrow.UintType](values primitiveGetter[bool], indices *exec.ArraySpan, out *exec.ExecResult) { var ( indicesData = exec.GetSpanValues[IdxT](indices, 1) indicesIsValid = indices.Buffers[0].Buf @@ -876,7 +876,7 @@ func booleanTakeDispatch(values, indices *exec.ArraySpan, out *exec.ExecResult) return nil } -func takeIdxChunkedDispatch[ValT exec.IntTypes](values, indices *arrow.Chunked, out []*exec.ExecResult) error { +func takeIdxChunkedDispatch[ValT arrow.IntType](values, indices *arrow.Chunked, out []*exec.ExecResult) error { getter := newChunkedPrimitiveGetter[ValT](values) var fn func(primitiveGetter[ValT], *exec.ArraySpan, *exec.ExecResult) @@ -901,7 +901,7 @@ func takeIdxChunkedDispatch[ValT exec.IntTypes](values, indices *arrow.Chunked, return nil } -func takeIdxDispatch[ValT 
exec.IntTypes](values, indices *exec.ArraySpan, out *exec.ExecResult) error { +func takeIdxDispatch[ValT arrow.IntType](values, indices *exec.ArraySpan, out *exec.ExecResult) error { getter := &primitiveGetterImpl[ValT]{inner: values, values: exec.GetSpanValues[ValT](values, 1)} switch indices.Type.(arrow.FixedWidthDataType).Bytes() { @@ -1368,7 +1368,7 @@ func binaryFilterImpl[OffsetT int32 | int64](ctx *exec.KernelCtx, values, filter return nil } -func takeExecImpl[T exec.UintTypes](ctx *exec.KernelCtx, outputLen int64, values, indices *exec.ArraySpan, out *exec.ExecResult, visitValid func(int64) error, visitNull func() error) error { +func takeExecImpl[T arrow.UintType](ctx *exec.KernelCtx, outputLen int64, values, indices *exec.ArraySpan, out *exec.ExecResult, visitValid func(int64) error, visitNull func() error) error { var ( validityBuilder = validityBuilder{mem: exec.GetAllocator(ctx.Ctx)} indicesValues = exec.GetSpanValues[T](indices, 1) @@ -1600,7 +1600,7 @@ func ListImpl[OffsetT int32 | int64](ctx *exec.KernelCtx, batch *exec.ExecSpan, out.Buffers[1].WrapBuffer(offsetBuilder.finish()) out.Children = make([]exec.ArraySpan, 1) - out.Children[0].Type = exec.GetDataType[OffsetT]() + out.Children[0].Type = arrow.GetDataType[OffsetT]() out.Children[0].Len = int64(childIdxBuilder.len()) out.Children[0].Buffers[1].WrapBuffer(childIdxBuilder.finish()) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/registry.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/registry.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/registry.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/registry.go index 4f1c435f..12bc0b85 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/registry.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/registry.go @@ -21,7 +21,7 @@ package compute import ( "sync" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/debug" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_bool.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/scalar_bool.go similarity index 92% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_bool.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/scalar_bool.go index 49c74568..9a64382a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_bool.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/scalar_bool.go @@ -21,9 +21,9 @@ package compute import ( "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" ) var ( @@ -93,6 +93,11 @@ var ( For a different null behavior, see function "and".`, ArgNames: []string{"x", "y"}, } + notDoc = FunctionDoc{ + Summary: "Logical 'not' boolean values", + Description: "Negates the input boolean value", + ArgNames: []string{"x"}, + } ) func makeFunction(reg FunctionRegistry, name string, arity int, ex exec.ArrayKernelExec, doc FunctionDoc, nulls exec.NullHandling) { @@ -130,4 +135,6 @@ func RegisterScalarBoolean(reg FunctionRegistry) { andNotKleeneDoc, exec.NullComputedPrealloc) makeFunction(reg, "or_kleene", 2, kernels.SimpleBinary[kernels.KleeneOrOpKernel], 
orKleeneDoc, exec.NullComputedPrealloc) + makeFunction(reg, "not", 1, kernels.NotExecKernel, notDoc, + exec.NullComputedNoPrealloc) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_compare.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/scalar_compare.go similarity index 81% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_compare.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/scalar_compare.go index 476f3771..0e853a65 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_compare.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/scalar_compare.go @@ -21,9 +21,9 @@ package compute import ( "context" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" ) type compareFunction struct { @@ -50,6 +50,7 @@ func (fn *compareFunction) DispatchBest(vals ...arrow.DataType) (exec.Kernel, er } ensureDictionaryDecoded(vals...) + ensureNoExtensionType(vals...) replaceNullWithOtherType(vals...) if dt := commonNumeric(vals...); dt != nil { @@ -134,4 +135,26 @@ func RegisterScalarComparisons(reg FunctionRegistry) { reg.AddFunction(ltFn, false) lteFn := makeFlippedCompare("less_equal", gteFn, EmptyFuncDoc) reg.AddFunction(lteFn, false) + + isOrNotNullKns := kernels.IsNullNotNullKernels() + isNullFn := &compareFunction{*NewScalarFunction("is_null", Unary(), EmptyFuncDoc)} + if err := isNullFn.AddKernel(isOrNotNullKns[0]); err != nil { + panic(err) + } + + isNotNullFn := &compareFunction{*NewScalarFunction("is_not_null", Unary(), EmptyFuncDoc)} + if err := isNotNullFn.AddKernel(isOrNotNullKns[1]); err != nil { + panic(err) + } + + reg.AddFunction(isNullFn, false) + reg.AddFunction(isNotNullFn, false) + + isNaNFn := &compareFunction{*NewScalarFunction("is_nan", Unary(), EmptyFuncDoc)} + for _, k := range kernels.IsNaNKernels() { + if err := isNaNFn.AddKernel(k); err != nil { + panic(err) + } + } + reg.AddFunction(isNaNFn, false) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/selection.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/selection.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/selection.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/selection.go index ed6d8041..feac9d88 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/selection.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/selection.go @@ -22,10 +22,10 @@ import ( "context" "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" "golang.org/x/sync/errgroup" ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/utils.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/utils.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/utils.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/utils.go index cc4d6edc..7e4df8dd 100644 --- 
a/vendor/github.com/apache/arrow/go/v14/arrow/compute/utils.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/utils.go @@ -21,15 +21,15 @@ package compute import ( "fmt" "io" - "math" "time" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/compute/exec" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/compute/exec" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/utils" "golang.org/x/xerrors" ) @@ -43,9 +43,9 @@ func (b *bufferWriteSeeker) Reserve(nbytes int) { if b.buf == nil { b.buf = memory.NewResizableBuffer(b.mem) } - newCap := int(math.Max(float64(b.buf.Cap()), 256)) + newCap := utils.Max(b.buf.Cap(), 256) for newCap < b.pos+nbytes { - newCap = bitutil.NextPowerOf2(newCap) + newCap = bitutil.NextPowerOf2(b.pos + nbytes) } b.buf.Reserve(newCap) } @@ -105,6 +105,14 @@ func ensureDictionaryDecoded(vals ...arrow.DataType) { } } +func ensureNoExtensionType(vals ...arrow.DataType) { + for i, v := range vals { + if v.ID() == arrow.EXTENSION { + vals[i] = v.(arrow.ExtensionType).StorageType() + } + } +} + func replaceNullWithOtherType(vals ...arrow.DataType) { debug.Assert(len(vals) == 2, "should be length 2") diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_hash.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/vector_hash.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_hash.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/vector_hash.go index 5f9aec55..facdd1ff 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_hash.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/vector_hash.go @@ -21,8 +21,8 @@ package compute import ( "context" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_run_ends.go b/vendor/github.com/apache/arrow-go/v18/arrow/compute/vector_run_ends.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_run_ends.go rename to vendor/github.com/apache/arrow-go/v18/arrow/compute/vector_run_ends.go index 48f3dcba..c1545b9b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_run_ends.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/compute/vector_run_ends.go @@ -21,8 +21,8 @@ package compute import ( "context" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/compute/internal/kernels" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype.go similarity index 92% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype.go rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype.go index f0fb24ec..95565859 100644 --- 
a/vendor/github.com/apache/arrow/go/v14/arrow/datatype.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype.go @@ -21,7 +21,7 @@ import ( "hash/maphash" "strings" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) // Type is a logical type. They can be expressed as @@ -107,7 +107,7 @@ const ( // parameters. DECIMAL128 - // DECIMAL256 is a precision and scale based decimal type, with 256 bit max. not yet implemented + // DECIMAL256 is a precision and scale based decimal type, with 256 bit max. DECIMAL256 // LIST is a list of some logical data type @@ -116,10 +116,10 @@ const ( // STRUCT of logical types STRUCT - // SPARSE_UNION of logical types. not yet implemented + // SPARSE_UNION of logical types SPARSE_UNION - // DENSE_UNION of logical types. not yet implemented + // DENSE_UNION of logical types DENSE_UNION // DICTIONARY aka Category type @@ -138,13 +138,13 @@ const ( // or nanoseconds. DURATION - // like STRING, but 64-bit offsets. not yet implemented + // like STRING, but 64-bit offsets LARGE_STRING - // like BINARY but with 64-bit offsets, not yet implemented + // like BINARY but with 64-bit offsets LARGE_BINARY - // like LIST but with 64-bit offsets. not yet implmented + // like LIST but with 64-bit offsets LARGE_LIST // calendar interval with three fields @@ -165,6 +165,12 @@ const ( // like LIST but with 64-bit offsets LARGE_LIST_VIEW + // Decimal value with 32-bit representation + DECIMAL32 + + // Decimal value with 64-bit representation + DECIMAL64 + // Alias to ensure we do not break any consumers DECIMAL = DECIMAL128 ) @@ -210,6 +216,11 @@ type BinaryDataType interface { binary() } +type BinaryViewDataType interface { + BinaryDataType + view() +} + type OffsetsDataType interface { DataType OffsetTypeTraits() OffsetTraits @@ -272,6 +283,8 @@ func (b BufferSpec) Equals(other BufferSpec) bool { type DataTypeLayout struct { Buffers []BufferSpec HasDict bool + // VariadicSpec is what the buffers beyond len(Buffers) are expected to conform to. 
+ VariadicSpec *BufferSpec } func SpecFixedWidth(w int) BufferSpec { return BufferSpec{KindFixedWidth, w} } @@ -358,10 +371,10 @@ func IsLargeBinaryLike(t Type) bool { return false } -// IsFixedSizeBinary returns true for Decimal128/256 and FixedSizeBinary +// IsFixedSizeBinary returns true for Decimal32/64/128/256 and FixedSizeBinary func IsFixedSizeBinary(t Type) bool { switch t { - case DECIMAL128, DECIMAL256, FIXED_SIZE_BINARY: + case DECIMAL32, DECIMAL64, DECIMAL128, DECIMAL256, FIXED_SIZE_BINARY: return true } return false @@ -370,7 +383,7 @@ func IsFixedSizeBinary(t Type) bool { // IsDecimal returns true for Decimal128 and Decimal256 func IsDecimal(t Type) bool { switch t { - case DECIMAL128, DECIMAL256: + case DECIMAL32, DECIMAL64, DECIMAL128, DECIMAL256: return true } return false diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_binary.go similarity index 72% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_binary.go index a3a85686..f3e601f0 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_binary.go @@ -83,16 +83,57 @@ func (t *LargeStringType) Layout() DataTypeLayout { func (t *LargeStringType) OffsetTypeTraits() OffsetTraits { return Int64Traits } func (LargeStringType) IsUtf8() bool { return true } +type BinaryViewType struct{} + +func (*BinaryViewType) ID() Type { return BINARY_VIEW } +func (*BinaryViewType) Name() string { return "binary_view" } +func (*BinaryViewType) String() string { return "binary_view" } +func (*BinaryViewType) IsUtf8() bool { return false } +func (*BinaryViewType) binary() {} +func (*BinaryViewType) view() {} +func (t *BinaryViewType) Fingerprint() string { return typeFingerprint(t) } +func (*BinaryViewType) Layout() DataTypeLayout { + variadic := SpecVariableWidth() + return DataTypeLayout{ + Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(ViewHeaderSizeBytes)}, + VariadicSpec: &variadic, + } +} + +type StringViewType struct{} + +func (*StringViewType) ID() Type { return STRING_VIEW } +func (*StringViewType) Name() string { return "string_view" } +func (*StringViewType) String() string { return "string_view" } +func (*StringViewType) IsUtf8() bool { return true } +func (*StringViewType) binary() {} +func (*StringViewType) view() {} +func (t *StringViewType) Fingerprint() string { return typeFingerprint(t) } +func (*StringViewType) Layout() DataTypeLayout { + variadic := SpecVariableWidth() + return DataTypeLayout{ + Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(ViewHeaderSizeBytes)}, + VariadicSpec: &variadic, + } +} + var ( BinaryTypes = struct { Binary BinaryDataType String BinaryDataType LargeBinary BinaryDataType LargeString BinaryDataType + BinaryView BinaryDataType + StringView BinaryDataType }{ Binary: &BinaryType{}, String: &StringType{}, LargeBinary: &LargeBinaryType{}, LargeString: &LargeStringType{}, + BinaryView: &BinaryViewType{}, + StringView: &StringViewType{}, } + + _ BinaryViewDataType = (*StringViewType)(nil) + _ BinaryViewDataType = (*BinaryViewType)(nil) ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_encoded.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_encoded.go index 
c1750a88..749f03a5 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_encoded.go @@ -58,6 +58,8 @@ func (t *RunEndEncodedType) Fields() []Field { } } +func (t *RunEndEncodedType) NumFields() int { return 2 } + func (*RunEndEncodedType) ValidRunEndsType(dt DataType) bool { switch dt.ID() { case INT16, INT32, INT64: diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_extension.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_extension.go index 271c8b0d..f0bcccdf 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_extension.go @@ -46,7 +46,7 @@ func getExtTypeRegistry() *sync.Map { } // RegisterExtensionType registers the provided ExtensionType by calling ExtensionName -// to use as a Key for registrying the type. If a type with the same name is already +// to use as a Key for registering the type. If a type with the same name is already // registered then this will return an error saying so, otherwise it will return nil // if successful registering the type. // This function is safe to call from multiple goroutines simultaneously. @@ -117,7 +117,7 @@ type ExtensionType interface { // concurrently. Serialize() string // Deserialize is called when reading in extension arrays and types via the IPC format - // in order to construct an instance of the appropriate extension type. The data passed in + // in order to construct an instance of the appropriate extension type. The passed in data // is pulled from the ARROW:extension:metadata key and may be nil or an empty slice. // If the storage type is incorrect or something else is invalid with the data this should // return nil and an appropriate error. @@ -161,6 +161,13 @@ func (e *ExtensionBase) Fields() []Field { return nil } +func (e *ExtensionBase) NumFields() int { + if nested, ok := e.Storage.(NestedType); ok { + return nested.NumFields() + } + return 0 +} + func (e *ExtensionBase) Layout() DataTypeLayout { return e.Storage.Layout() } // this no-op exists to ensure that this type must be embedded in any user-defined extension type. 
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_fixedwidth.go similarity index 85% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_fixedwidth.go index fc0b3aea..5928be3a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_fixedwidth.go @@ -19,10 +19,12 @@ package arrow import ( "fmt" "strconv" + "sync" "time" - "github.com/apache/arrow/go/v14/internal/json" - + "github.com/apache/arrow-go/v18/arrow/decimal" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/internal/json" "golang.org/x/xerrors" ) @@ -69,11 +71,6 @@ type ( // Date32FromTime returns a Date32 value from a time object func Date32FromTime(t time.Time) Date32 { - if _, offset := t.Zone(); offset != 0 { - // properly account for timezone adjustments before we calculate - // the number of days by adjusting the time and converting to UTC - t = t.Add(time.Duration(offset) * time.Second).UTC() - } return Date32(t.Truncate(24*time.Hour).Unix() / int64((time.Hour * 24).Seconds())) } @@ -87,11 +84,6 @@ func (d Date32) FormattedString() string { // Date64FromTime returns a Date64 value from a time object func Date64FromTime(t time.Time) Date64 { - if _, offset := t.Zone(); offset != 0 { - // properly account for timezone adjustments before we calculate - // the actual value by adjusting the time and converting to UTC - t = t.Add(time.Duration(offset) * time.Second).UTC() - } // truncate to the start of the day to get the correct value t = t.Truncate(24 * time.Hour) return Date64(t.Unix()*1e3 + int64(t.Nanosecond())/1e6) @@ -347,13 +339,17 @@ type TemporalWithUnit interface { } // TimestampType is encoded as a 64-bit signed integer since the UNIX epoch (2017-01-01T00:00:00Z). -// The zero-value is a second and time zone neutral. Time zone neutral can be -// considered UTC without having "UTC" as a time zone. +// The zero-value is a second and time zone neutral. In Arrow semantics, time zone neutral does not +// represent a physical point in time, but rather a "wall clock" time that only has meaning within +// the context that produced it. In Go, time.Time can only represent instants; there is no notion +// of "wall clock" time. Therefore, time zone neutral timestamps are represented as UTC per Go +// conventions even though the Arrow type itself has no time zone. type TimestampType struct { Unit TimeUnit TimeZone string loc *time.Location + mx sync.RWMutex } func (*TimestampType) ID() Type { return TIMESTAMP } @@ -386,6 +382,8 @@ func (t *TimestampType) TimeUnit() TimeUnit { return t.Unit } // This should be called if you change the value of the TimeZone after having // potentially called GetZone. func (t *TimestampType) ClearCachedLocation() { + t.mx.Lock() + defer t.mx.Unlock() t.loc = nil } @@ -398,10 +396,20 @@ func (t *TimestampType) ClearCachedLocation() { // so if you change the value of TimeZone after calling this, make sure to call // ClearCachedLocation. 
func (t *TimestampType) GetZone() (*time.Location, error) { + t.mx.RLock() if t.loc != nil { + defer t.mx.RUnlock() return t.loc, nil } + t.mx.RUnlock() + t.mx.Lock() + defer t.mx.Unlock() + // in case GetZone() was called in between releasing the read lock and + // getting the write lock + if t.loc != nil { + return t.loc, nil + } // the TimeZone string is allowed to be either a valid tzdata string // such as "America/New_York" or an absolute offset of the form -XX:XX // or +XX:XX @@ -415,7 +423,7 @@ func (t *TimestampType) GetZone() (*time.Location, error) { if loc, err := time.LoadLocation(t.TimeZone); err == nil { t.loc = loc - return t.loc, err + return loc, err } // at this point we know that the timezone isn't empty, and didn't match @@ -440,17 +448,7 @@ func (t *TimestampType) GetToTimeFunc() (func(Timestamp) time.Time, error) { return nil, err } - switch t.Unit { - case Second: - return func(v Timestamp) time.Time { return time.Unix(int64(v), 0).In(tz) }, nil - case Millisecond: - return func(v Timestamp) time.Time { return time.UnixMilli(int64(v)).In(tz) }, nil - case Microsecond: - return func(v Timestamp) time.Time { return time.UnixMicro(int64(v)).In(tz) }, nil - case Nanosecond: - return func(v Timestamp) time.Time { return time.Unix(0, int64(v)).In(tz) }, nil - } - return nil, fmt.Errorf("invalid timestamp unit: %s", t.Unit) + return func(v Timestamp) time.Time { return v.ToTime(t.Unit).In(tz) }, nil } // Time32Type is encoded as a 32-bit signed integer, representing either seconds or milliseconds since midnight. @@ -535,19 +533,103 @@ type DecimalType interface { DataType GetPrecision() int32 GetScale() int32 + BitWidth() int +} + +// NarrowestDecimalType constructs the smallest decimal type that can represent +// the requested precision. An error is returned if the requested precision +// cannot be represented (prec <= 0 || prec > 76). 
+// +// For reference: +// +// prec in [ 1, 9] => Decimal32Type +// prec in [10, 18] => Decimal64Type +// prec in [19, 38] => Decimal128Type +// prec in [39, 76] => Decimal256Type +func NarrowestDecimalType(prec, scale int32) (DecimalType, error) { + switch { + case prec <= 0: + return nil, fmt.Errorf("%w: precision must be > 0 for decimal types, got %d", + ErrInvalid, prec) + case prec <= int32(decimal.MaxPrecision[decimal.Decimal32]()): + return &Decimal32Type{Precision: prec, Scale: scale}, nil + case prec <= int32(decimal.MaxPrecision[decimal.Decimal64]()): + return &Decimal64Type{Precision: prec, Scale: scale}, nil + case prec <= int32(decimal.MaxPrecision[decimal.Decimal128]()): + return &Decimal128Type{Precision: prec, Scale: scale}, nil + case prec <= int32(decimal.MaxPrecision[decimal.Decimal256]()): + return &Decimal256Type{Precision: prec, Scale: scale}, nil + default: + return nil, fmt.Errorf("%w: invalid precision for decimal types, %d", + ErrInvalid, prec) + } } func NewDecimalType(id Type, prec, scale int32) (DecimalType, error) { switch id { + case DECIMAL32: + debug.Assert(prec <= int32(decimal.MaxPrecision[decimal.Decimal32]()), "invalid precision for decimal32") + return &Decimal32Type{Precision: prec, Scale: scale}, nil + case DECIMAL64: + debug.Assert(prec <= int32(decimal.MaxPrecision[decimal.Decimal64]()), "invalid precision for decimal64") + return &Decimal64Type{Precision: prec, Scale: scale}, nil case DECIMAL128: + debug.Assert(prec <= int32(decimal.MaxPrecision[decimal.Decimal128]()), "invalid precision for decimal128") return &Decimal128Type{Precision: prec, Scale: scale}, nil case DECIMAL256: + debug.Assert(prec <= int32(decimal.MaxPrecision[decimal.Decimal256]()), "invalid precision for decimal256") return &Decimal256Type{Precision: prec, Scale: scale}, nil default: - return nil, fmt.Errorf("%w: must use DECIMAL128 or DECIMAL256 to create a DecimalType", ErrInvalid) + return nil, fmt.Errorf("%w: must use one of the DECIMAL IDs to create a DecimalType", ErrInvalid) } } +// Decimal32Type represents a fixed-size 32-bit decimal type. +type Decimal32Type struct { + Precision int32 + Scale int32 +} + +func (*Decimal32Type) ID() Type { return DECIMAL32 } +func (*Decimal32Type) Name() string { return "decimal32" } +func (*Decimal32Type) BitWidth() int { return 32 } +func (*Decimal32Type) Bytes() int { return Decimal32SizeBytes } +func (t *Decimal32Type) String() string { + return fmt.Sprintf("%s(%d, %d)", t.Name(), t.Precision, t.Scale) +} +func (t *Decimal32Type) Fingerprint() string { + return fmt.Sprintf("%s[%d,%d,%d]", typeFingerprint(t), t.BitWidth(), t.Precision, t.Scale) +} +func (t *Decimal32Type) GetPrecision() int32 { return t.Precision } +func (t *Decimal32Type) GetScale() int32 { return t.Scale } + +func (Decimal32Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Decimal32SizeBytes)}} +} + +// Decimal64Type represents a fixed-size 32-bit decimal type. 
+type Decimal64Type struct { + Precision int32 + Scale int32 +} + +func (*Decimal64Type) ID() Type { return DECIMAL64 } +func (*Decimal64Type) Name() string { return "decimal64" } +func (*Decimal64Type) BitWidth() int { return 64 } +func (*Decimal64Type) Bytes() int { return Decimal64SizeBytes } +func (t *Decimal64Type) String() string { + return fmt.Sprintf("%s(%d, %d)", t.Name(), t.Precision, t.Scale) +} +func (t *Decimal64Type) Fingerprint() string { + return fmt.Sprintf("%s[%d,%d,%d]", typeFingerprint(t), t.BitWidth(), t.Precision, t.Scale) +} +func (t *Decimal64Type) GetPrecision() int32 { return t.Precision } +func (t *Decimal64Type) GetScale() int32 { return t.Scale } + +func (Decimal64Type) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Decimal64SizeBytes)}} +} + // Decimal128Type represents a fixed-size 128-bit decimal type. type Decimal128Type struct { Precision int32 diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_nested.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_nested.go index 4ae48803..6664c505 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_nested.go @@ -22,7 +22,7 @@ import ( "strconv" "strings" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) type ( @@ -32,6 +32,8 @@ type ( // Fields method provides a copy of NestedType fields // (so it can be safely mutated and will not result in updating the NestedType). Fields() []Field + // NumFields provides the number of fields without allocating. + NumFields() int } ListLikeType interface { @@ -109,6 +111,8 @@ func (t *ListType) ElemField() Field { func (t *ListType) Fields() []Field { return []Field{t.ElemField()} } +func (t *ListType) NumFields() int { return 1 } + func (*ListType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int32SizeBytes)}} } @@ -242,6 +246,8 @@ func (t *FixedSizeListType) Fingerprint() string { func (t *FixedSizeListType) Fields() []Field { return []Field{t.ElemField()} } +func (t *FixedSizeListType) NumFields() int { return 1 } + func (*FixedSizeListType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap()}} } @@ -308,6 +314,8 @@ func (t *ListViewType) ElemField() Field { func (t *ListViewType) Fields() []Field { return []Field{t.ElemField()} } +func (t *ListViewType) NumFields() int { return 1 } + func (*ListViewType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int32SizeBytes), SpecFixedWidth(Int32SizeBytes)}} } @@ -376,6 +384,8 @@ func (t *LargeListViewType) ElemField() Field { func (t *LargeListViewType) Fields() []Field { return []Field{t.ElemField()} } +func (t *LargeListViewType) NumFields() int { return 1 } + func (*LargeListViewType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int64SizeBytes), SpecFixedWidth(Int64SizeBytes)}} } @@ -447,6 +457,8 @@ func (t *StructType) Fields() []Field { return fields } +func (t *StructType) NumFields() int { return len(t.fields) } + func (t *StructType) Field(i int) Field { return t.fields[i] } // FieldByName gets the field with the given name. 
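For reference, a minimal sketch (not part of the vendored patch) of how the narrower decimal types introduced above might be constructed; it assumes the vendored packages are imported under their new arrow-go/v18 paths:

	package main

	import (
		"fmt"

		"github.com/apache/arrow-go/v18/arrow"
		"github.com/apache/arrow-go/v18/arrow/decimal"
	)

	func main() {
		// Precision 7 fits in 32 bits, so the narrowest choice is decimal32.
		dt, err := arrow.NarrowestDecimalType(7, 2)
		if err != nil {
			panic(err)
		}
		fmt.Println(dt) // decimal32(7, 2)

		// Requesting a specific width still works; the precision assert in
		// NewDecimalType only fires in builds using the "assert" tag, so the
		// caller is expected to pass a valid precision.
		dt64, _ := arrow.NewDecimalType(arrow.DECIMAL64, 15, 4)
		fmt.Println(dt64.BitWidth()) // 64

		// MaxPrecision reports the digit capacity backing the cutoffs above.
		fmt.Println(decimal.MaxPrecision[decimal.Decimal32]()) // 9
		fmt.Println(decimal.MaxPrecision[decimal.Decimal64]()) // 18
	}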
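Similarly, a short sketch (again not part of the patch) of the new NumFields accessor on nested types, which reports the child count without the slice copy that Fields() makes; arrow.PrimitiveTypes.Int64 and arrow.BinaryTypes.String are assumed to be available from the same vendored package:

	package main

	import (
		"fmt"

		"github.com/apache/arrow-go/v18/arrow"
	)

	func main() {
		st := arrow.StructOf(
			arrow.Field{Name: "id", Type: arrow.PrimitiveTypes.Int64},
			arrow.Field{Name: "name", Type: arrow.BinaryTypes.String, Nullable: true},
		)

		// NumFields avoids allocating a copy of the field slice; Field(i)
		// returns each child in place.
		for i := 0; i < st.NumFields(); i++ {
			f := st.Field(i)
			fmt.Printf("%d: %s %s\n", i, f.Name, f.Type)
		}
	}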
@@ -464,7 +476,7 @@ func (t *StructType) FieldByName(name string) (Field, bool) { // FieldIdx gets the index of the field with the given name. // // If there are multiple fields with the given name, FieldIdx returns -// the index of the first first such field. +// the index of the first such field. func (t *StructType) FieldIdx(name string) (int, bool) { i, ok := t.index[name] if ok { @@ -524,6 +536,23 @@ func MapOf(key, item DataType) *MapType { return &MapType{value: ListOf(StructOf(Field{Name: "key", Type: key}, Field{Name: "value", Type: item, Nullable: true}))} } +func MapOfFields(key, item Field) *MapType { + if key.Type == nil || item.Type == nil { + panic("arrow: nil key or item type for MapType") + } + + if key.Nullable { + panic("arrow: key field must be non-nullable") + } + + key.Name = "key" + item.Name = "value" + return &MapType{value: ListOfField(Field{ + Name: "entries", + Type: StructOf(key, item), + })} +} + func MapOfWithMetadata(key DataType, keyMetadata Metadata, item DataType, itemMetadata Metadata) *MapType { if key == nil || item == nil { panic("arrow: nil key or item type for MapType") @@ -598,6 +627,8 @@ func (t *MapType) Fingerprint() string { func (t *MapType) Fields() []Field { return []Field{t.ElemField()} } +func (t *MapType) NumFields() int { return 1 } + func (t *MapType) Layout() DataTypeLayout { return t.value.Layout() } @@ -690,6 +721,8 @@ func (t *unionType) Fields() []Field { return fields } +func (t *unionType) NumFields() int { return len(t.children) } + func (t *unionType) TypeCodes() []UnionTypeCode { return t.typeCodes } func (t *unionType) ChildIDs() []int { return t.childIDs[:] } @@ -861,7 +894,7 @@ func DenseUnionFromArrays(children []Array, fields []string, codes []UnionTypeCo } // DenseUnionOf is equivalent to UnionOf(arrow.DenseMode, fields, typeCodes), -// constructing a SparseUnionType from a list of fields and type codes. +// constructing a DenseUnionType from a list of fields and type codes. // // If len(fields) != len(typeCodes) this will panic. They are allowed to be // of length 0. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_null.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_null.go index 2d2454c6..c852b854 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_null.go @@ -27,7 +27,5 @@ func (*NullType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecAlwaysNull()}} } -var ( - Null *NullType - _ DataType = Null -) +// Null gives us both the compile-time assertion of DataType interface as well as serving a good element for use in schemas. 
+var Null DataType = new(NullType) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_numeric.gen.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_numeric.gen.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_numeric.gen.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_numeric.gen.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpldata b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_numeric.gen.go.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpldata rename to vendor/github.com/apache/arrow-go/v18/arrow/datatype_numeric.gen.go.tmpldata diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader.go new file mode 100644 index 00000000..691b97df --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader.go @@ -0,0 +1,141 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "bytes" + "unsafe" + + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" +) + +const ( + ViewPrefixLen = 4 + viewInlineSize = 12 +) + +func IsViewInline(length int) bool { + return length < viewInlineSize +} + +// ViewHeader is a variable length string (utf8) or byte slice with +// a 4 byte prefix and inline optimization for small values (12 bytes +// or fewer). This is similar to Go's standard string but limited by +// a length of Uint32Max and up to the first four bytes of the string +// are copied into the struct. This prefix allows failing comparisons +// early and can reduce CPU cache working set when dealing with short +// strings. +// +// There are two situations: +// +// Entirely inlined string data +// |----|------------| +// ^ ^ +// | | +// size inline string data, zero padded +// +// Reference into buffer +// |----|----|----|----| +// ^ ^ ^ ^ +// | | | | +// size prefix buffer index and offset to out-of-line portion +// +// Adapted from TU Munich's UmbraDB [1], Velox, DuckDB. 
+// +// [1]: https://db.in.tum.de/~freitag/papers/p29-neumann-cidr20.pdf +type ViewHeader struct { + size int32 + // the first 4 bytes of this are the prefix for the string + // if size <= StringHeaderInlineSize, then the entire string + // is in the data array and is zero padded. + // if size > StringHeaderInlineSize, the next 8 bytes are 2 uint32 + // values which are the buffer index and offset in that buffer + // containing the full string. + data [viewInlineSize]byte +} + +func (sh *ViewHeader) IsInline() bool { + return sh.size <= int32(viewInlineSize) +} + +func (sh *ViewHeader) Len() int { return int(sh.size) } +func (sh *ViewHeader) Prefix() [ViewPrefixLen]byte { + return *(*[4]byte)(unsafe.Pointer(&sh.data)) +} + +func (sh *ViewHeader) BufferIndex() int32 { + return int32(endian.Native.Uint32(sh.data[ViewPrefixLen:])) +} + +func (sh *ViewHeader) BufferOffset() int32 { + return int32(endian.Native.Uint32(sh.data[ViewPrefixLen+4:])) +} + +func (sh *ViewHeader) InlineBytes() (data []byte) { + debug.Assert(sh.IsInline(), "calling InlineBytes on non-inline ViewHeader") + return sh.data[:sh.size] +} + +func (sh *ViewHeader) SetBytes(data []byte) int { + sh.size = int32(len(data)) + if sh.IsInline() { + return copy(sh.data[:], data) + } + return copy(sh.data[:4], data) +} + +func (sh *ViewHeader) SetString(data string) int { + sh.size = int32(len(data)) + if sh.IsInline() { + return copy(sh.data[:], data) + } + return copy(sh.data[:4], data) +} + +func (sh *ViewHeader) SetIndexOffset(bufferIndex, offset int32) { + endian.Native.PutUint32(sh.data[ViewPrefixLen:], uint32(bufferIndex)) + endian.Native.PutUint32(sh.data[ViewPrefixLen+4:], uint32(offset)) +} + +func (sh *ViewHeader) Equals(buffers []*memory.Buffer, other *ViewHeader, otherBuffers []*memory.Buffer) bool { + if sh.sizeAndPrefixAsInt64() != other.sizeAndPrefixAsInt64() { + return false + } + + if sh.IsInline() { + return sh.inlinedAsInt64() == other.inlinedAsInt64() + } + + return bytes.Equal(sh.getBufferBytes(buffers), other.getBufferBytes(otherBuffers)) +} + +func (sh *ViewHeader) getBufferBytes(buffers []*memory.Buffer) []byte { + offset := sh.BufferOffset() + return buffers[sh.BufferIndex()].Bytes()[offset : offset+sh.size] +} + +func (sh *ViewHeader) inlinedAsInt64() int64 { + s := unsafe.Slice((*int64)(unsafe.Pointer(sh)), 2) + return s[1] +} + +func (sh *ViewHeader) sizeAndPrefixAsInt64() int64 { + s := unsafe.Slice((*int64)(unsafe.Pointer(sh)), 2) + return s[0] +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline.go new file mode 100644 index 00000000..960cc060 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline.go @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.20 + +package arrow + +import ( + "unsafe" + + "github.com/apache/arrow-go/v18/arrow/internal/debug" +) + +func (sh *ViewHeader) InlineString() (data string) { + debug.Assert(sh.IsInline(), "calling InlineString on non-inline ViewHeader") + + return unsafe.String((*byte)(unsafe.Pointer(&sh.data)), sh.size) +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline_go1.19.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline_go1.19.go new file mode 100644 index 00000000..e1219030 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline_go1.19.go @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.20 && !tinygo + +package arrow + +import ( + "reflect" + "unsafe" + + "github.com/apache/arrow-go/v18/arrow/internal/debug" +) + +func (sh *ViewHeader) InlineString() (data string) { + debug.Assert(sh.IsInline(), "calling InlineString on non-inline ViewHeader") + + h := (*reflect.StringHeader)(unsafe.Pointer(&data)) + h.Data = uintptr(unsafe.Pointer(&sh.data)) + h.Len = int(sh.size) + return +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline_tinygo.go b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline_tinygo.go new file mode 100644 index 00000000..b4cedbda --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/datatype_viewheader_inline_tinygo.go @@ -0,0 +1,35 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !go1.20 && tinygo + +package arrow + +import ( + "reflect" + "unsafe" + + "github.com/apache/arrow-go/v18/arrow/internal/debug" +) + +func (sh *ViewHeader) InlineString() (data string) { + debug.Assert(sh.IsInline(), "calling InlineString on non-inline ViewHeader") + + h := (*reflect.StringHeader)(unsafe.Pointer(&data)) + h.Data = uintptr(unsafe.Pointer(&sh.data)) + h.Len = uintptr(sh.size) + return +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/decimal/decimal.go b/vendor/github.com/apache/arrow-go/v18/arrow/decimal/decimal.go new file mode 100644 index 00000000..098a4e0f --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/decimal/decimal.go @@ -0,0 +1,473 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package decimal + +import ( + "errors" + "fmt" + "math" + "math/big" + "math/bits" + "unsafe" + + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/internal/debug" +) + +// DecimalTypes is a generic constraint representing the implemented decimal types +// in this package, and a single point of update for future additions. Everything +// else is constrained by this. +type DecimalTypes interface { + Decimal32 | Decimal64 | Decimal128 | Decimal256 +} + +// Num is an interface that is useful for building generic types for all decimal +// type implementations. It presents all the methods and interfaces necessary to +// operate on the decimal objects without having to care about the bit width. 
+type Num[T DecimalTypes] interface { + Negate() T + Add(T) T + Sub(T) T + Mul(T) T + Div(T) (res, rem T) + Pow(T) T + + FitsInPrecision(int32) bool + Abs() T + Sign() int + Rescale(int32, int32) (T, error) + Cmp(T) int + + IncreaseScaleBy(int32) T + ReduceScaleBy(int32, bool) T + + ToFloat32(int32) float32 + ToFloat64(int32) float64 + ToBigFloat(int32) *big.Float + + ToString(int32) string +} + +type ( + Decimal32 int32 + Decimal64 int64 + Decimal128 = decimal128.Num + Decimal256 = decimal256.Num +) + +func MaxPrecision[T DecimalTypes]() int { + // max precision is computed by Floor(log10(2^(nbytes * 8 - 1) - 1)) + var z T + return int(math.Floor(math.Log10(math.Pow(2, float64(unsafe.Sizeof(z))*8-1) - 1))) +} + +func (d Decimal32) Negate() Decimal32 { return -d } +func (d Decimal64) Negate() Decimal64 { return -d } + +func (d Decimal32) Add(rhs Decimal32) Decimal32 { return d + rhs } +func (d Decimal64) Add(rhs Decimal64) Decimal64 { return d + rhs } + +func (d Decimal32) Sub(rhs Decimal32) Decimal32 { return d - rhs } +func (d Decimal64) Sub(rhs Decimal64) Decimal64 { return d - rhs } + +func (d Decimal32) Mul(rhs Decimal32) Decimal32 { return d * rhs } +func (d Decimal64) Mul(rhs Decimal64) Decimal64 { return d * rhs } + +func (d Decimal32) Div(rhs Decimal32) (res, rem Decimal32) { + return d / rhs, d % rhs +} + +func (d Decimal64) Div(rhs Decimal64) (res, rem Decimal64) { + return d / rhs, d % rhs +} + +// about 4-5x faster than using math.Pow which requires converting to float64 +// and back to integers +func intPow[T int32 | int64](base, exp T) T { + result := T(1) + for { + if exp&1 == 1 { + result *= base + } + exp >>= 1 + if exp == 0 { + break + } + base *= base + } + + return result +} + +func (d Decimal32) Pow(rhs Decimal32) Decimal32 { + return Decimal32(intPow(int32(d), int32(rhs))) +} + +func (d Decimal64) Pow(rhs Decimal64) Decimal64 { + return Decimal64(intPow(int64(d), int64(rhs))) +} + +func (d Decimal32) Sign() int { + if d == 0 { + return 0 + } + return int(1 | (d >> 31)) +} + +func (d Decimal64) Sign() int { + if d == 0 { + return 0 + } + return int(1 | (d >> 63)) +} + +func (n Decimal32) Abs() Decimal32 { + if n < 0 { + return -n + } + return n +} + +func (n Decimal64) Abs() Decimal64 { + if n < 0 { + return -n + } + return n +} + +func (n Decimal32) FitsInPrecision(prec int32) bool { + debug.Assert(prec > 0, "precision must be > 0") + debug.Assert(prec <= 9, "precision must be <= 9") + return n.Abs() < Decimal32(math.Pow10(int(prec))) +} + +func (n Decimal64) FitsInPrecision(prec int32) bool { + debug.Assert(prec > 0, "precision must be > 0") + debug.Assert(prec <= 18, "precision must be <= 18") + return n.Abs() < Decimal64(math.Pow10(int(prec))) +} + +func (n Decimal32) ToString(scale int32) string { + return n.ToBigFloat(scale).Text('f', int(scale)) +} + +func (n Decimal64) ToString(scale int32) string { + return n.ToBigFloat(scale).Text('f', int(scale)) +} + +var pt5 = big.NewFloat(0.5) + +func decimalFromString[T interface { + Decimal32 | Decimal64 + FitsInPrecision(int32) bool +}](v string, prec, scale int32) (n T, err error) { + var nbits = uint(unsafe.Sizeof(T(0))) * 8 + + var out *big.Float + out, _, err = big.ParseFloat(v, 10, nbits, big.ToNearestEven) + + if scale < 0 { + var tmp big.Int + val, _ := out.Int(&tmp) + if val.BitLen() > int(nbits) { + return n, fmt.Errorf("bitlen too large for decimal%d", nbits) + } + + n = T(val.Int64() / int64(math.Pow10(int(-scale)))) + } else { + var precInBits = uint(math.Round(float64(prec+scale+1)/math.Log10(2))) + 1 + + 
p := (&big.Float{}).SetInt(big.NewInt(int64(math.Pow10(int(scale))))) + out.SetPrec(precInBits).Mul(out, p) + if out.Signbit() { + out.Sub(out, pt5) + } else { + out.Add(out, pt5) + } + + var tmp big.Int + val, _ := out.Int(&tmp) + if val.BitLen() > int(nbits) { + return n, fmt.Errorf("bitlen too large for decimal%d", nbits) + } + n = T(val.Int64()) + } + + if !n.FitsInPrecision(prec) { + err = fmt.Errorf("val %v doesn't fit in precision %d", n, prec) + } + return +} + +func Decimal32FromString(v string, prec, scale int32) (n Decimal32, err error) { + return decimalFromString[Decimal32](v, prec, scale) +} + +func Decimal64FromString(v string, prec, scale int32) (n Decimal64, err error) { + return decimalFromString[Decimal64](v, prec, scale) +} + +func Decimal128FromString(v string, prec, scale int32) (n Decimal128, err error) { + return decimal128.FromString(v, prec, scale) +} + +func Decimal256FromString(v string, prec, scale int32) (n Decimal256, err error) { + return decimal256.FromString(v, prec, scale) +} + +func scalePositiveFloat64(v float64, prec, scale int32) (float64, error) { + v *= math.Pow10(int(scale)) + v = math.RoundToEven(v) + + maxabs := math.Pow10(int(prec)) + if v >= maxabs { + return 0, fmt.Errorf("cannot convert %f to decimal(precision=%d, scale=%d)", v, prec, scale) + } + return v, nil +} + +func fromPositiveFloat[T Decimal32 | Decimal64, F float32 | float64](v F, prec, scale int32) (T, error) { + if prec > int32(MaxPrecision[T]()) { + return T(0), fmt.Errorf("invalid precision %d for converting float to Decimal", prec) + } + + val, err := scalePositiveFloat64(float64(v), prec, scale) + if err != nil { + return T(0), err + } + + return T(F(val)), nil +} + +func Decimal32FromFloat[F float32 | float64](v F, prec, scale int32) (Decimal32, error) { + if v < 0 { + dec, err := fromPositiveFloat[Decimal32](-v, prec, scale) + if err != nil { + return dec, err + } + + return -dec, nil + } + + return fromPositiveFloat[Decimal32](v, prec, scale) +} + +func Decimal64FromFloat[F float32 | float64](v F, prec, scale int32) (Decimal64, error) { + if v < 0 { + dec, err := fromPositiveFloat[Decimal64](-v, prec, scale) + if err != nil { + return dec, err + } + + return -dec, nil + } + + return fromPositiveFloat[Decimal64](v, prec, scale) +} + +func Decimal128FromFloat(v float64, prec, scale int32) (Decimal128, error) { + return decimal128.FromFloat64(v, prec, scale) +} + +func Decimal256FromFloat(v float64, prec, scale int32) (Decimal256, error) { + return decimal256.FromFloat64(v, prec, scale) +} + +func (n Decimal32) ToFloat32(scale int32) float32 { + return float32(n.ToFloat64(scale)) +} + +func (n Decimal64) ToFloat32(scale int32) float32 { + return float32(n.ToFloat64(scale)) +} + +func (n Decimal32) ToFloat64(scale int32) float64 { + return float64(n) * math.Pow10(-int(scale)) +} + +func (n Decimal64) ToFloat64(scale int32) float64 { + return float64(n) * math.Pow10(-int(scale)) +} + +func (n Decimal32) ToBigFloat(scale int32) *big.Float { + f := (&big.Float{}).SetInt64(int64(n)) + if scale < 0 { + f.SetPrec(32).Mul(f, (&big.Float{}).SetInt64(intPow(10, -int64(scale)))) + } else { + f.SetPrec(32).Quo(f, (&big.Float{}).SetInt64(intPow(10, int64(scale)))) + } + return f +} + +func (n Decimal64) ToBigFloat(scale int32) *big.Float { + f := (&big.Float{}).SetInt64(int64(n)) + if scale < 0 { + f.SetPrec(64).Mul(f, (&big.Float{}).SetInt64(intPow(10, -int64(scale)))) + } else { + f.SetPrec(64).Quo(f, (&big.Float{}).SetInt64(intPow(10, int64(scale)))) + } + return f +} + +func cmpDec[T 
Decimal32 | Decimal64](lhs, rhs T) int { + switch { + case lhs > rhs: + return 1 + case lhs < rhs: + return -1 + } + return 0 +} + +func (n Decimal32) Cmp(other Decimal32) int { + return cmpDec(n, other) +} + +func (n Decimal64) Cmp(other Decimal64) int { + return cmpDec(n, other) +} + +func (n Decimal32) IncreaseScaleBy(increase int32) Decimal32 { + debug.Assert(increase >= 0, "invalid increase scale for decimal32") + debug.Assert(increase <= 9, "invalid increase scale for decimal32") + + return n * Decimal32(intPow(10, increase)) +} + +func (n Decimal64) IncreaseScaleBy(increase int32) Decimal64 { + debug.Assert(increase >= 0, "invalid increase scale for decimal64") + debug.Assert(increase <= 18, "invalid increase scale for decimal64") + + return n * Decimal64(intPow(10, int64(increase))) +} + +func reduceScale[T interface { + Decimal32 | Decimal64 + Abs() T +}](n T, reduce int32, round bool) T { + if reduce == 0 { + return n + } + + divisor := T(intPow(10, reduce)) + if !round { + return n / divisor + } + + quo, remainder := n/divisor, n%divisor + divisorHalf := divisor / 2 + if remainder.Abs() >= divisorHalf { + if n > 0 { + quo++ + } else { + quo-- + } + } + + return quo +} + +func (n Decimal32) ReduceScaleBy(reduce int32, round bool) Decimal32 { + debug.Assert(reduce >= 0, "invalid reduce scale for decimal32") + debug.Assert(reduce <= 9, "invalid reduce scale for decimal32") + + return reduceScale(n, reduce, round) +} + +func (n Decimal64) ReduceScaleBy(reduce int32, round bool) Decimal64 { + debug.Assert(reduce >= 0, "invalid reduce scale for decimal64") + debug.Assert(reduce <= 18, "invalid reduce scale for decimal64") + + return reduceScale(n, reduce, round) +} + +//lint:ignore U1000 function is being used, staticcheck seems to not follow generics +func (n Decimal32) rescaleWouldCauseDataLoss(deltaScale int32, multiplier Decimal32) (out Decimal32, loss bool) { + if deltaScale < 0 { + debug.Assert(multiplier != 0, "multiplier must not be zero") + quo, remainder := bits.Div32(0, uint32(n), uint32(multiplier)) + return Decimal32(quo), remainder != 0 + } + + overflow, result := bits.Mul32(uint32(n), uint32(multiplier)) + if overflow != 0 { + return Decimal32(result), true + } + + out = Decimal32(result) + return out, out < n +} + +//lint:ignore U1000 function is being used, staticcheck seems to not follow generics +func (n Decimal64) rescaleWouldCauseDataLoss(deltaScale int32, multiplier Decimal64) (out Decimal64, loss bool) { + if deltaScale < 0 { + debug.Assert(multiplier != 0, "multiplier must not be zero") + quo, remainder := bits.Div64(0, uint64(n), uint64(multiplier)) + return Decimal64(quo), remainder != 0 + } + + overflow, result := bits.Mul64(uint64(n), uint64(multiplier)) + if overflow != 0 { + return Decimal64(result), true + } + + out = Decimal64(result) + return out, out < n +} + +func rescale[T interface { + Decimal32 | Decimal64 + rescaleWouldCauseDataLoss(int32, T) (T, bool) + Sign() int +}](n T, originalScale, newScale int32) (out T, err error) { + if originalScale == newScale { + return n, nil + } + + deltaScale := newScale - originalScale + absDeltaScale := int32(math.Abs(float64(deltaScale))) + + sign := n.Sign() + if n < 0 { + n = -n + } + + multiplier := T(intPow(10, absDeltaScale)) + var wouldHaveLoss bool + out, wouldHaveLoss = n.rescaleWouldCauseDataLoss(deltaScale, multiplier) + if wouldHaveLoss { + err = errors.New("rescale data loss") + } + out *= T(sign) + return +} + +func (n Decimal32) Rescale(originalScale, newScale int32) (out Decimal32, err error) { +
return rescale(n, originalScale, newScale) +} + +func (n Decimal64) Rescale(originalScale, newScale int32) (out Decimal64, err error) { + return rescale(n, originalScale, newScale) +} + +var ( + _ Num[Decimal32] = Decimal32(0) + _ Num[Decimal64] = Decimal64(0) + _ Num[Decimal128] = Decimal128{} + _ Num[Decimal256] = Decimal256{} +) diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/decimal/traits.go b/vendor/github.com/apache/arrow-go/v18/arrow/decimal/traits.go new file mode 100644 index 00000000..0ec0c315 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/decimal/traits.go @@ -0,0 +1,78 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package decimal + +// Traits is a convenience for building generic objects for operating on +// Decimal values to get around the limitations of Go generics. By providing this +// interface a generic object can handle producing the proper types to generate +// new decimal values. +type Traits[T DecimalTypes] interface { + BytesRequired(int) int + FromString(string, int32, int32) (T, error) + FromFloat64(float64, int32, int32) (T, error) +} + +var ( + Dec32Traits dec32Traits + Dec64Traits dec64Traits + Dec128Traits dec128Traits + Dec256Traits dec256Traits +) + +type ( + dec32Traits struct{} + dec64Traits struct{} + dec128Traits struct{} + dec256Traits struct{} +) + +func (dec32Traits) BytesRequired(n int) int { return 4 * n } +func (dec64Traits) BytesRequired(n int) int { return 8 * n } +func (dec128Traits) BytesRequired(n int) int { return 16 * n } +func (dec256Traits) BytesRequired(n int) int { return 32 * n } + +func (dec32Traits) FromString(v string, prec, scale int32) (Decimal32, error) { + return Decimal32FromString(v, prec, scale) +} + +func (dec64Traits) FromString(v string, prec, scale int32) (Decimal64, error) { + return Decimal64FromString(v, prec, scale) +} + +func (dec128Traits) FromString(v string, prec, scale int32) (Decimal128, error) { + return Decimal128FromString(v, prec, scale) +} + +func (dec256Traits) FromString(v string, prec, scale int32) (Decimal256, error) { + return Decimal256FromString(v, prec, scale) +} + +func (dec32Traits) FromFloat64(v float64, prec, scale int32) (Decimal32, error) { + return Decimal32FromFloat(v, prec, scale) +} + +func (dec64Traits) FromFloat64(v float64, prec, scale int32) (Decimal64, error) { + return Decimal64FromFloat(v, prec, scale) +} + +func (dec128Traits) FromFloat64(v float64, prec, scale int32) (Decimal128, error) { + return Decimal128FromFloat(v, prec, scale) +} + +func (dec256Traits) FromFloat64(v float64, prec, scale int32) (Decimal256, error) { + return Decimal256FromFloat(v, prec, scale) +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go 
b/vendor/github.com/apache/arrow-go/v18/arrow/decimal128/decimal128.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go rename to vendor/github.com/apache/arrow-go/v18/arrow/decimal128/decimal128.go index 898d7b42..660c4131 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/decimal128/decimal128.go @@ -23,7 +23,7 @@ import ( "math/big" "math/bits" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) const ( @@ -237,7 +237,7 @@ func FromString(v string, prec, scale int32) (n Num, err error) { // math/big library refers to precision in floating point terms // where it refers to the "number of bits of precision in the mantissa". // So we need to figure out how many bits we should use for precision, - // based on the input precision. Too much precision and we're not rounding + // based on the input precision. Too much precision and we aren't rounding // when we should. Too little precision and we round when we shouldn't. // // In general, the number of decimal digits you get from a given number @@ -261,28 +261,40 @@ func FromString(v string, prec, scale int32) (n Num, err error) { var precInBits = uint(math.Round(float64(prec+scale+1)/math.Log10(2))) + 1 var out *big.Float - out, _, err = big.ParseFloat(v, 10, 127, big.ToNearestEven) + out, _, err = big.ParseFloat(v, 10, 128, big.ToNearestEven) if err != nil { return } - // Since we're going to truncate this to get an integer, we need to round - // the value instead because of edge cases so that we match how other implementations - // (e.g. C++) handles Decimal values. So if we're negative we'll subtract 0.5 and if - // we're positive we'll add 0.5. - out.Mul(out, big.NewFloat(math.Pow10(int(scale)))).SetPrec(precInBits) - if out.Signbit() { - out.Sub(out, pt5) + if scale < 0 { + var tmp big.Int + val, _ := out.Int(&tmp) + if val.BitLen() > 127 { + return Num{}, errors.New("bitlen too large for decimal128") + } + n = FromBigInt(val) + n, _ = n.Div(scaleMultipliers[-scale]) } else { - out.Add(out, pt5) - } + // Since we're going to truncate this to get an integer, we need to round + // the value instead because of edge cases so that we match how other implementations + // (e.g. C++) handles Decimal values. So if we're negative we'll subtract 0.5 and if + // we're positive we'll add 0.5. + p := (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt()) + out.SetPrec(precInBits).Mul(out, p) + if out.Signbit() { + out.Sub(out, pt5) + } else { + out.Add(out, pt5) + } - var tmp big.Int - val, _ := out.Int(&tmp) - if val.BitLen() > 127 { - return Num{}, errors.New("bitlen too large for decimal128") + var tmp big.Int + val, _ := out.Int(&tmp) + if val.BitLen() > 127 { + return Num{}, errors.New("bitlen too large for decimal128") + } + n = FromBigInt(val) } - n = FromBigInt(val) + if !n.FitsInPrecision(prec) { err = fmt.Errorf("val %v doesn't fit in precision %d", n, prec) } @@ -315,6 +327,16 @@ func (n Num) ToFloat64(scale int32) float64 { return n.tofloat64Positive(scale) } +func (n Num) ToBigFloat(scale int32) *big.Float { + f := (&big.Float{}).SetInt(n.BigInt()) + if scale < 0 { + f.SetPrec(128).Mul(f, (&big.Float{}).SetInt(scaleMultipliers[-scale].BigInt())) + } else { + f.SetPrec(128).Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())) + } + return f +} + // LowBits returns the low bits of the two's complement representation of the number. 
func (n Num) LowBits() uint64 { return n.lo } @@ -505,7 +527,11 @@ func (n Num) FitsInPrecision(prec int32) bool { func (n Num) ToString(scale int32) string { f := (&big.Float{}).SetInt(n.BigInt()) - f.Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())) + if scale < 0 { + f.SetPrec(128).Mul(f, (&big.Float{}).SetInt(scaleMultipliers[-scale].BigInt())) + } else { + f.SetPrec(128).Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())) + } return f.Text('f', int(scale)) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go b/vendor/github.com/apache/arrow-go/v18/arrow/decimal256/decimal256.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go rename to vendor/github.com/apache/arrow-go/v18/arrow/decimal256/decimal256.go index 4bfcd4e0..82c52a65 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/decimal256/decimal256.go @@ -23,8 +23,8 @@ import ( "math/big" "math/bits" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) const ( @@ -125,7 +125,7 @@ func FromString(v string, prec, scale int32) (n Num, err error) { // math/big library refers to precision in floating point terms // where it refers to the "number of bits of precision in the mantissa". // So we need to figure out how many bits we should use for precision, - // based on the input precision. Too much precision and we're not rounding + // based on the input precision. Too much precision and we aren't rounding // when we should. Too little precision and we round when we shouldn't. // // In general, the number of decimal digits you get from a given number @@ -154,23 +154,34 @@ func FromString(v string, prec, scale int32) (n Num, err error) { return } - out.Mul(out, big.NewFloat(math.Pow10(int(scale)))).SetPrec(precInBits) - // Since we're going to truncate this to get an integer, we need to round - // the value instead because of edge cases so that we match how other implementations - // (e.g. C++) handles Decimal values. So if we're negative we'll subtract 0.5 and if - // we're positive we'll add 0.5. - if out.Signbit() { - out.Sub(out, pt5) + if scale < 0 { + var tmp big.Int + val, _ := out.Int(&tmp) + if val.BitLen() > 255 { + return Num{}, errors.New("bitlen too large for decimal256") + } + n = FromBigInt(val) + + n, _ = n.Div(scaleMultipliers[-scale]) } else { - out.Add(out, pt5) - } + out.Mul(out, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())).SetPrec(precInBits) + // Since we're going to truncate this to get an integer, we need to round + // the value instead because of edge cases so that we match how other implementations + // (e.g. C++) handles Decimal values. So if we're negative we'll subtract 0.5 and if + // we're positive we'll add 0.5. 
+ if out.Signbit() { + out.Sub(out, pt5) + } else { + out.Add(out, pt5) + } - var tmp big.Int - val, _ := out.Int(&tmp) - if val.BitLen() > 255 { - return Num{}, errors.New("bitlen too large for decimal256") + var tmp big.Int + val, _ := out.Int(&tmp) + if val.BitLen() > 255 { + return Num{}, errors.New("bitlen too large for decimal256") + } + n = FromBigInt(val) } - n = FromBigInt(val) if !n.FitsInPrecision(prec) { err = fmt.Errorf("value %v doesn't fit in precision %d", n, prec) } @@ -328,6 +339,16 @@ func (n Num) ToFloat64(scale int32) float64 { return n.tofloat64Positive(scale) } +func (n Num) ToBigFloat(scale int32) *big.Float { + f := (&big.Float{}).SetInt(n.BigInt()) + if scale < 0 { + f.SetPrec(256).Mul(f, (&big.Float{}).SetInt(scaleMultipliers[-scale].BigInt())) + } else { + f.SetPrec(256).Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())) + } + return f +} + func (n Num) Sign() int { if n == (Num{}) { return 0 @@ -506,7 +527,11 @@ func (n Num) FitsInPrecision(prec int32) bool { func (n Num) ToString(scale int32) string { f := (&big.Float{}).SetInt(n.BigInt()) - f.Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())) + if scale < 0 { + f.SetPrec(256).Mul(f, (&big.Float{}).SetInt(scaleMultipliers[-scale].BigInt())) + } else { + f.SetPrec(256).Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt())) + } return f.Text('f', int(scale)) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/doc.go b/vendor/github.com/apache/arrow-go/v18/arrow/doc.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/doc.go rename to vendor/github.com/apache/arrow-go/v18/arrow/doc.go index e923d05d..690a4f53 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/doc.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/doc.go @@ -30,13 +30,11 @@ array is valid (not null). If the array has no null entries, it is possible to o # Requirements -Despite the go.mod stating go1.20, everything is able to be built with go1.19 or higher. - To build with tinygo include the noasm build tag. 
*/ package arrow -const PkgVersion = "14.0.2" +const PkgVersion = "18.2.0" //go:generate go run _tools/tmpl/main.go -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl //go:generate go run _tools/tmpl/main.go -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl tensor/numeric.gen.go.tmpl tensor/numeric.gen_test.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go b/vendor/github.com/apache/arrow-go/v18/arrow/encoded/ree_utils.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go rename to vendor/github.com/apache/arrow-go/v18/arrow/encoded/ree_utils.go index 1f71e7b5..fd0c166b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/encoded/ree_utils.go @@ -20,7 +20,7 @@ import ( "math" "sort" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) // FindPhysicalIndex performs a binary search on the run-ends to return diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/endian/big.go b/vendor/github.com/apache/arrow-go/v18/arrow/endian/big.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/endian/big.go rename to vendor/github.com/apache/arrow-go/v18/arrow/endian/big.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go b/vendor/github.com/apache/arrow-go/v18/arrow/endian/endian.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go rename to vendor/github.com/apache/arrow-go/v18/arrow/endian/endian.go index 3ecda7b3..f369945d 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/endian/endian.go @@ -17,8 +17,8 @@ package endian import ( - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" ) type Endianness flatbuf.Endianness diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/endian/little.go b/vendor/github.com/apache/arrow-go/v18/arrow/endian/little.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/endian/little.go rename to vendor/github.com/apache/arrow-go/v18/arrow/endian/little.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/errors.go b/vendor/github.com/apache/arrow-go/v18/arrow/errors.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/errors.go rename to vendor/github.com/apache/arrow-go/v18/arrow/errors.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go b/vendor/github.com/apache/arrow-go/v18/arrow/float16/float16.go similarity index 79% rename from vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go rename to vendor/github.com/apache/arrow-go/v18/arrow/float16/float16.go index 4e03d13d..f61db40e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/float16/float16.go @@ -17,6 +17,7 @@ package float16 import ( + "encoding/binary" "math" "strconv" ) @@ -29,6 +30,11 @@ type Num struct { bits uint16 } +var ( + MaxNum = Num{bits: 0b0111101111111111} + MinNum = MaxNum.Negate() +) + // New creates a new half-precision floating 
point value from the provided // float32 value. func New(f float32) Num { @@ -86,6 +92,11 @@ func (n Num) Div(rhs Num) Num { return New(n.Float32() / rhs.Float32()) } +// Equal returns true if the value represented by n is == other +func (n Num) Equal(other Num) bool { + return n.Float32() == other.Float32() +} + // Greater returns true if the value represented by n is > other func (n Num) Greater(other Num) bool { return n.Float32() > other.Float32() @@ -152,14 +163,41 @@ func (n Num) Abs() Num { } func (n Num) Sign() int { - f := n.Float32() - if f > 0 { - return 1 - } else if f == 0 { + if n.IsZero() { return 0 + } else if n.Signbit() { + return -1 } - return -1 + return 1 } +func (n Num) Signbit() bool { return (n.bits & 0x8000) != 0 } + +func (n Num) IsNaN() bool { return (n.bits & 0x7fff) > 0x7c00 } + +func (n Num) IsInf() bool { return (n.bits & 0x7c00) == 0x7c00 } + +func (n Num) IsZero() bool { return (n.bits & 0x7fff) == 0 } + func (f Num) Uint16() uint16 { return f.bits } func (f Num) String() string { return strconv.FormatFloat(float64(f.Float32()), 'g', -1, 32) } + +func Inf() Num { return Num{bits: 0x7c00} } + +func NaN() Num { return Num{bits: 0x7fff} } + +func FromBits(src uint16) Num { return Num{bits: src} } + +func FromLEBytes(src []byte) Num { + return Num{bits: binary.LittleEndian.Uint16(src)} +} + +func (f Num) PutLEBytes(dst []byte) { + binary.LittleEndian.PutUint16(dst, f.bits) +} + +func (f Num) ToLEBytes() []byte { + dst := make([]byte, 2) + f.PutLEBytes(dst) + return dst +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/assert_off.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/assert_off.go index 52b9a233..1450ecc9 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/assert_off.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !assert // +build !assert package debug diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/assert_on.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/assert_on.go index 2aa5d6ac..4a57169b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/assert_on.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build assert // +build assert package debug diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/doc.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/doc.go index 3ee1783c..094e427a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/doc.go @@ -17,14 +17,12 @@ /* Package debug provides APIs for conditional runtime assertions and debug logging. 
- -Using Assert +# Using Assert To enable runtime assertions, build with the assert tag. When the assert tag is omitted, the code for the assertion will be omitted from the binary. - -Using Log +# Using Log To enable runtime debug logs, build with the debug tag. When the debug tag is omitted, the code for logging will be omitted from the binary. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/log_off.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/log_off.go index 48da8e1e..760a5cdc 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/log_off.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !debug // +build !debug package debug diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/log_on.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/log_on.go index 99d0c8ae..2588e7d1 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/log_on.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build debug // +build debug package debug diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/util.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/util.go index 7bd3d538..ea4eba7f 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/debug/util.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build debug || assert // +build debug assert package debug diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/dictutils/dict.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/dictutils/dict.go index e09a2f4a..184e29c0 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/dictutils/dict.go @@ -21,9 +21,9 @@ import ( "fmt" "hash/maphash" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/memory" ) type Kind int8 @@ -104,7 +104,7 @@ func (d *Mapper) InsertPath(pos FieldPos) { d.hasher.Reset() } -func (d *Mapper) ImportField(pos FieldPos, field *arrow.Field) { +func (d *Mapper) ImportField(pos FieldPos, field arrow.Field) { dt := field.Type if dt.ID() == arrow.EXTENSION { dt = dt.(arrow.ExtensionType).StorageType() @@ -126,13 +126,18 @@ func (d *Mapper) ImportField(pos FieldPos, field *arrow.Field) { func (d *Mapper) ImportFields(pos FieldPos, fields []arrow.Field) { for i := range fields { - d.ImportField(pos.Child(int32(i)), &fields[i]) + d.ImportField(pos.Child(int32(i)), fields[i]) } } func (d *Mapper) ImportSchema(schema *arrow.Schema) { d.pathToID = make(map[uint64]int64) - d.ImportFields(NewFieldPos(), schema.Fields()) + // This code path intentionally avoids calling ImportFields with + // schema.Fields to avoid allocations. + pos := NewFieldPos() + for i := 0; i < schema.NumFields(); i++ { + d.ImportField(pos.Child(int32(i)), schema.Field(i)) + } } func hasUnresolvedNestedDict(data arrow.ArrayData) bool { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Binary.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Binary.go index e8018e74..95e01559 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Binary.go @@ -22,7 +22,7 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Opaque binary data +// / Opaque binary data type Binary struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BinaryView.go similarity index 76% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BinaryView.go index 09ca5e7d..f6906674 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BinaryView.go @@ -22,13 +22,13 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Logically the same as Binary, but the internal representation uses a view -/// struct that contains the string length and either the string's entire data -/// inline (for small strings) or an inlined prefix, an index of another buffer, -/// and an offset pointing to a 
slice in that buffer (for non-small strings). -/// -/// Since it uses a variable number of data buffers, each Field with this type -/// must have a corresponding entry in `variadicBufferCounts`. +// / Logically the same as Binary, but the internal representation uses a view +// / struct that contains the string length and either the string's entire data +// / inline (for small strings) or an inlined prefix, an index of another buffer, +// / and an offset pointing to a slice in that buffer (for non-small strings). +// / +// / Since it uses a variable number of data buffers, each Field with this type +// / must have a corresponding entry in `variadicBufferCounts`. type BinaryView struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Block.go similarity index 83% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Block.go index 57a697b1..8e33d3e6 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Block.go @@ -35,31 +35,34 @@ func (rcv *Block) Table() flatbuffers.Table { return rcv._tab.Table } -/// Index to the start of the RecordBlock (note this is past the Message header) +// / Index to the start of the RecordBlock (note this is past the Message header) func (rcv *Block) Offset() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) } -/// Index to the start of the RecordBlock (note this is past the Message header) + +// / Index to the start of the RecordBlock (note this is past the Message header) func (rcv *Block) MutateOffset(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) } -/// Length of the metadata +// / Length of the metadata func (rcv *Block) MetaDataLength() int32 { return rcv._tab.GetInt32(rcv._tab.Pos + flatbuffers.UOffsetT(8)) } -/// Length of the metadata + +// / Length of the metadata func (rcv *Block) MutateMetaDataLength(n int32) bool { return rcv._tab.MutateInt32(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) } -/// Length of the data (this is aligned so there can be a gap between this and -/// the metadata). +// / Length of the data (this is aligned so there can be a gap between this and +// / the metadata). func (rcv *Block) BodyLength() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(16)) } -/// Length of the data (this is aligned so there can be a gap between this and -/// the metadata). + +// / Length of the data (this is aligned so there can be a gap between this and +// / the metadata). 
func (rcv *Block) MutateBodyLength(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(16), n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BodyCompression.go similarity index 83% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BodyCompression.go index 6468e231..c23c2919 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BodyCompression.go @@ -22,9 +22,9 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Optional compression for the memory buffers constituting IPC message -/// bodies. Intended for use with RecordBatch but could be used for other -/// message types +// / Optional compression for the memory buffers constituting IPC message +// / bodies. Intended for use with RecordBatch but could be used for other +// / message types type BodyCompression struct { _tab flatbuffers.Table } @@ -45,8 +45,8 @@ func (rcv *BodyCompression) Table() flatbuffers.Table { return rcv._tab } -/// Compressor library. -/// For LZ4_FRAME, each compressed buffer must consist of a single frame. +// / Compressor library. +// / For LZ4_FRAME, each compressed buffer must consist of a single frame. func (rcv *BodyCompression) Codec() CompressionType { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -55,13 +55,13 @@ func (rcv *BodyCompression) Codec() CompressionType { return 0 } -/// Compressor library. -/// For LZ4_FRAME, each compressed buffer must consist of a single frame. +// / Compressor library. +// / For LZ4_FRAME, each compressed buffer must consist of a single frame. 
func (rcv *BodyCompression) MutateCodec(n CompressionType) bool { return rcv._tab.MutateInt8Slot(4, int8(n)) } -/// Indicates the way the record batch body was compressed +// / Indicates the way the record batch body was compressed func (rcv *BodyCompression) Method() BodyCompressionMethod { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -70,7 +70,7 @@ func (rcv *BodyCompression) Method() BodyCompressionMethod { return 0 } -/// Indicates the way the record batch body was compressed +// / Indicates the way the record batch body was compressed func (rcv *BodyCompression) MutateMethod(n BodyCompressionMethod) bool { return rcv._tab.MutateInt8Slot(6, int8(n)) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BodyCompressionMethod.go similarity index 90% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BodyCompressionMethod.go index 108ab3e0..bb7234b3 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/BodyCompressionMethod.go @@ -20,9 +20,9 @@ package flatbuf import "strconv" -/// Provided for forward compatibility in case we need to support different -/// strategies for compressing the IPC message body (like whole-body -/// compression rather than buffer-level) in the future +// / Provided for forward compatibility in case we need to support different +// / strategies for compressing the IPC message body (like whole-body +// / compression rather than buffer-level) in the future type BodyCompressionMethod int8 const ( diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Bool.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Bool.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Bool.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Bool.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Buffer.go similarity index 64% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Buffer.go index eba8d99b..e650e06a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Buffer.go @@ -22,8 +22,8 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// ---------------------------------------------------------------------- -/// A Buffer represents a single contiguous memory segment +// / ---------------------------------------------------------------------- +// / A Buffer represents a single contiguous memory segment type Buffer struct { _tab flatbuffers.Struct } @@ -37,30 +37,32 @@ func (rcv *Buffer) Table() flatbuffers.Table { return rcv._tab.Table } -/// The relative offset into the shared memory page where the bytes for this -/// buffer starts +// / The relative offset into the shared memory page where the bytes for this +// / buffer starts func (rcv *Buffer) Offset() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) } -/// The relative offset into the shared memory page where the bytes for this -/// buffer starts + +// / 
The relative offset into the shared memory page where the bytes for this +// / buffer starts func (rcv *Buffer) MutateOffset(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) } -/// The absolute length (in bytes) of the memory buffer. The memory is found -/// from offset (inclusive) to offset + length (non-inclusive). When building -/// messages using the encapsulated IPC message, padding bytes may be written -/// after a buffer, but such padding bytes do not need to be accounted for in -/// the size here. +// / The absolute length (in bytes) of the memory buffer. The memory is found +// / from offset (inclusive) to offset + length (non-inclusive). When building +// / messages using the encapsulated IPC message, padding bytes may be written +// / after a buffer, but such padding bytes do not need to be accounted for in +// / the size here. func (rcv *Buffer) Length() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8)) } -/// The absolute length (in bytes) of the memory buffer. The memory is found -/// from offset (inclusive) to offset + length (non-inclusive). When building -/// messages using the encapsulated IPC message, padding bytes may be written -/// after a buffer, but such padding bytes do not need to be accounted for in -/// the size here. + +// / The absolute length (in bytes) of the memory buffer. The memory is found +// / from offset (inclusive) to offset + length (non-inclusive). When building +// / messages using the encapsulated IPC message, padding bytes may be written +// / after a buffer, but such padding bytes do not need to be accounted for in +// / the size here. func (rcv *Buffer) MutateLength(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/CompressionType.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/CompressionType.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/CompressionType.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/CompressionType.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Date.go similarity index 83% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Date.go index 32983ec5..985a8f79 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Date.go @@ -22,12 +22,12 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Date is either a 32-bit or 64-bit signed integer type representing an -/// elapsed time since UNIX epoch (1970-01-01), stored in either of two units: -/// -/// * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no -/// leap seconds), where the values are evenly divisible by 86400000 -/// * Days (32 bits) since the UNIX epoch +// / Date is either a 32-bit or 64-bit signed integer type representing an +// / elapsed time since UNIX epoch (1970-01-01), stored in either of two units: +// / +// / * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no +// / leap seconds), where the values are evenly divisible by 86400000 +// / * Days (32 bits) since the UNIX epoch type Date struct { _tab flatbuffers.Table } diff --git 
a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DateUnit.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DateUnit.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DateUnit.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DateUnit.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Decimal.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Decimal.go index c9de254d..234c3964 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Decimal.go @@ -23,9 +23,9 @@ import ( ) /// Exact decimal value represented as an integer value in two's -/// complement. Currently only 128-bit (16-byte) and 256-bit (32-byte) integers -/// are used. The representation uses the endianness indicated -/// in the Schema. +/// complement. Currently 32-bit (4-byte), 64-bit (8-byte), +/// 128-bit (16-byte) and 256-bit (32-byte) integers are used. +/// The representation uses the endianness indicated in the Schema. type Decimal struct { _tab flatbuffers.Table } @@ -74,7 +74,7 @@ func (rcv *Decimal) MutateScale(n int32) bool { return rcv._tab.MutateInt32Slot(6, n) } -/// Number of bits per value. The only accepted widths are 128 and 256. +/// Number of bits per value. The accepted widths are 32, 64, 128 and 256. /// We use bitWidth for consistency with Int::bitWidth. func (rcv *Decimal) BitWidth() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) @@ -84,7 +84,7 @@ func (rcv *Decimal) BitWidth() int32 { return 128 } -/// Number of bits per value. The only accepted widths are 128 and 256. +/// Number of bits per value. The accepted widths are 32, 64, 128 and 256. /// We use bitWidth for consistency with Int::bitWidth. func (rcv *Decimal) MutateBitWidth(n int32) bool { return rcv._tab.MutateInt32Slot(8, n) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryBatch.go similarity index 79% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryBatch.go index 25b5384e..999c5fda 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryBatch.go @@ -22,12 +22,12 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// For sending dictionary encoding information. Any Field can be -/// dictionary-encoded, but in this case none of its children may be -/// dictionary-encoded. -/// There is one vector / column per dictionary, but that vector / column -/// may be spread across multiple dictionary batches by using the isDelta -/// flag +// / For sending dictionary encoding information. Any Field can be +// / dictionary-encoded, but in this case none of its children may be +// / dictionary-encoded. 
+// / There is one vector / column per dictionary, but that vector / column +// / may be spread across multiple dictionary batches by using the isDelta +// / flag type DictionaryBatch struct { _tab flatbuffers.Table } @@ -73,9 +73,9 @@ func (rcv *DictionaryBatch) Data(obj *RecordBatch) *RecordBatch { return nil } -/// If isDelta is true the values in the dictionary are to be appended to a -/// dictionary with the indicated id. If isDelta is false this dictionary -/// should replace the existing dictionary. +// / If isDelta is true the values in the dictionary are to be appended to a +// / dictionary with the indicated id. If isDelta is false this dictionary +// / should replace the existing dictionary. func (rcv *DictionaryBatch) IsDelta() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -84,9 +84,9 @@ func (rcv *DictionaryBatch) IsDelta() bool { return false } -/// If isDelta is true the values in the dictionary are to be appended to a -/// dictionary with the indicated id. If isDelta is false this dictionary -/// should replace the existing dictionary. +// / If isDelta is true the values in the dictionary are to be appended to a +// / dictionary with the indicated id. If isDelta is false this dictionary +// / should replace the existing dictionary. func (rcv *DictionaryBatch) MutateIsDelta(n bool) bool { return rcv._tab.MutateBoolSlot(8, n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryEncoding.go similarity index 67% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryEncoding.go index a9b09530..44c38742 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryEncoding.go @@ -42,9 +42,9 @@ func (rcv *DictionaryEncoding) Table() flatbuffers.Table { return rcv._tab } -/// The known dictionary id in the application where this data is used. In -/// the file or streaming formats, the dictionary ids are found in the -/// DictionaryBatch messages +// / The known dictionary id in the application where this data is used. In +// / the file or streaming formats, the dictionary ids are found in the +// / DictionaryBatch messages func (rcv *DictionaryEncoding) Id() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -53,18 +53,18 @@ func (rcv *DictionaryEncoding) Id() int64 { return 0 } -/// The known dictionary id in the application where this data is used. In -/// the file or streaming formats, the dictionary ids are found in the -/// DictionaryBatch messages +// / The known dictionary id in the application where this data is used. In +// / the file or streaming formats, the dictionary ids are found in the +// / DictionaryBatch messages func (rcv *DictionaryEncoding) MutateId(n int64) bool { return rcv._tab.MutateInt64Slot(4, n) } -/// The dictionary indices are constrained to be non-negative integers. If -/// this field is null, the indices must be signed int32. To maximize -/// cross-language compatibility and performance, implementations are -/// recommended to prefer signed integer types over unsigned integer types -/// and to avoid uint64 indices unless they are required by an application. +// / The dictionary indices are constrained to be non-negative integers. 
If +// / this field is null, the indices must be signed int32. To maximize +// / cross-language compatibility and performance, implementations are +// / recommended to prefer signed integer types over unsigned integer types +// / and to avoid uint64 indices unless they are required by an application. func (rcv *DictionaryEncoding) IndexType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -78,15 +78,15 @@ func (rcv *DictionaryEncoding) IndexType(obj *Int) *Int { return nil } -/// The dictionary indices are constrained to be non-negative integers. If -/// this field is null, the indices must be signed int32. To maximize -/// cross-language compatibility and performance, implementations are -/// recommended to prefer signed integer types over unsigned integer types -/// and to avoid uint64 indices unless they are required by an application. -/// By default, dictionaries are not ordered, or the order does not have -/// semantic meaning. In some statistical, applications, dictionary-encoding -/// is used to represent ordered categorical data, and we provide a way to -/// preserve that metadata here +// / The dictionary indices are constrained to be non-negative integers. If +// / this field is null, the indices must be signed int32. To maximize +// / cross-language compatibility and performance, implementations are +// / recommended to prefer signed integer types over unsigned integer types +// / and to avoid uint64 indices unless they are required by an application. +// / By default, dictionaries are not ordered, or the order does not have +// / semantic meaning. In some statistical, applications, dictionary-encoding +// / is used to represent ordered categorical data, and we provide a way to +// / preserve that metadata here func (rcv *DictionaryEncoding) IsOrdered() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -95,10 +95,10 @@ func (rcv *DictionaryEncoding) IsOrdered() bool { return false } -/// By default, dictionaries are not ordered, or the order does not have -/// semantic meaning. In some statistical, applications, dictionary-encoding -/// is used to represent ordered categorical data, and we provide a way to -/// preserve that metadata here +// / By default, dictionaries are not ordered, or the order does not have +// / semantic meaning. 
In some statistical, applications, dictionary-encoding +// / is used to represent ordered categorical data, and we provide a way to +// / preserve that metadata here func (rcv *DictionaryEncoding) MutateIsOrdered(n bool) bool { return rcv._tab.MutateBoolSlot(8, n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryKind.go similarity index 82% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryKind.go index 126ba5f7..68251005 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/DictionaryKind.go @@ -20,11 +20,11 @@ package flatbuf import "strconv" -/// ---------------------------------------------------------------------- -/// Dictionary encoding metadata -/// Maintained for forwards compatibility, in the future -/// Dictionaries might be explicit maps between integers and values -/// allowing for non-contiguous index values +// / ---------------------------------------------------------------------- +// / Dictionary encoding metadata +// / Maintained for forwards compatibility, in the future +// / Dictionaries might be explicit maps between integers and values +// / allowing for non-contiguous index values type DictionaryKind int16 const ( diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Duration.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Duration.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Duration.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Duration.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Endianness.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Endianness.go index cefa2ff9..c9619b7b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Endianness.go @@ -20,8 +20,8 @@ package flatbuf import "strconv" -/// ---------------------------------------------------------------------- -/// Endianness of the platform producing the data +// / ---------------------------------------------------------------------- +// / Endianness of the platform producing the data type Endianness int16 const ( diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Feature.go similarity index 65% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Feature.go index ae5a0398..2204c440 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Feature.go @@ -20,35 +20,35 @@ package flatbuf import "strconv" -/// Represents Arrow Features that might not have full support -/// within implementations. This is intended to be used in -/// two scenarios: -/// 1. 
A mechanism for readers of Arrow Streams -/// and files to understand that the stream or file makes -/// use of a feature that isn't supported or unknown to -/// the implementation (and therefore can meet the Arrow -/// forward compatibility guarantees). -/// 2. A means of negotiating between a client and server -/// what features a stream is allowed to use. The enums -/// values here are intented to represent higher level -/// features, additional details maybe negotiated -/// with key-value pairs specific to the protocol. -/// -/// Enums added to this list should be assigned power-of-two values -/// to facilitate exchanging and comparing bitmaps for supported -/// features. +// / Represents Arrow Features that might not have full support +// / within implementations. This is intended to be used in +// / two scenarios: +// / 1. A mechanism for readers of Arrow Streams +// / and files to understand that the stream or file makes +// / use of a feature that isn't supported or unknown to +// / the implementation (and therefore can meet the Arrow +// / forward compatibility guarantees). +// / 2. A means of negotiating between a client and server +// / what features a stream is allowed to use. The enums +// / values here are intented to represent higher level +// / features, additional details maybe negotiated +// / with key-value pairs specific to the protocol. +// / +// / Enums added to this list should be assigned power-of-two values +// / to facilitate exchanging and comparing bitmaps for supported +// / features. type Feature int64 const ( /// Needed to make flatbuffers happy. - FeatureUNUSED Feature = 0 + FeatureUNUSED Feature = 0 /// The stream makes use of multiple full dictionaries with the /// same ID and assumes clients implement dictionary replacement /// correctly. FeatureDICTIONARY_REPLACEMENT Feature = 1 /// The stream makes use of compressed bodies as described /// in Message.fbs. - FeatureCOMPRESSED_BODY Feature = 2 + FeatureCOMPRESSED_BODY Feature = 2 ) var EnumNamesFeature = map[Feature]string{ diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Field.go similarity index 82% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Field.go index c03cf2f8..8aed29bc 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Field.go @@ -22,9 +22,9 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// ---------------------------------------------------------------------- -/// A field represents a named column in a record / row batch or child of a -/// nested type. +// / ---------------------------------------------------------------------- +// / A field represents a named column in a record / row batch or child of a +// / nested type. type Field struct { _tab flatbuffers.Table } @@ -45,7 +45,7 @@ func (rcv *Field) Table() flatbuffers.Table { return rcv._tab } -/// Name is not required, in i.e. a List +// / Name is not required, in i.e. a List func (rcv *Field) Name() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -54,8 +54,8 @@ func (rcv *Field) Name() []byte { return nil } -/// Name is not required, in i.e. a List -/// Whether or not this field can contain nulls. Should be true in general. +// / Name is not required, in i.e. 
a List +// / Whether or not this field can contain nulls. Should be true in general. func (rcv *Field) Nullable() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -64,7 +64,7 @@ func (rcv *Field) Nullable() bool { return false } -/// Whether or not this field can contain nulls. Should be true in general. +// / Whether or not this field can contain nulls. Should be true in general. func (rcv *Field) MutateNullable(n bool) bool { return rcv._tab.MutateBoolSlot(6, n) } @@ -81,7 +81,7 @@ func (rcv *Field) MutateTypeType(n Type) bool { return rcv._tab.MutateByteSlot(8, byte(n)) } -/// This is the type of the decoded value if the field is dictionary encoded. +// / This is the type of the decoded value if the field is dictionary encoded. func (rcv *Field) Type(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -91,8 +91,8 @@ func (rcv *Field) Type(obj *flatbuffers.Table) bool { return false } -/// This is the type of the decoded value if the field is dictionary encoded. -/// Present only if the field is dictionary encoded. +// / This is the type of the decoded value if the field is dictionary encoded. +// / Present only if the field is dictionary encoded. func (rcv *Field) Dictionary(obj *DictionaryEncoding) *DictionaryEncoding { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { @@ -106,9 +106,9 @@ func (rcv *Field) Dictionary(obj *DictionaryEncoding) *DictionaryEncoding { return nil } -/// Present only if the field is dictionary encoded. -/// children apply only to nested data types like Struct, List and Union. For -/// primitive types children will have length 0. +// / Present only if the field is dictionary encoded. +// / children apply only to nested data types like Struct, List and Union. For +// / primitive types children will have length 0. func (rcv *Field) Children(obj *Field, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { @@ -129,9 +129,9 @@ func (rcv *Field) ChildrenLength() int { return 0 } -/// children apply only to nested data types like Struct, List and Union. For -/// primitive types children will have length 0. -/// User-defined metadata +// / children apply only to nested data types like Struct, List and Union. For +// / primitive types children will have length 0. 
+// / User-defined metadata func (rcv *Field) CustomMetadata(obj *KeyValue, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { @@ -152,7 +152,7 @@ func (rcv *Field) CustomMetadataLength() int { return 0 } -/// User-defined metadata +// / User-defined metadata func FieldStart(builder *flatbuffers.Builder) { builder.StartObject(7) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FieldNode.go similarity index 62% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FieldNode.go index 606b30bf..0e258a3d 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FieldNode.go @@ -22,15 +22,15 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// ---------------------------------------------------------------------- -/// Data structures for describing a table row batch (a collection of -/// equal-length Arrow arrays) -/// Metadata about a field at some level of a nested type tree (but not -/// its children). -/// -/// For example, a List with values `[[1, 2, 3], null, [4], [5, 6], null]` -/// would have {length: 5, null_count: 2} for its List node, and {length: 6, -/// null_count: 0} for its Int16 node, as separate FieldNode structs +// / ---------------------------------------------------------------------- +// / Data structures for describing a table row batch (a collection of +// / equal-length Arrow arrays) +// / Metadata about a field at some level of a nested type tree (but not +// / its children). +// / +// / For example, a List with values `[[1, 2, 3], null, [4], [5, 6], null]` +// / would have {length: 5, null_count: 2} for its List node, and {length: 6, +// / null_count: 0} for its Int16 node, as separate FieldNode structs type FieldNode struct { _tab flatbuffers.Struct } @@ -44,26 +44,28 @@ func (rcv *FieldNode) Table() flatbuffers.Table { return rcv._tab.Table } -/// The number of value slots in the Arrow array at this level of a nested -/// tree +// / The number of value slots in the Arrow array at this level of a nested +// / tree func (rcv *FieldNode) Length() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0)) } -/// The number of value slots in the Arrow array at this level of a nested -/// tree + +// / The number of value slots in the Arrow array at this level of a nested +// / tree func (rcv *FieldNode) MutateLength(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n) } -/// The number of observed nulls. Fields with null_count == 0 may choose not -/// to write their physical validity bitmap out as a materialized buffer, -/// instead setting the length of the bitmap buffer to 0. +// / The number of observed nulls. Fields with null_count == 0 may choose not +// / to write their physical validity bitmap out as a materialized buffer, +// / instead setting the length of the bitmap buffer to 0. func (rcv *FieldNode) NullCount() int64 { return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8)) } -/// The number of observed nulls. Fields with null_count == 0 may choose not -/// to write their physical validity bitmap out as a materialized buffer, -/// instead setting the length of the bitmap buffer to 0. + +// / The number of observed nulls. 
Fields with null_count == 0 may choose not +// / to write their physical validity bitmap out as a materialized buffer, +// / instead setting the length of the bitmap buffer to 0. func (rcv *FieldNode) MutateNullCount(n int64) bool { return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FixedSizeBinary.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FixedSizeBinary.go index 4e660d50..2725dfb9 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FixedSizeBinary.go @@ -42,7 +42,7 @@ func (rcv *FixedSizeBinary) Table() flatbuffers.Table { return rcv._tab } -/// Number of bytes per value +// / Number of bytes per value func (rcv *FixedSizeBinary) ByteWidth() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -51,7 +51,7 @@ func (rcv *FixedSizeBinary) ByteWidth() int32 { return 0 } -/// Number of bytes per value +// / Number of bytes per value func (rcv *FixedSizeBinary) MutateByteWidth(n int32) bool { return rcv._tab.MutateInt32Slot(4, n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FixedSizeList.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FixedSizeList.go index dabf5cc8..534ca27f 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FixedSizeList.go @@ -42,7 +42,7 @@ func (rcv *FixedSizeList) Table() flatbuffers.Table { return rcv._tab } -/// Number of list items per value +// / Number of list items per value func (rcv *FixedSizeList) ListSize() int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -51,7 +51,7 @@ func (rcv *FixedSizeList) ListSize() int32 { return 0 } -/// Number of list items per value +// / Number of list items per value func (rcv *FixedSizeList) MutateListSize(n int32) bool { return rcv._tab.MutateInt32Slot(4, n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FloatingPoint.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FloatingPoint.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FloatingPoint.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/FloatingPoint.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Footer.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Footer.go index 65b0ff09..d65af41e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Footer.go @@ -22,9 +22,9 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// ---------------------------------------------------------------------- -/// Arrow File metadata -/// +// / 
---------------------------------------------------------------------- +// / Arrow File metadata +// / type Footer struct { _tab flatbuffers.Table } @@ -108,7 +108,7 @@ func (rcv *Footer) RecordBatchesLength() int { return 0 } -/// User-defined metadata +// / User-defined metadata func (rcv *Footer) CustomMetadata(obj *KeyValue, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { @@ -129,7 +129,7 @@ func (rcv *Footer) CustomMetadataLength() int { return 0 } -/// User-defined metadata +// / User-defined metadata func FooterStart(builder *flatbuffers.Builder) { builder.StartObject(5) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Int.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Int.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Int.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Int.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Interval.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Interval.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Interval.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Interval.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/IntervalUnit.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/IntervalUnit.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/IntervalUnit.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/IntervalUnit.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/KeyValue.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/KeyValue.go index c1b85318..0cd5dc62 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/KeyValue.go @@ -22,9 +22,9 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// ---------------------------------------------------------------------- -/// user defined key value pairs to add custom metadata to arrow -/// key namespacing is the responsibility of the user +// / ---------------------------------------------------------------------- +// / user defined key value pairs to add custom metadata to arrow +// / key namespacing is the responsibility of the user type KeyValue struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeBinary.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeBinary.go index 2c3befcc..b25ecc41 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeBinary.go @@ -22,8 +22,8 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Same as Binary, but with 64-bit offsets, allowing to represent -/// extremely large data values. 
+// / Same as Binary, but with 64-bit offsets, allowing to represent +// / extremely large data values. type LargeBinary struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeList.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeList.go index 92f22845..d8bfb9c0 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeList.go @@ -22,8 +22,8 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Same as List, but with 64-bit offsets, allowing to represent -/// extremely large data values. +// / Same as List, but with 64-bit offsets, allowing to represent +// / extremely large data values. type LargeList struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeListView.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeListView.go index 5b1df149..4608c1de 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeListView.go @@ -22,8 +22,8 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Same as ListView, but with 64-bit offsets and sizes, allowing to represent -/// extremely large data values. +// / Same as ListView, but with 64-bit offsets and sizes, allowing to represent +// / extremely large data values. type LargeListView struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeUtf8.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeUtf8.go index e78b33e1..4478fed8 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/LargeUtf8.go @@ -22,8 +22,8 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Same as Utf8, but with 64-bit offsets, allowing to represent -/// extremely large data values. +// / Same as Utf8, but with 64-bit offsets, allowing to represent +// / extremely large data values. 
type LargeUtf8 struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/List.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/List.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/List.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/List.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/ListView.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/ListView.go index 46b1e0b3..cde43cf5 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/ListView.go @@ -22,9 +22,9 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Represents the same logical types that List can, but contains offsets and -/// sizes allowing for writes in any order and sharing of child values among -/// list values. +// / Represents the same logical types that List can, but contains offsets and +// / sizes allowing for writes in any order and sharing of child values among +// / list values. type ListView struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Map.go similarity index 60% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Map.go index 8802aba1..d4871e55 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Map.go @@ -22,31 +22,31 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// A Map is a logical nested type that is represented as -/// -/// List> -/// -/// In this layout, the keys and values are each respectively contiguous. We do -/// not constrain the key and value types, so the application is responsible -/// for ensuring that the keys are hashable and unique. Whether the keys are sorted -/// may be set in the metadata for this field. -/// -/// In a field with Map type, the field has a child Struct field, which then -/// has two children: key type and the second the value type. The names of the -/// child fields may be respectively "entries", "key", and "value", but this is -/// not enforced. -/// -/// Map -/// ```text -/// - child[0] entries: Struct -/// - child[0] key: K -/// - child[1] value: V -/// ``` -/// Neither the "entries" field nor the "key" field may be nullable. -/// -/// The metadata is structured so that Arrow systems without special handling -/// for Map can make Map an alias for List. The "layout" attribute for the Map -/// field must have the same contents as a List. +// / A Map is a logical nested type that is represented as +// / +// / List> +// / +// / In this layout, the keys and values are each respectively contiguous. We do +// / not constrain the key and value types, so the application is responsible +// / for ensuring that the keys are hashable and unique. Whether the keys are sorted +// / may be set in the metadata for this field. +// / +// / In a field with Map type, the field has a child Struct field, which then +// / has two children: key type and the second the value type. 
The names of the +// / child fields may be respectively "entries", "key", and "value", but this is +// / not enforced. +// / +// / Map +// / ```text +// / - child[0] entries: Struct +// / - child[0] key: K +// / - child[1] value: V +// / ``` +// / Neither the "entries" field nor the "key" field may be nullable. +// / +// / The metadata is structured so that Arrow systems without special handling +// / for Map can make Map an alias for List. The "layout" attribute for the Map +// / field must have the same contents as a List. type Map struct { _tab flatbuffers.Table } @@ -67,7 +67,7 @@ func (rcv *Map) Table() flatbuffers.Table { return rcv._tab } -/// Set to true if the keys within each value are sorted +// / Set to true if the keys within each value are sorted func (rcv *Map) KeysSorted() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -76,7 +76,7 @@ func (rcv *Map) KeysSorted() bool { return false } -/// Set to true if the keys within each value are sorted +// / Set to true if the keys within each value are sorted func (rcv *Map) MutateKeysSorted(n bool) bool { return rcv._tab.MutateBoolSlot(4, n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Message.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Message.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Message.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Message.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/MessageHeader.go similarity index 81% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/MessageHeader.go index c12fc105..d7f9907c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/MessageHeader.go @@ -20,14 +20,14 @@ package flatbuf import "strconv" -/// ---------------------------------------------------------------------- -/// The root Message type -/// This union enables us to easily send different message types without -/// redundant storage, and in the future we can easily add new message types. -/// -/// Arrow implementations do not need to implement all of the message types, -/// which may include experimental metadata types. For maximum compatibility, -/// it is best to send data using RecordBatch +// / ---------------------------------------------------------------------- +// / The root Message type +// / This union enables us to easily send different message types without +// / redundant storage, and in the future we can easily add new message types. +// / +// / Arrow implementations do not need to implement all of the message types, +// / which may include experimental metadata types. 
For maximum compatibility, +// / it is best to send data using RecordBatch type MessageHeader byte const ( diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/MetadataVersion.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/MetadataVersion.go index 21b234f9..bb5e99dd 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/MetadataVersion.go @@ -31,7 +31,7 @@ const ( MetadataVersionV3 MetadataVersion = 2 /// >= 0.8.0 (December 2017). Non-backwards compatible with V3. MetadataVersionV4 MetadataVersion = 3 - /// >= 1.0.0 (July 2020. Backwards compatible with V4 (V5 readers can read V4 + /// >= 1.0.0 (July 2020). Backwards compatible with V4 (V5 readers can read V4 /// metadata and IPC messages). Implementations are recommended to provide a /// V4 compatibility mode with V5 format changes disabled. /// diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Null.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Null.go index 3c3eb4bd..3b93a1b6 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Null.go @@ -22,7 +22,7 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// These are stored in the flatbuffer in the Type union below +// / These are stored in the flatbuffer in the Type union below type Null struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Precision.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Precision.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Precision.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Precision.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/RecordBatch.go similarity index 62% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/RecordBatch.go index c50f4a6e..52c72a8a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/RecordBatch.go @@ -22,9 +22,9 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// A data header describing the shared memory layout of a "record" or "row" -/// batch. Some systems call this a "row batch" internally and others a "record -/// batch". +// / A data header describing the shared memory layout of a "record" or "row" +// / batch. Some systems call this a "row batch" internally and others a "record +// / batch". type RecordBatch struct { _tab flatbuffers.Table } @@ -45,8 +45,8 @@ func (rcv *RecordBatch) Table() flatbuffers.Table { return rcv._tab } -/// number of records / rows. The arrays in the batch should all have this -/// length +// / number of records / rows. 
The arrays in the batch should all have this +// / length func (rcv *RecordBatch) Length() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -55,13 +55,13 @@ func (rcv *RecordBatch) Length() int64 { return 0 } -/// number of records / rows. The arrays in the batch should all have this -/// length +// / number of records / rows. The arrays in the batch should all have this +// / length func (rcv *RecordBatch) MutateLength(n int64) bool { return rcv._tab.MutateInt64Slot(4, n) } -/// Nodes correspond to the pre-ordered flattened logical schema +// / Nodes correspond to the pre-ordered flattened logical schema func (rcv *RecordBatch) Nodes(obj *FieldNode, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -81,13 +81,13 @@ func (rcv *RecordBatch) NodesLength() int { return 0 } -/// Nodes correspond to the pre-ordered flattened logical schema -/// Buffers correspond to the pre-ordered flattened buffer tree -/// -/// The number of buffers appended to this list depends on the schema. For -/// example, most primitive arrays will have 2 buffers, 1 for the validity -/// bitmap and 1 for the values. For struct arrays, there will only be a -/// single buffer for the validity (nulls) bitmap +// / Nodes correspond to the pre-ordered flattened logical schema +// / Buffers correspond to the pre-ordered flattened buffer tree +// / +// / The number of buffers appended to this list depends on the schema. For +// / example, most primitive arrays will have 2 buffers, 1 for the validity +// / bitmap and 1 for the values. For struct arrays, there will only be a +// / single buffer for the validity (nulls) bitmap func (rcv *RecordBatch) Buffers(obj *Buffer, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -107,13 +107,13 @@ func (rcv *RecordBatch) BuffersLength() int { return 0 } -/// Buffers correspond to the pre-ordered flattened buffer tree -/// -/// The number of buffers appended to this list depends on the schema. For -/// example, most primitive arrays will have 2 buffers, 1 for the validity -/// bitmap and 1 for the values. For struct arrays, there will only be a -/// single buffer for the validity (nulls) bitmap -/// Optional compression of the message body +// / Buffers correspond to the pre-ordered flattened buffer tree +// / +// / The number of buffers appended to this list depends on the schema. For +// / example, most primitive arrays will have 2 buffers, 1 for the validity +// / bitmap and 1 for the values. For struct arrays, there will only be a +// / single buffer for the validity (nulls) bitmap +// / Optional compression of the message body func (rcv *RecordBatch) Compression(obj *BodyCompression) *BodyCompression { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -127,21 +127,21 @@ func (rcv *RecordBatch) Compression(obj *BodyCompression) *BodyCompression { return nil } -/// Optional compression of the message body -/// Some types such as Utf8View are represented using a variable number of buffers. -/// For each such Field in the pre-ordered flattened logical schema, there will be -/// an entry in variadicBufferCounts to indicate the number of number of variadic -/// buffers which belong to that Field in the current RecordBatch. -/// -/// For example, the schema -/// col1: Struct -/// col2: Utf8View -/// contains two Fields with variadic buffers so variadicBufferCounts will have -/// two entries, the first counting the variadic buffers of `col1.beta` and the -/// second counting `col2`'s. 
-/// -/// This field may be omitted if and only if the schema contains no Fields with -/// a variable number of buffers, such as BinaryView and Utf8View. +// / Optional compression of the message body +// / Some types such as Utf8View are represented using a variable number of buffers. +// / For each such Field in the pre-ordered flattened logical schema, there will be +// / an entry in variadicBufferCounts to indicate the number of number of variadic +// / buffers which belong to that Field in the current RecordBatch. +// / +// / For example, the schema +// / col1: Struct +// / col2: Utf8View +// / contains two Fields with variadic buffers so variadicBufferCounts will have +// / two entries, the first counting the variadic buffers of `col1.beta` and the +// / second counting `col2`'s. +// / +// / This field may be omitted if and only if the schema contains no Fields with +// / a variable number of buffers, such as BinaryView and Utf8View. func (rcv *RecordBatch) VariadicBufferCounts(j int) int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { @@ -159,20 +159,20 @@ func (rcv *RecordBatch) VariadicBufferCountsLength() int { return 0 } -/// Some types such as Utf8View are represented using a variable number of buffers. -/// For each such Field in the pre-ordered flattened logical schema, there will be -/// an entry in variadicBufferCounts to indicate the number of number of variadic -/// buffers which belong to that Field in the current RecordBatch. -/// -/// For example, the schema -/// col1: Struct -/// col2: Utf8View -/// contains two Fields with variadic buffers so variadicBufferCounts will have -/// two entries, the first counting the variadic buffers of `col1.beta` and the -/// second counting `col2`'s. -/// -/// This field may be omitted if and only if the schema contains no Fields with -/// a variable number of buffers, such as BinaryView and Utf8View. +// / Some types such as Utf8View are represented using a variable number of buffers. +// / For each such Field in the pre-ordered flattened logical schema, there will be +// / an entry in variadicBufferCounts to indicate the number of number of variadic +// / buffers which belong to that Field in the current RecordBatch. +// / +// / For example, the schema +// / col1: Struct +// / col2: Utf8View +// / contains two Fields with variadic buffers so variadicBufferCounts will have +// / two entries, the first counting the variadic buffers of `col1.beta` and the +// / second counting `col2`'s. +// / +// / This field may be omitted if and only if the schema contains no Fields with +// / a variable number of buffers, such as BinaryView and Utf8View. func (rcv *RecordBatch) MutateVariadicBufferCounts(j int, n int64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/RunEndEncoded.go similarity index 83% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/RunEndEncoded.go index fa414c1b..b88460b2 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/RunEndEncoded.go @@ -22,11 +22,11 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Contains two child arrays, run_ends and values. 
-/// The run_ends child array must be a 16/32/64-bit integer array -/// which encodes the indices at which the run with the value in -/// each corresponding index in the values child array ends. -/// Like list/struct types, the value array can be of any type. +// / Contains two child arrays, run_ends and values. +// / The run_ends child array must be a 16/32/64-bit integer array +// / which encodes the indices at which the run with the value in +// / each corresponding index in the values child array ends. +// / Like list/struct types, the value array can be of any type. type RunEndEncoded struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunLengthEncoded.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/RunLengthEncoded.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunLengthEncoded.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/RunLengthEncoded.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Schema.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Schema.go index 4ee5ecc9..ae5b248a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Schema.go @@ -22,8 +22,8 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// ---------------------------------------------------------------------- -/// A Schema describes the columns in a row batch +// / ---------------------------------------------------------------------- +// / A Schema describes the columns in a row batch type Schema struct { _tab flatbuffers.Table } @@ -44,9 +44,9 @@ func (rcv *Schema) Table() flatbuffers.Table { return rcv._tab } -/// endianness of the buffer -/// it is Little Endian by default -/// if endianness doesn't match the underlying system then the vectors need to be converted +// / endianness of the buffer +// / it is Little Endian by default +// / if endianness doesn't match the underlying system then the vectors need to be converted func (rcv *Schema) Endianness() Endianness { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -55,9 +55,9 @@ func (rcv *Schema) Endianness() Endianness { return 0 } -/// endianness of the buffer -/// it is Little Endian by default -/// if endianness doesn't match the underlying system then the vectors need to be converted +// / endianness of the buffer +// / it is Little Endian by default +// / if endianness doesn't match the underlying system then the vectors need to be converted func (rcv *Schema) MutateEndianness(n Endianness) bool { return rcv._tab.MutateInt16Slot(4, int16(n)) } @@ -102,7 +102,7 @@ func (rcv *Schema) CustomMetadataLength() int { return 0 } -/// Features used in the stream/file. +// / Features used in the stream/file. func (rcv *Schema) Features(j int) Feature { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -120,7 +120,7 @@ func (rcv *Schema) FeaturesLength() int { return 0 } -/// Features used in the stream/file. +// / Features used in the stream/file. 
func (rcv *Schema) MutateFeatures(j int, n Feature) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseMatrixIndexCSR.go similarity index 59% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseMatrixIndexCSR.go index de821765..2477af10 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseMatrixIndexCSR.go @@ -22,7 +22,7 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Compressed Sparse Row format, that is matrix-specific. +// / Compressed Sparse Row format, that is matrix-specific. type SparseMatrixIndexCSR struct { _tab flatbuffers.Table } @@ -43,7 +43,7 @@ func (rcv *SparseMatrixIndexCSR) Table() flatbuffers.Table { return rcv._tab } -/// The type of values in indptrBuffer +// / The type of values in indptrBuffer func (rcv *SparseMatrixIndexCSR) IndptrType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -57,29 +57,29 @@ func (rcv *SparseMatrixIndexCSR) IndptrType(obj *Int) *Int { return nil } -/// The type of values in indptrBuffer -/// indptrBuffer stores the location and size of indptr array that -/// represents the range of the rows. -/// The i-th row spans from indptr[i] to indptr[i+1] in the data. -/// The length of this array is 1 + (the number of rows), and the type -/// of index value is long. -/// -/// For example, let X be the following 6x4 matrix: -/// -/// X := [[0, 1, 2, 0], -/// [0, 0, 3, 0], -/// [0, 4, 0, 5], -/// [0, 0, 0, 0], -/// [6, 0, 7, 8], -/// [0, 9, 0, 0]]. -/// -/// The array of non-zero values in X is: -/// -/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. -/// -/// And the indptr of X is: -/// -/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +// / The type of values in indptrBuffer +// / indptrBuffer stores the location and size of indptr array that +// / represents the range of the rows. +// / The i-th row spans from indptr[i] to indptr[i+1] in the data. +// / The length of this array is 1 + (the number of rows), and the type +// / of index value is long. +// / +// / For example, let X be the following 6x4 matrix: +// / +// / X := [[0, 1, 2, 0], +// / [0, 0, 3, 0], +// / [0, 4, 0, 5], +// / [0, 0, 0, 0], +// / [6, 0, 7, 8], +// / [0, 9, 0, 0]]. +// / +// / The array of non-zero values in X is: +// / +// / values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +// / +// / And the indptr of X is: +// / +// / indptr(X) = [0, 2, 3, 5, 5, 8, 10]. func (rcv *SparseMatrixIndexCSR) IndptrBuffer(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -93,29 +93,29 @@ func (rcv *SparseMatrixIndexCSR) IndptrBuffer(obj *Buffer) *Buffer { return nil } -/// indptrBuffer stores the location and size of indptr array that -/// represents the range of the rows. 
-/// The i-th row spans from indptr[i] to indptr[i+1] in the data. -/// The length of this array is 1 + (the number of rows), and the type -/// of index value is long. -/// -/// For example, let X be the following 6x4 matrix: -/// -/// X := [[0, 1, 2, 0], -/// [0, 0, 3, 0], -/// [0, 4, 0, 5], -/// [0, 0, 0, 0], -/// [6, 0, 7, 8], -/// [0, 9, 0, 0]]. -/// -/// The array of non-zero values in X is: -/// -/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. -/// -/// And the indptr of X is: -/// -/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. -/// The type of values in indicesBuffer +// / indptrBuffer stores the location and size of indptr array that +// / represents the range of the rows. +// / The i-th row spans from indptr[i] to indptr[i+1] in the data. +// / The length of this array is 1 + (the number of rows), and the type +// / of index value is long. +// / +// / For example, let X be the following 6x4 matrix: +// / +// / X := [[0, 1, 2, 0], +// / [0, 0, 3, 0], +// / [0, 4, 0, 5], +// / [0, 0, 0, 0], +// / [6, 0, 7, 8], +// / [0, 9, 0, 0]]. +// / +// / The array of non-zero values in X is: +// / +// / values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +// / +// / And the indptr of X is: +// / +// / indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +// / The type of values in indicesBuffer func (rcv *SparseMatrixIndexCSR) IndicesType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -129,16 +129,16 @@ func (rcv *SparseMatrixIndexCSR) IndicesType(obj *Int) *Int { return nil } -/// The type of values in indicesBuffer -/// indicesBuffer stores the location and size of the array that -/// contains the column indices of the corresponding non-zero values. -/// The type of index value is long. -/// -/// For example, the indices of the above X is: -/// -/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. -/// -/// Note that the indices are sorted in lexicographical order for each row. +// / The type of values in indicesBuffer +// / indicesBuffer stores the location and size of the array that +// / contains the column indices of the corresponding non-zero values. +// / The type of index value is long. +// / +// / For example, the indices of the above X is: +// / +// / indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +// / +// / Note that the indices are sorted in lexicographical order for each row. func (rcv *SparseMatrixIndexCSR) IndicesBuffer(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -152,15 +152,15 @@ func (rcv *SparseMatrixIndexCSR) IndicesBuffer(obj *Buffer) *Buffer { return nil } -/// indicesBuffer stores the location and size of the array that -/// contains the column indices of the corresponding non-zero values. -/// The type of index value is long. -/// -/// For example, the indices of the above X is: -/// -/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. -/// -/// Note that the indices are sorted in lexicographical order for each row. +// / indicesBuffer stores the location and size of the array that +// / contains the column indices of the corresponding non-zero values. +// / The type of index value is long. +// / +// / For example, the indices of the above X is: +// / +// / indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +// / +// / Note that the indices are sorted in lexicographical order for each row. 
func SparseMatrixIndexCSRStart(builder *flatbuffers.Builder) { builder.StartObject(4) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseMatrixIndexCSX.go similarity index 60% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseMatrixIndexCSX.go index c28cc5d0..7f262dee 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseMatrixIndexCSX.go @@ -22,7 +22,7 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Compressed Sparse format, that is matrix-specific. +// / Compressed Sparse format, that is matrix-specific. type SparseMatrixIndexCSX struct { _tab flatbuffers.Table } @@ -43,7 +43,7 @@ func (rcv *SparseMatrixIndexCSX) Table() flatbuffers.Table { return rcv._tab } -/// Which axis, row or column, is compressed +// / Which axis, row or column, is compressed func (rcv *SparseMatrixIndexCSX) CompressedAxis() SparseMatrixCompressedAxis { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -52,12 +52,12 @@ func (rcv *SparseMatrixIndexCSX) CompressedAxis() SparseMatrixCompressedAxis { return 0 } -/// Which axis, row or column, is compressed +// / Which axis, row or column, is compressed func (rcv *SparseMatrixIndexCSX) MutateCompressedAxis(n SparseMatrixCompressedAxis) bool { return rcv._tab.MutateInt16Slot(4, int16(n)) } -/// The type of values in indptrBuffer +// / The type of values in indptrBuffer func (rcv *SparseMatrixIndexCSX) IndptrType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -71,30 +71,30 @@ func (rcv *SparseMatrixIndexCSX) IndptrType(obj *Int) *Int { return nil } -/// The type of values in indptrBuffer -/// indptrBuffer stores the location and size of indptr array that -/// represents the range of the rows. -/// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. -/// The length of this array is 1 + (the number of rows), and the type -/// of index value is long. -/// -/// For example, let X be the following 6x4 matrix: -/// ```text -/// X := [[0, 1, 2, 0], -/// [0, 0, 3, 0], -/// [0, 4, 0, 5], -/// [0, 0, 0, 0], -/// [6, 0, 7, 8], -/// [0, 9, 0, 0]]. -/// ``` -/// The array of non-zero values in X is: -/// ```text -/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. -/// ``` -/// And the indptr of X is: -/// ```text -/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. -/// ``` +// / The type of values in indptrBuffer +// / indptrBuffer stores the location and size of indptr array that +// / represents the range of the rows. +// / The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. +// / The length of this array is 1 + (the number of rows), and the type +// / of index value is long. +// / +// / For example, let X be the following 6x4 matrix: +// / ```text +// / X := [[0, 1, 2, 0], +// / [0, 0, 3, 0], +// / [0, 4, 0, 5], +// / [0, 0, 0, 0], +// / [6, 0, 7, 8], +// / [0, 9, 0, 0]]. +// / ``` +// / The array of non-zero values in X is: +// / ```text +// / values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +// / ``` +// / And the indptr of X is: +// / ```text +// / indptr(X) = [0, 2, 3, 5, 5, 8, 10]. 
+// / ``` func (rcv *SparseMatrixIndexCSX) IndptrBuffer(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -108,30 +108,30 @@ func (rcv *SparseMatrixIndexCSX) IndptrBuffer(obj *Buffer) *Buffer { return nil } -/// indptrBuffer stores the location and size of indptr array that -/// represents the range of the rows. -/// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. -/// The length of this array is 1 + (the number of rows), and the type -/// of index value is long. -/// -/// For example, let X be the following 6x4 matrix: -/// ```text -/// X := [[0, 1, 2, 0], -/// [0, 0, 3, 0], -/// [0, 4, 0, 5], -/// [0, 0, 0, 0], -/// [6, 0, 7, 8], -/// [0, 9, 0, 0]]. -/// ``` -/// The array of non-zero values in X is: -/// ```text -/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. -/// ``` -/// And the indptr of X is: -/// ```text -/// indptr(X) = [0, 2, 3, 5, 5, 8, 10]. -/// ``` -/// The type of values in indicesBuffer +// / indptrBuffer stores the location and size of indptr array that +// / represents the range of the rows. +// / The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data. +// / The length of this array is 1 + (the number of rows), and the type +// / of index value is long. +// / +// / For example, let X be the following 6x4 matrix: +// / ```text +// / X := [[0, 1, 2, 0], +// / [0, 0, 3, 0], +// / [0, 4, 0, 5], +// / [0, 0, 0, 0], +// / [6, 0, 7, 8], +// / [0, 9, 0, 0]]. +// / ``` +// / The array of non-zero values in X is: +// / ```text +// / values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9]. +// / ``` +// / And the indptr of X is: +// / ```text +// / indptr(X) = [0, 2, 3, 5, 5, 8, 10]. +// / ``` +// / The type of values in indicesBuffer func (rcv *SparseMatrixIndexCSX) IndicesType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -145,16 +145,16 @@ func (rcv *SparseMatrixIndexCSX) IndicesType(obj *Int) *Int { return nil } -/// The type of values in indicesBuffer -/// indicesBuffer stores the location and size of the array that -/// contains the column indices of the corresponding non-zero values. -/// The type of index value is long. -/// -/// For example, the indices of the above X is: -/// ```text -/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. -/// ``` -/// Note that the indices are sorted in lexicographical order for each row. +// / The type of values in indicesBuffer +// / indicesBuffer stores the location and size of the array that +// / contains the column indices of the corresponding non-zero values. +// / The type of index value is long. +// / +// / For example, the indices of the above X is: +// / ```text +// / indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +// / ``` +// / Note that the indices are sorted in lexicographical order for each row. func (rcv *SparseMatrixIndexCSX) IndicesBuffer(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { @@ -168,15 +168,15 @@ func (rcv *SparseMatrixIndexCSX) IndicesBuffer(obj *Buffer) *Buffer { return nil } -/// indicesBuffer stores the location and size of the array that -/// contains the column indices of the corresponding non-zero values. -/// The type of index value is long. -/// -/// For example, the indices of the above X is: -/// ```text -/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. -/// ``` -/// Note that the indices are sorted in lexicographical order for each row. +// / indicesBuffer stores the location and size of the array that +// / contains the column indices of the corresponding non-zero values. 
+// / The type of index value is long. +// / +// / For example, the indices of the above X is: +// / ```text +// / indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1]. +// / ``` +// / Note that the indices are sorted in lexicographical order for each row. func SparseMatrixIndexCSXStart(builder *flatbuffers.Builder) { builder.StartObject(5) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensor.go similarity index 87% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensor.go index 6f3f5579..8f67e1fc 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensor.go @@ -54,9 +54,9 @@ func (rcv *SparseTensor) MutateTypeType(n Type) bool { return rcv._tab.MutateByteSlot(4, byte(n)) } -/// The type of data contained in a value cell. -/// Currently only fixed-width value types are supported, -/// no strings or nested types. +// / The type of data contained in a value cell. +// / Currently only fixed-width value types are supported, +// / no strings or nested types. func (rcv *SparseTensor) Type(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -66,10 +66,10 @@ func (rcv *SparseTensor) Type(obj *flatbuffers.Table) bool { return false } -/// The type of data contained in a value cell. -/// Currently only fixed-width value types are supported, -/// no strings or nested types. -/// The dimensions of the tensor, optionally named. +// / The type of data contained in a value cell. +// / Currently only fixed-width value types are supported, +// / no strings or nested types. +// / The dimensions of the tensor, optionally named. func (rcv *SparseTensor) Shape(obj *TensorDim, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -90,8 +90,8 @@ func (rcv *SparseTensor) ShapeLength() int { return 0 } -/// The dimensions of the tensor, optionally named. -/// The number of non-zero values in a sparse tensor. +// / The dimensions of the tensor, optionally named. +// / The number of non-zero values in a sparse tensor. func (rcv *SparseTensor) NonZeroLength() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -100,7 +100,7 @@ func (rcv *SparseTensor) NonZeroLength() int64 { return 0 } -/// The number of non-zero values in a sparse tensor. +// / The number of non-zero values in a sparse tensor. 
func (rcv *SparseTensor) MutateNonZeroLength(n int64) bool { return rcv._tab.MutateInt64Slot(10, n) } @@ -117,7 +117,7 @@ func (rcv *SparseTensor) MutateSparseIndexType(n SparseTensorIndex) bool { return rcv._tab.MutateByteSlot(12, byte(n)) } -/// Sparse tensor index +// / Sparse tensor index func (rcv *SparseTensor) SparseIndex(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(14)) if o != 0 { @@ -127,8 +127,8 @@ func (rcv *SparseTensor) SparseIndex(obj *flatbuffers.Table) bool { return false } -/// Sparse tensor index -/// The location and size of the tensor's data +// / Sparse tensor index +// / The location and size of the tensor's data func (rcv *SparseTensor) Data(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(16)) if o != 0 { @@ -142,7 +142,7 @@ func (rcv *SparseTensor) Data(obj *Buffer) *Buffer { return nil } -/// The location and size of the tensor's data +// / The location and size of the tensor's data func SparseTensorStart(builder *flatbuffers.Builder) { builder.StartObject(7) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndex.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensorIndex.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndex.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensorIndex.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensorIndexCOO.go similarity index 62% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensorIndexCOO.go index f8eee99f..bf1c218e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensorIndexCOO.go @@ -22,38 +22,38 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// ---------------------------------------------------------------------- -/// EXPERIMENTAL: Data structures for sparse tensors -/// Coordinate (COO) format of sparse tensor index. -/// -/// COO's index list are represented as a NxM matrix, -/// where N is the number of non-zero values, -/// and M is the number of dimensions of a sparse tensor. -/// -/// indicesBuffer stores the location and size of the data of this indices -/// matrix. The value type and the stride of the indices matrix is -/// specified in indicesType and indicesStrides fields. -/// -/// For example, let X be a 2x3x4x5 tensor, and it has the following -/// 6 non-zero values: -/// ```text -/// X[0, 1, 2, 0] := 1 -/// X[1, 1, 2, 3] := 2 -/// X[0, 2, 1, 0] := 3 -/// X[0, 1, 3, 0] := 4 -/// X[0, 1, 2, 1] := 5 -/// X[1, 2, 0, 4] := 6 -/// ``` -/// In COO format, the index matrix of X is the following 4x6 matrix: -/// ```text -/// [[0, 0, 0, 0, 1, 1], -/// [1, 1, 1, 2, 1, 2], -/// [2, 2, 3, 1, 2, 0], -/// [0, 1, 0, 0, 3, 4]] -/// ``` -/// When isCanonical is true, the indices is sorted in lexicographical order -/// (row-major order), and it does not have duplicated entries. Otherwise, -/// the indices may not be sorted, or may have duplicated entries. +// / ---------------------------------------------------------------------- +// / EXPERIMENTAL: Data structures for sparse tensors +// / Coordinate (COO) format of sparse tensor index. 
+// / +// / COO's index list are represented as a NxM matrix, +// / where N is the number of non-zero values, +// / and M is the number of dimensions of a sparse tensor. +// / +// / indicesBuffer stores the location and size of the data of this indices +// / matrix. The value type and the stride of the indices matrix is +// / specified in indicesType and indicesStrides fields. +// / +// / For example, let X be a 2x3x4x5 tensor, and it has the following +// / 6 non-zero values: +// / ```text +// / X[0, 1, 2, 0] := 1 +// / X[1, 1, 2, 3] := 2 +// / X[0, 2, 1, 0] := 3 +// / X[0, 1, 3, 0] := 4 +// / X[0, 1, 2, 1] := 5 +// / X[1, 2, 0, 4] := 6 +// / ``` +// / In COO format, the index matrix of X is the following 4x6 matrix: +// / ```text +// / [[0, 0, 0, 0, 1, 1], +// / [1, 1, 1, 2, 1, 2], +// / [2, 2, 3, 1, 2, 0], +// / [0, 1, 0, 0, 3, 4]] +// / ``` +// / When isCanonical is true, the indices is sorted in lexicographical order +// / (row-major order), and it does not have duplicated entries. Otherwise, +// / the indices may not be sorted, or may have duplicated entries. type SparseTensorIndexCOO struct { _tab flatbuffers.Table } @@ -74,7 +74,7 @@ func (rcv *SparseTensorIndexCOO) Table() flatbuffers.Table { return rcv._tab } -/// The type of values in indicesBuffer +// / The type of values in indicesBuffer func (rcv *SparseTensorIndexCOO) IndicesType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -88,9 +88,9 @@ func (rcv *SparseTensorIndexCOO) IndicesType(obj *Int) *Int { return nil } -/// The type of values in indicesBuffer -/// Non-negative byte offsets to advance one value cell along each dimension -/// If omitted, default to row-major order (C-like). +// / The type of values in indicesBuffer +// / Non-negative byte offsets to advance one value cell along each dimension +// / If omitted, default to row-major order (C-like). func (rcv *SparseTensorIndexCOO) IndicesStrides(j int) int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -108,8 +108,8 @@ func (rcv *SparseTensorIndexCOO) IndicesStridesLength() int { return 0 } -/// Non-negative byte offsets to advance one value cell along each dimension -/// If omitted, default to row-major order (C-like). +// / Non-negative byte offsets to advance one value cell along each dimension +// / If omitted, default to row-major order (C-like). func (rcv *SparseTensorIndexCOO) MutateIndicesStrides(j int, n int64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -119,7 +119,7 @@ func (rcv *SparseTensorIndexCOO) MutateIndicesStrides(j int, n int64) bool { return false } -/// The location and size of the indices matrix's data +// / The location and size of the indices matrix's data func (rcv *SparseTensorIndexCOO) IndicesBuffer(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -133,12 +133,12 @@ func (rcv *SparseTensorIndexCOO) IndicesBuffer(obj *Buffer) *Buffer { return nil } -/// The location and size of the indices matrix's data -/// This flag is true if and only if the indices matrix is sorted in -/// row-major order, and does not have duplicated entries. -/// This sort order is the same as of Tensorflow's SparseTensor, -/// but it is inverse order of SciPy's canonical coo_matrix -/// (SciPy employs column-major order for its coo_matrix). +// / The location and size of the indices matrix's data +// / This flag is true if and only if the indices matrix is sorted in +// / row-major order, and does not have duplicated entries. 
+// / This sort order is the same as of Tensorflow's SparseTensor, +// / but it is inverse order of SciPy's canonical coo_matrix +// / (SciPy employs column-major order for its coo_matrix). func (rcv *SparseTensorIndexCOO) IsCanonical() bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -147,11 +147,11 @@ func (rcv *SparseTensorIndexCOO) IsCanonical() bool { return false } -/// This flag is true if and only if the indices matrix is sorted in -/// row-major order, and does not have duplicated entries. -/// This sort order is the same as of Tensorflow's SparseTensor, -/// but it is inverse order of SciPy's canonical coo_matrix -/// (SciPy employs column-major order for its coo_matrix). +// / This flag is true if and only if the indices matrix is sorted in +// / row-major order, and does not have duplicated entries. +// / This sort order is the same as of Tensorflow's SparseTensor, +// / but it is inverse order of SciPy's canonical coo_matrix +// / (SciPy employs column-major order for its coo_matrix). func (rcv *SparseTensorIndexCOO) MutateIsCanonical(n bool) bool { return rcv._tab.MutateBoolSlot(10, n) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensorIndexCSF.go similarity index 51% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensorIndexCSF.go index a824c84e..66226e04 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/SparseTensorIndexCSF.go @@ -22,7 +22,7 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Compressed Sparse Fiber (CSF) sparse tensor index. +// / Compressed Sparse Fiber (CSF) sparse tensor index. type SparseTensorIndexCSF struct { _tab flatbuffers.Table } @@ -43,37 +43,37 @@ func (rcv *SparseTensorIndexCSF) Table() flatbuffers.Table { return rcv._tab } -/// CSF is a generalization of compressed sparse row (CSR) index. -/// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf) -/// -/// CSF index recursively compresses each dimension of a tensor into a set -/// of prefix trees. Each path from a root to leaf forms one tensor -/// non-zero index. CSF is implemented with two arrays of buffers and one -/// arrays of integers. -/// -/// For example, let X be a 2x3x4x5 tensor and let it have the following -/// 8 non-zero values: -/// ```text -/// X[0, 0, 0, 1] := 1 -/// X[0, 0, 0, 2] := 2 -/// X[0, 1, 0, 0] := 3 -/// X[0, 1, 0, 2] := 4 -/// X[0, 1, 1, 0] := 5 -/// X[1, 1, 1, 0] := 6 -/// X[1, 1, 1, 1] := 7 -/// X[1, 1, 1, 2] := 8 -/// ``` -/// As a prefix tree this would be represented as: -/// ```text -/// 0 1 -/// / \ | -/// 0 1 1 -/// / / \ | -/// 0 0 1 1 -/// /| /| | /| | -/// 1 2 0 2 0 0 1 2 -/// ``` -/// The type of values in indptrBuffers +// / CSF is a generalization of compressed sparse row (CSR) index. +// / See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf) +// / +// / CSF index recursively compresses each dimension of a tensor into a set +// / of prefix trees. Each path from a root to leaf forms one tensor +// / non-zero index. CSF is implemented with two arrays of buffers and one +// / arrays of integers. 
+// / +// / For example, let X be a 2x3x4x5 tensor and let it have the following +// / 8 non-zero values: +// / ```text +// / X[0, 0, 0, 1] := 1 +// / X[0, 0, 0, 2] := 2 +// / X[0, 1, 0, 0] := 3 +// / X[0, 1, 0, 2] := 4 +// / X[0, 1, 1, 0] := 5 +// / X[1, 1, 1, 0] := 6 +// / X[1, 1, 1, 1] := 7 +// / X[1, 1, 1, 2] := 8 +// / ``` +// / As a prefix tree this would be represented as: +// / ```text +// / 0 1 +// / / \ | +// / 0 1 1 +// / / / \ | +// / 0 0 1 1 +// / /| /| | /| | +// / 1 2 0 2 0 0 1 2 +// / ``` +// / The type of values in indptrBuffers func (rcv *SparseTensorIndexCSF) IndptrType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -87,51 +87,51 @@ func (rcv *SparseTensorIndexCSF) IndptrType(obj *Int) *Int { return nil } -/// CSF is a generalization of compressed sparse row (CSR) index. -/// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf) -/// -/// CSF index recursively compresses each dimension of a tensor into a set -/// of prefix trees. Each path from a root to leaf forms one tensor -/// non-zero index. CSF is implemented with two arrays of buffers and one -/// arrays of integers. -/// -/// For example, let X be a 2x3x4x5 tensor and let it have the following -/// 8 non-zero values: -/// ```text -/// X[0, 0, 0, 1] := 1 -/// X[0, 0, 0, 2] := 2 -/// X[0, 1, 0, 0] := 3 -/// X[0, 1, 0, 2] := 4 -/// X[0, 1, 1, 0] := 5 -/// X[1, 1, 1, 0] := 6 -/// X[1, 1, 1, 1] := 7 -/// X[1, 1, 1, 2] := 8 -/// ``` -/// As a prefix tree this would be represented as: -/// ```text -/// 0 1 -/// / \ | -/// 0 1 1 -/// / / \ | -/// 0 0 1 1 -/// /| /| | /| | -/// 1 2 0 2 0 0 1 2 -/// ``` -/// The type of values in indptrBuffers -/// indptrBuffers stores the sparsity structure. -/// Each two consecutive dimensions in a tensor correspond to a buffer in -/// indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]` -/// and `indptrBuffers[dim][i + 1]` signify a range of nodes in -/// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node. -/// -/// For example, the indptrBuffers for the above X is: -/// ```text -/// indptrBuffer(X) = [ -/// [0, 2, 3], -/// [0, 1, 3, 4], -/// [0, 2, 4, 5, 8] -/// ]. -/// ``` +// / CSF is a generalization of compressed sparse row (CSR) index. +// / See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf) +// / +// / CSF index recursively compresses each dimension of a tensor into a set +// / of prefix trees. Each path from a root to leaf forms one tensor +// / non-zero index. CSF is implemented with two arrays of buffers and one +// / arrays of integers. +// / +// / For example, let X be a 2x3x4x5 tensor and let it have the following +// / 8 non-zero values: +// / ```text +// / X[0, 0, 0, 1] := 1 +// / X[0, 0, 0, 2] := 2 +// / X[0, 1, 0, 0] := 3 +// / X[0, 1, 0, 2] := 4 +// / X[0, 1, 1, 0] := 5 +// / X[1, 1, 1, 0] := 6 +// / X[1, 1, 1, 1] := 7 +// / X[1, 1, 1, 2] := 8 +// / ``` +// / As a prefix tree this would be represented as: +// / ```text +// / 0 1 +// / / \ | +// / 0 1 1 +// / / / \ | +// / 0 0 1 1 +// / /| /| | /| | +// / 1 2 0 2 0 0 1 2 +// / ``` +// / The type of values in indptrBuffers +// / indptrBuffers stores the sparsity structure. +// / Each two consecutive dimensions in a tensor correspond to a buffer in +// / indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]` +// / and `indptrBuffers[dim][i + 1]` signify a range of nodes in +// / `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node. 
+// / +// / For example, the indptrBuffers for the above X is: +// / ```text +// / indptrBuffer(X) = [ +// / [0, 2, 3], +// / [0, 1, 3, 4], +// / [0, 2, 4, 5, 8] +// / ]. +// / ``` func (rcv *SparseTensorIndexCSF) IndptrBuffers(obj *Buffer, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -151,21 +151,21 @@ func (rcv *SparseTensorIndexCSF) IndptrBuffersLength() int { return 0 } -/// indptrBuffers stores the sparsity structure. -/// Each two consecutive dimensions in a tensor correspond to a buffer in -/// indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]` -/// and `indptrBuffers[dim][i + 1]` signify a range of nodes in -/// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node. -/// -/// For example, the indptrBuffers for the above X is: -/// ```text -/// indptrBuffer(X) = [ -/// [0, 2, 3], -/// [0, 1, 3, 4], -/// [0, 2, 4, 5, 8] -/// ]. -/// ``` -/// The type of values in indicesBuffers +// / indptrBuffers stores the sparsity structure. +// / Each two consecutive dimensions in a tensor correspond to a buffer in +// / indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]` +// / and `indptrBuffers[dim][i + 1]` signify a range of nodes in +// / `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node. +// / +// / For example, the indptrBuffers for the above X is: +// / ```text +// / indptrBuffer(X) = [ +// / [0, 2, 3], +// / [0, 1, 3, 4], +// / [0, 2, 4, 5, 8] +// / ]. +// / ``` +// / The type of values in indicesBuffers func (rcv *SparseTensorIndexCSF) IndicesType(obj *Int) *Int { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -179,18 +179,18 @@ func (rcv *SparseTensorIndexCSF) IndicesType(obj *Int) *Int { return nil } -/// The type of values in indicesBuffers -/// indicesBuffers stores values of nodes. -/// Each tensor dimension corresponds to a buffer in indicesBuffers. -/// For example, the indicesBuffers for the above X is: -/// ```text -/// indicesBuffer(X) = [ -/// [0, 1], -/// [0, 1, 1], -/// [0, 0, 1, 1], -/// [1, 2, 0, 2, 0, 0, 1, 2] -/// ]. -/// ``` +// / The type of values in indicesBuffers +// / indicesBuffers stores values of nodes. +// / Each tensor dimension corresponds to a buffer in indicesBuffers. +// / For example, the indicesBuffers for the above X is: +// / ```text +// / indicesBuffer(X) = [ +// / [0, 1], +// / [0, 1, 1], +// / [0, 0, 1, 1], +// / [1, 2, 0, 2, 0, 0, 1, 2] +// / ]. +// / ``` func (rcv *SparseTensorIndexCSF) IndicesBuffers(obj *Buffer, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -210,23 +210,23 @@ func (rcv *SparseTensorIndexCSF) IndicesBuffersLength() int { return 0 } -/// indicesBuffers stores values of nodes. -/// Each tensor dimension corresponds to a buffer in indicesBuffers. -/// For example, the indicesBuffers for the above X is: -/// ```text -/// indicesBuffer(X) = [ -/// [0, 1], -/// [0, 1, 1], -/// [0, 0, 1, 1], -/// [1, 2, 0, 2, 0, 0, 1, 2] -/// ]. -/// ``` -/// axisOrder stores the sequence in which dimensions were traversed to -/// produce the prefix tree. -/// For example, the axisOrder for the above X is: -/// ```text -/// axisOrder(X) = [0, 1, 2, 3]. -/// ``` +// / indicesBuffers stores values of nodes. +// / Each tensor dimension corresponds to a buffer in indicesBuffers. +// / For example, the indicesBuffers for the above X is: +// / ```text +// / indicesBuffer(X) = [ +// / [0, 1], +// / [0, 1, 1], +// / [0, 0, 1, 1], +// / [1, 2, 0, 2, 0, 0, 1, 2] +// / ]. 
+// / ``` +// / axisOrder stores the sequence in which dimensions were traversed to +// / produce the prefix tree. +// / For example, the axisOrder for the above X is: +// / ```text +// / axisOrder(X) = [0, 1, 2, 3]. +// / ``` func (rcv *SparseTensorIndexCSF) AxisOrder(j int) int32 { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { @@ -244,12 +244,12 @@ func (rcv *SparseTensorIndexCSF) AxisOrderLength() int { return 0 } -/// axisOrder stores the sequence in which dimensions were traversed to -/// produce the prefix tree. -/// For example, the axisOrder for the above X is: -/// ```text -/// axisOrder(X) = [0, 1, 2, 3]. -/// ``` +// / axisOrder stores the sequence in which dimensions were traversed to +// / produce the prefix tree. +// / For example, the axisOrder for the above X is: +// / ```text +// / axisOrder(X) = [0, 1, 2, 3]. +// / ``` func (rcv *SparseTensorIndexCSF) MutateAxisOrder(j int, n int32) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Struct_.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Struct_.go index 427e7060..73752a17 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Struct_.go @@ -22,9 +22,9 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// A Struct_ in the flatbuffer metadata is the same as an Arrow Struct -/// (according to the physical memory layout). We used Struct_ here as -/// Struct is a reserved word in Flatbuffers +// / A Struct_ in the flatbuffer metadata is the same as an Arrow Struct +// / (according to the physical memory layout). We used Struct_ here as +// / Struct is a reserved word in Flatbuffers type Struct_ struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Tensor.go similarity index 84% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Tensor.go index 39d70e35..47bfe806 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Tensor.go @@ -54,8 +54,8 @@ func (rcv *Tensor) MutateTypeType(n Type) bool { return rcv._tab.MutateByteSlot(4, byte(n)) } -/// The type of data contained in a value cell. Currently only fixed-width -/// value types are supported, no strings or nested types +// / The type of data contained in a value cell. Currently only fixed-width +// / value types are supported, no strings or nested types func (rcv *Tensor) Type(obj *flatbuffers.Table) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -65,9 +65,9 @@ func (rcv *Tensor) Type(obj *flatbuffers.Table) bool { return false } -/// The type of data contained in a value cell. Currently only fixed-width -/// value types are supported, no strings or nested types -/// The dimensions of the tensor, optionally named +// / The type of data contained in a value cell. 
Currently only fixed-width +// / value types are supported, no strings or nested types +// / The dimensions of the tensor, optionally named func (rcv *Tensor) Shape(obj *TensorDim, j int) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(8)) if o != 0 { @@ -88,9 +88,9 @@ func (rcv *Tensor) ShapeLength() int { return 0 } -/// The dimensions of the tensor, optionally named -/// Non-negative byte offsets to advance one value cell along each dimension -/// If omitted, default to row-major order (C-like). +// / The dimensions of the tensor, optionally named +// / Non-negative byte offsets to advance one value cell along each dimension +// / If omitted, default to row-major order (C-like). func (rcv *Tensor) Strides(j int) int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -108,8 +108,8 @@ func (rcv *Tensor) StridesLength() int { return 0 } -/// Non-negative byte offsets to advance one value cell along each dimension -/// If omitted, default to row-major order (C-like). +// / Non-negative byte offsets to advance one value cell along each dimension +// / If omitted, default to row-major order (C-like). func (rcv *Tensor) MutateStrides(j int, n int64) bool { o := flatbuffers.UOffsetT(rcv._tab.Offset(10)) if o != 0 { @@ -119,7 +119,7 @@ func (rcv *Tensor) MutateStrides(j int, n int64) bool { return false } -/// The location and size of the tensor's data +// / The location and size of the tensor's data func (rcv *Tensor) Data(obj *Buffer) *Buffer { o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) if o != 0 { @@ -133,7 +133,7 @@ func (rcv *Tensor) Data(obj *Buffer) *Buffer { return nil } -/// The location and size of the tensor's data +// / The location and size of the tensor's data func TensorStart(builder *flatbuffers.Builder) { builder.StartObject(5) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/TensorDim.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/TensorDim.go index 14b82120..c6413b6a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/TensorDim.go @@ -22,9 +22,9 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// ---------------------------------------------------------------------- -/// Data structures for dense tensors -/// Shape data for a single axis in a tensor +// / ---------------------------------------------------------------------- +// / Data structures for dense tensors +// / Shape data for a single axis in a tensor type TensorDim struct { _tab flatbuffers.Table } @@ -45,7 +45,7 @@ func (rcv *TensorDim) Table() flatbuffers.Table { return rcv._tab } -/// Length of dimension +// / Length of dimension func (rcv *TensorDim) Size() int64 { o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) if o != 0 { @@ -54,12 +54,12 @@ func (rcv *TensorDim) Size() int64 { return 0 } -/// Length of dimension +// / Length of dimension func (rcv *TensorDim) MutateSize(n int64) bool { return rcv._tab.MutateInt64Slot(4, n) } -/// Name of the dimension, optional +// / Name of the dimension, optional func (rcv *TensorDim) Name() []byte { o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) if o != 0 { @@ -68,7 +68,7 @@ func (rcv *TensorDim) Name() []byte { return nil } -/// Name of the dimension, optional +// / Name of the dimension, optional func 
TensorDimStart(builder *flatbuffers.Builder) { builder.StartObject(2) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Time.go similarity index 73% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Time.go index 2fb6e4c1..13038a6e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Time.go @@ -22,20 +22,20 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Time is either a 32-bit or 64-bit signed integer type representing an -/// elapsed time since midnight, stored in either of four units: seconds, -/// milliseconds, microseconds or nanoseconds. -/// -/// The integer `bitWidth` depends on the `unit` and must be one of the following: -/// * SECOND and MILLISECOND: 32 bits -/// * MICROSECOND and NANOSECOND: 64 bits -/// -/// The allowed values are between 0 (inclusive) and 86400 (=24*60*60) seconds -/// (exclusive), adjusted for the time unit (for example, up to 86400000 -/// exclusive for the MILLISECOND unit). -/// This definition doesn't allow for leap seconds. Time values from -/// measurements with leap seconds will need to be corrected when ingesting -/// into Arrow (for example by replacing the value 86400 with 86399). +// / Time is either a 32-bit or 64-bit signed integer type representing an +// / elapsed time since midnight, stored in either of four units: seconds, +// / milliseconds, microseconds or nanoseconds. +// / +// / The integer `bitWidth` depends on the `unit` and must be one of the following: +// / * SECOND and MILLISECOND: 32 bits +// / * MICROSECOND and NANOSECOND: 64 bits +// / +// / The allowed values are between 0 (inclusive) and 86400 (=24*60*60) seconds +// / (exclusive), adjusted for the time unit (for example, up to 86400000 +// / exclusive for the MILLISECOND unit). +// / This definition doesn't allow for leap seconds. Time values from +// / measurements with leap seconds will need to be corrected when ingesting +// / into Arrow (for example by replacing the value 86400 with 86399). type Time struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TimeUnit.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/TimeUnit.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TimeUnit.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/TimeUnit.go diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Timestamp.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Timestamp.go new file mode 100644 index 00000000..ce172bac --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Timestamp.go @@ -0,0 +1,201 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +// / Timestamp is a 64-bit signed integer representing an elapsed time since a +// / fixed epoch, stored in either of four units: seconds, milliseconds, +// / microseconds or nanoseconds, and is optionally annotated with a timezone. +// / +// / Timestamp values do not include any leap seconds (in other words, all +// / days are considered 86400 seconds long). +// / +// / Timestamps with a non-empty timezone +// / ------------------------------------ +// / +// / If a Timestamp column has a non-empty timezone value, its epoch is +// / 1970-01-01 00:00:00 (January 1st 1970, midnight) in the *UTC* timezone +// / (the Unix epoch), regardless of the Timestamp's own timezone. +// / +// / Therefore, timestamp values with a non-empty timezone correspond to +// / physical points in time together with some additional information about +// / how the data was obtained and/or how to display it (the timezone). +// / +// / For example, the timestamp value 0 with the timezone string "Europe/Paris" +// / corresponds to "January 1st 1970, 00h00" in the UTC timezone, but the +// / application may prefer to display it as "January 1st 1970, 01h00" in +// / the Europe/Paris timezone (which is the same physical point in time). +// / +// / One consequence is that timestamp values with a non-empty timezone +// / can be compared and ordered directly, since they all share the same +// / well-known point of reference (the Unix epoch). +// / +// / Timestamps with an unset / empty timezone +// / ----------------------------------------- +// / +// / If a Timestamp column has no timezone value, its epoch is +// / 1970-01-01 00:00:00 (January 1st 1970, midnight) in an *unknown* timezone. +// / +// / Therefore, timestamp values without a timezone cannot be meaningfully +// / interpreted as physical points in time, but only as calendar / clock +// / indications ("wall clock time") in an unspecified timezone. +// / +// / For example, the timestamp value 0 with an empty timezone string +// / corresponds to "January 1st 1970, 00h00" in an unknown timezone: there +// / is not enough information to interpret it as a well-defined physical +// / point in time. +// / +// / One consequence is that timestamp values without a timezone cannot +// / be reliably compared or ordered, since they may have different points of +// / reference. In particular, it is *not* possible to interpret an unset +// / or empty timezone as the same as "UTC". +// / +// / Conversion between timezones +// / ---------------------------- +// / +// / If a Timestamp column has a non-empty timezone, changing the timezone +// / to a different non-empty value is a metadata-only operation: +// / the timestamp values need not change as their point of reference remains +// / the same (the Unix epoch). +// / +// / However, if a Timestamp column has no timezone value, changing it to a +// / non-empty value requires to think about the desired semantics. 
+// / One possibility is to assume that the original timestamp values are +// / relative to the epoch of the timezone being set; timestamp values should +// / then adjusted to the Unix epoch (for example, changing the timezone from +// / empty to "Europe/Paris" would require converting the timestamp values +// / from "Europe/Paris" to "UTC", which seems counter-intuitive but is +// / nevertheless correct). +// / +// / Guidelines for encoding data from external libraries +// / ---------------------------------------------------- +// / +// / Date & time libraries often have multiple different data types for temporal +// / data. In order to ease interoperability between different implementations the +// / Arrow project has some recommendations for encoding these types into a Timestamp +// / column. +// / +// / An "instant" represents a physical point in time that has no relevant timezone +// / (for example, astronomical data). To encode an instant, use a Timestamp with +// / the timezone string set to "UTC", and make sure the Timestamp values +// / are relative to the UTC epoch (January 1st 1970, midnight). +// / +// / A "zoned date-time" represents a physical point in time annotated with an +// / informative timezone (for example, the timezone in which the data was +// / recorded). To encode a zoned date-time, use a Timestamp with the timezone +// / string set to the name of the timezone, and make sure the Timestamp values +// / are relative to the UTC epoch (January 1st 1970, midnight). +// / +// / (There is some ambiguity between an instant and a zoned date-time with the +// / UTC timezone. Both of these are stored the same in Arrow. Typically, +// / this distinction does not matter. If it does, then an application should +// / use custom metadata or an extension type to distinguish between the two cases.) +// / +// / An "offset date-time" represents a physical point in time combined with an +// / explicit offset from UTC. To encode an offset date-time, use a Timestamp +// / with the timezone string set to the numeric timezone offset string +// / (e.g. "+03:00"), and make sure the Timestamp values are relative to +// / the UTC epoch (January 1st 1970, midnight). +// / +// / A "naive date-time" (also called "local date-time" in some libraries) +// / represents a wall clock time combined with a calendar date, but with +// / no indication of how to map this information to a physical point in time. +// / Naive date-times must be handled with care because of this missing +// / information, and also because daylight saving time (DST) may make +// / some values ambiguous or nonexistent. A naive date-time may be +// / stored as a struct with Date and Time fields. However, it may also be +// / encoded into a Timestamp column with an empty timezone. The timestamp +// / values should be computed "as if" the timezone of the date-time values +// / was UTC; for example, the naive date-time "January 1st 1970, 00h00" would +// / be encoded as timestamp value 0. 
+type Timestamp struct { + _tab flatbuffers.Table +} + +func GetRootAsTimestamp(buf []byte, offset flatbuffers.UOffsetT) *Timestamp { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Timestamp{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Timestamp) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Timestamp) Table() flatbuffers.Table { + return rcv._tab +} + +func (rcv *Timestamp) Unit() TimeUnit { + o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) + if o != 0 { + return TimeUnit(rcv._tab.GetInt16(o + rcv._tab.Pos)) + } + return 0 +} + +func (rcv *Timestamp) MutateUnit(n TimeUnit) bool { + return rcv._tab.MutateInt16Slot(4, int16(n)) +} + +// / The timezone is an optional string indicating the name of a timezone, +// / one of: +// / +// / * As used in the Olson timezone database (the "tz database" or +// / "tzdata"), such as "America/New_York". +// / * An absolute timezone offset of the form "+XX:XX" or "-XX:XX", +// / such as "+07:30". +// / +// / Whether a timezone string is present indicates different semantics about +// / the data (see above). +func (rcv *Timestamp) Timezone() []byte { + o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) + if o != 0 { + return rcv._tab.ByteVector(o + rcv._tab.Pos) + } + return nil +} + +// / The timezone is an optional string indicating the name of a timezone, +// / one of: +// / +// / * As used in the Olson timezone database (the "tz database" or +// / "tzdata"), such as "America/New_York". +// / * An absolute timezone offset of the form "+XX:XX" or "-XX:XX", +// / such as "+07:30". +// / +// / Whether a timezone string is present indicates different semantics about +// / the data (see above). +func TimestampStart(builder *flatbuffers.Builder) { + builder.StartObject(2) +} +func TimestampAddUnit(builder *flatbuffers.Builder, unit TimeUnit) { + builder.PrependInt16Slot(0, int16(unit), 0) +} +func TimestampAddTimezone(builder *flatbuffers.Builder, timezone flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(timezone), 0) +} +func TimestampEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Type.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Type.go index ab2bce9c..df8ba865 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Type.go @@ -20,9 +20,9 @@ package flatbuf import "strconv" -/// ---------------------------------------------------------------------- -/// Top-level Type value, enabling extensible type-specific metadata. We can -/// add new logical types to Type without breaking backwards compatibility +// / ---------------------------------------------------------------------- +// / Top-level Type value, enabling extensible type-specific metadata. 
We can +// / add new logical types to Type without breaking backwards compatibility type Type byte const ( diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Union.go similarity index 90% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Union.go index e34121d4..0367fb3c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Union.go @@ -22,10 +22,10 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// A union is a complex type with children in Field -/// By default ids in the type vector refer to the offsets in the children -/// optionally typeIds provides an indirection between the child offset and the type id -/// for each child `typeIds[offset]` is the id used in the type vector +// / A union is a complex type with children in Field +// / By default ids in the type vector refer to the offsets in the children +// / optionally typeIds provides an indirection between the child offset and the type id +// / for each child `typeIds[offset]` is the id used in the type vector type Union struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/UnionMode.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/UnionMode.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/UnionMode.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/UnionMode.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Utf8.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Utf8.go index 4ff365a3..cab4ce77 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Utf8.go @@ -22,7 +22,7 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Unicode with UTF-8 encoding +// / Unicode with UTF-8 encoding type Utf8 struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Utf8View.go similarity index 76% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Utf8View.go index 9cf82149..f294126a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/flatbuf/Utf8View.go @@ -22,13 +22,13 @@ import ( flatbuffers "github.com/google/flatbuffers/go" ) -/// Logically the same as Utf8, but the internal representation uses a view -/// struct that contains the string length and either the string's entire data -/// inline (for small strings) or an inlined prefix, an index of another buffer, -/// and an offset pointing to a slice in that buffer (for non-small strings). -/// -/// Since it uses a variable number of data buffers, each Field with this type -/// must have a corresponding entry in `variadicBufferCounts`. 
+// / Logically the same as Utf8, but the internal representation uses a view +// / struct that contains the string length and either the string's entire data +// / inline (for small strings) or an inlined prefix, an index of another buffer, +// / and an offset pointing to a slice in that buffer (for non-small strings). +// / +// / Since it uses a variable number of data buffers, each Field with this type +// / must have a corresponding entry in `variadicBufferCounts`. type Utf8View struct { _tab flatbuffers.Table } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/utils.go similarity index 80% rename from vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go rename to vendor/github.com/apache/arrow-go/v18/arrow/internal/utils.go index 265f030d..d471e62a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/utils.go @@ -17,8 +17,8 @@ package internal import ( - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" ) const CurMetadataVersion = flatbuf.MetadataVersionV5 @@ -45,3 +45,15 @@ func HasValidityBitmap(id arrow.Type, version flatbuf.MetadataVersion) bool { } return true } + +// HasBufferSizesBuffer returns whether a given type has an extra buffer +// in the C ABI to store the sizes of other buffers. Currently this is only +// StringView and BinaryView. +func HasBufferSizesBuffer(id arrow.Type) bool { + switch id { + case arrow.STRING_VIEW, arrow.BINARY_VIEW: + return true + default: + return false + } +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/compression.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/compression.go index 73fb9165..f74510a5 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/compression.go @@ -19,9 +19,9 @@ package ipc import ( "io" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" "github.com/klauspost/compress/zstd" "github.com/pierrec/lz4/v4" ) @@ -104,7 +104,9 @@ type lz4Decompressor struct { *lz4.Reader } -func (z *lz4Decompressor) Close() {} +func (z *lz4Decompressor) Close() { + z.Reader.Reset(nil) +} func getDecompressor(codec flatbuf.CompressionType) decompressor { switch codec { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/endian_swap.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/endian_swap.go index d98fec10..80d8b3cc 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/endian_swap.go @@ -18,11 +18,12 @@ package ipc import ( "errors" + "fmt" "math/bits" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - 
"github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/memory" ) // swap the endianness of the array's buffers as needed in-place to save @@ -119,7 +120,10 @@ func swapType(dt arrow.DataType, data *array.Data) (err error) { return swapType(dt.IndexType, data) case arrow.FixedWidthDataType: byteSwapBuffer(dt.BitWidth(), data.Buffers()[1]) + default: + err = fmt.Errorf("%w: swapping endianness of %s", arrow.ErrNotImplemented, dt) } + return } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_reader.go similarity index 62% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_reader.go index 10cb2cae..9135529d 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_reader.go @@ -23,26 +23,138 @@ import ( "fmt" "io" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/endian" - "github.com/apache/arrow/go/v14/arrow/internal" - "github.com/apache/arrow/go/v14/arrow/internal/dictutils" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" ) -// FileReader is an Arrow file reader. 
-type FileReader struct { +type readerImpl interface { + getFooterEnd() (int64, error) + getBytes(offset, length int64) ([]byte, error) + dict(memory.Allocator, *footerBlock, int) (dataBlock, error) + block(memory.Allocator, *footerBlock, int) (dataBlock, error) +} + +type footerBlock struct { + offset int64 + buffer *memory.Buffer + data *flatbuf.Footer +} + +type dataBlock interface { + Offset() int64 + Meta() int32 + Body() int64 + NewMessage() (*Message, error) +} + +const footerSizeLen = 4 + +var minimumOffsetSize = int64(len(Magic)*2 + footerSizeLen) + +type basicReaderImpl struct { r ReadAtSeeker +} + +func (r *basicReaderImpl) getBytes(offset, len int64) ([]byte, error) { + buf := make([]byte, len) + n, err := r.r.ReadAt(buf, offset) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read %d bytes at offset %d: %w", len, offset, err) + } + if int64(n) != len { + return nil, fmt.Errorf("arrow/ipc: could not read %d bytes at offset %d", len, offset) + } + return buf, nil +} + +func (r *basicReaderImpl) getFooterEnd() (int64, error) { + return r.r.Seek(0, io.SeekEnd) +} + +func (r *basicReaderImpl) block(mem memory.Allocator, f *footerBlock, i int) (dataBlock, error) { + var blk flatbuf.Block + if !f.data.RecordBatches(&blk, i) { + return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract file block %d", i) + } + + return fileBlock{ + offset: blk.Offset(), + meta: blk.MetaDataLength(), + body: blk.BodyLength(), + r: r.r, + mem: mem, + }, nil +} + +func (r *basicReaderImpl) dict(mem memory.Allocator, f *footerBlock, i int) (dataBlock, error) { + var blk flatbuf.Block + if !f.data.Dictionaries(&blk, i) { + return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract dictionary block %d", i) + } - footer struct { - offset int64 - buffer *memory.Buffer - data *flatbuf.Footer + return fileBlock{ + offset: blk.Offset(), + meta: blk.MetaDataLength(), + body: blk.BodyLength(), + r: r.r, + mem: mem, + }, nil +} + +type mappedReaderImpl struct { + data []byte +} + +func (r *mappedReaderImpl) getBytes(offset, length int64) ([]byte, error) { + if offset < 0 || offset+int64(length) > int64(len(r.data)) { + return nil, fmt.Errorf("arrow/ipc: invalid offset=%d or length=%d", offset, length) } + return r.data[offset : offset+length], nil +} + +func (r *mappedReaderImpl) getFooterEnd() (int64, error) { return int64(len(r.data)), nil } + +func (r *mappedReaderImpl) block(_ memory.Allocator, f *footerBlock, i int) (dataBlock, error) { + var blk flatbuf.Block + if !f.data.RecordBatches(&blk, i) { + return mappedFileBlock{}, fmt.Errorf("arrow/ipc: could not extract file block %d", i) + } + + return mappedFileBlock{ + offset: blk.Offset(), + meta: blk.MetaDataLength(), + body: blk.BodyLength(), + data: r.data, + }, nil +} + +func (r *mappedReaderImpl) dict(_ memory.Allocator, f *footerBlock, i int) (dataBlock, error) { + var blk flatbuf.Block + if !f.data.Dictionaries(&blk, i) { + return mappedFileBlock{}, fmt.Errorf("arrow/ipc: could not extract dictionary block %d", i) + } + + return mappedFileBlock{ + offset: blk.Offset(), + meta: blk.MetaDataLength(), + body: blk.BodyLength(), + data: r.data, + }, nil +} + +// FileReader is an Arrow file reader. 
+type FileReader struct { + r readerImpl + + footer footerBlock + // fields dictTypeMap memo dictutils.Memo @@ -56,81 +168,71 @@ type FileReader struct { swapEndianness bool } +// NewMappedFileReader is like NewFileReader but instead of using a ReadAtSeeker, +// which will force copies through the Read/ReadAt methods, it uses a byte slice +// and pulls slices directly from the data. This is useful specifically when +// dealing with mmapped data so that you can lazily load the buffers and avoid +// extraneous copies. The slices used for the record column buffers will simply +// reference the existing data instead of performing copies via ReadAt/Read. +// +// For example, syscall.Mmap returns a byte slice which could be referencing +// a shared memory region or otherwise a memory-mapped file. +func NewMappedFileReader(data []byte, opts ...Option) (*FileReader, error) { + var ( + cfg = newConfig(opts...) + f = FileReader{ + r: &mappedReaderImpl{data: data}, + mem: cfg.alloc, + } + ) + + if err := f.init(cfg); err != nil { + return nil, err + } + return &f, nil +} + // NewFileReader opens an Arrow file using the provided reader r. func NewFileReader(r ReadAtSeeker, opts ...Option) (*FileReader, error) { var ( cfg = newConfig(opts...) - err error - - f = FileReader{ - r: r, + f = FileReader{ + r: &basicReaderImpl{r: r}, memo: dictutils.NewMemo(), mem: cfg.alloc, } ) + if err := f.init(cfg); err != nil { + return nil, err + } + return &f, nil +} + +func (f *FileReader) init(cfg *config) error { + var err error if cfg.footer.offset <= 0 { - cfg.footer.offset, err = f.r.Seek(0, io.SeekEnd) + cfg.footer.offset, err = f.r.getFooterEnd() if err != nil { - return nil, fmt.Errorf("arrow/ipc: could retrieve footer offset: %w", err) + return fmt.Errorf("arrow/ipc: could retrieve footer offset: %w", err) } } f.footer.offset = cfg.footer.offset err = f.readFooter() if err != nil { - return nil, fmt.Errorf("arrow/ipc: could not decode footer: %w", err) + return fmt.Errorf("arrow/ipc: could not decode footer: %w", err) } err = f.readSchema(cfg.ensureNativeEndian) if err != nil { - return nil, fmt.Errorf("arrow/ipc: could not decode schema: %w", err) + return fmt.Errorf("arrow/ipc: could not decode schema: %w", err) } if cfg.schema != nil && !cfg.schema.Equal(f.schema) { - return nil, fmt.Errorf("arrow/ipc: inconsistent schema for reading (got: %v, want: %v)", f.schema, cfg.schema) - } - - return &f, err -} - -func (f *FileReader) readFooter() error { - var err error - - if f.footer.offset <= int64(len(Magic)*2+4) { - return fmt.Errorf("arrow/ipc: file too small (size=%d)", f.footer.offset) + return fmt.Errorf("arrow/ipc: inconsistent schema for reading (got: %v, want: %v)", f.schema, cfg.schema) } - eof := int64(len(Magic) + 4) - buf := make([]byte, eof) - n, err := f.r.ReadAt(buf, f.footer.offset-eof) - if err != nil { - return fmt.Errorf("arrow/ipc: could not read footer: %w", err) - } - if n != len(buf) { - return fmt.Errorf("arrow/ipc: could not read %d bytes from end of file", len(buf)) - } - - if !bytes.Equal(buf[4:], Magic) { - return errNotArrowFile - } - - size := int64(binary.LittleEndian.Uint32(buf[:4])) - if size <= 0 || size+int64(len(Magic)*2+4) > f.footer.offset { - return errInconsistentFileMetadata - } - - buf = make([]byte, size) - n, err = f.r.ReadAt(buf, f.footer.offset-size-eof) - if err != nil { - return fmt.Errorf("arrow/ipc: could not read footer data: %w", err) - } - if n != len(buf) { - return fmt.Errorf("arrow/ipc: could not read %d bytes from footer data", len(buf)) - } - - 
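A usage sketch for the NewMappedFileReader constructor described above (illustrative only, not part of the vendored file; it assumes a Unix system, a hypothetical file path, and the arrow-go/v18 ipc package vendored in this diff):

package main

import (
	"fmt"
	"os"
	"syscall"

	"github.com/apache/arrow-go/v18/arrow/ipc"
)

func main() {
	f, err := os.Open("/tmp/data.arrow") // hypothetical Arrow IPC file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// Map the whole file read-only; the returned []byte references the mapping.
	data, err := syscall.Mmap(int(f.Fd()), 0, int(fi.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(data)

	rdr, err := ipc.NewMappedFileReader(data)
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	rec, err := rdr.RecordAt(0) // column buffers slice the mapping instead of copying it
	if err != nil {
		panic(err)
	}
	defer rec.Release()
	fmt.Println(rec.NumRows())
}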
f.footer.buffer = memory.NewBufferBytes(buf) - f.footer.data = flatbuf.GetRootAsFooter(buf, 0) return err } @@ -155,17 +257,17 @@ func (f *FileReader) readSchema(ensureNativeEndian bool) error { } for i := 0; i < f.NumDictionaries(); i++ { - blk, err := f.dict(i) + blk, err := f.r.dict(f.mem, &f.footer, i) if err != nil { return fmt.Errorf("arrow/ipc: could not read dictionary[%d]: %w", i, err) } switch { - case !bitutil.IsMultipleOf8(blk.Offset): - return fmt.Errorf("arrow/ipc: invalid file offset=%d for dictionary %d", blk.Offset, i) - case !bitutil.IsMultipleOf8(int64(blk.Meta)): - return fmt.Errorf("arrow/ipc: invalid file metadata=%d position for dictionary %d", blk.Meta, i) - case !bitutil.IsMultipleOf8(blk.Body): - return fmt.Errorf("arrow/ipc: invalid file body=%d position for dictionary %d", blk.Body, i) + case !bitutil.IsMultipleOf8(blk.Offset()): + return fmt.Errorf("arrow/ipc: invalid file offset=%d for dictionary %d", blk.Offset(), i) + case !bitutil.IsMultipleOf8(int64(blk.Meta())): + return fmt.Errorf("arrow/ipc: invalid file metadata=%d position for dictionary %d", blk.Meta(), i) + case !bitutil.IsMultipleOf8(blk.Body()): + return fmt.Errorf("arrow/ipc: invalid file body=%d position for dictionary %d", blk.Body(), i) } msg, err := blk.NewMessage() @@ -173,7 +275,7 @@ func (f *FileReader) readSchema(ensureNativeEndian bool) error { return err } - kind, err = readDictionary(&f.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), f.swapEndianness, f.mem) + kind, err = readDictionary(&f.memo, msg.meta, msg.body, f.swapEndianness, f.mem) if err != nil { return err } @@ -185,34 +287,34 @@ func (f *FileReader) readSchema(ensureNativeEndian bool) error { return err } -func (f *FileReader) block(i int) (fileBlock, error) { - var blk flatbuf.Block - if !f.footer.data.RecordBatches(&blk, i) { - return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract file block %d", i) +func (f *FileReader) readFooter() error { + if f.footer.offset <= minimumOffsetSize { + return fmt.Errorf("arrow/ipc: file too small (size=%d)", f.footer.offset) } - return fileBlock{ - Offset: blk.Offset(), - Meta: blk.MetaDataLength(), - Body: blk.BodyLength(), - r: f.r, - mem: f.mem, - }, nil -} + eof := int64(len(Magic) + footerSizeLen) + buf, err := f.r.getBytes(f.footer.offset-eof, eof) + if err != nil { + return err + } -func (f *FileReader) dict(i int) (fileBlock, error) { - var blk flatbuf.Block - if !f.footer.data.Dictionaries(&blk, i) { - return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract dictionary block %d", i) + if !bytes.Equal(buf[4:], Magic) { + return errNotArrowFile } - return fileBlock{ - Offset: blk.Offset(), - Meta: blk.MetaDataLength(), - Body: blk.BodyLength(), - r: f.r, - mem: f.mem, - }, nil + size := int64(binary.LittleEndian.Uint32(buf[:footerSizeLen])) + if size <= 0 || size+minimumOffsetSize > f.footer.offset { + return errInconsistentFileMetadata + } + + buf, err = f.r.getBytes(f.footer.offset-size-eof, size) + if err != nil { + return err + } + + f.footer.buffer = memory.NewBufferBytes(buf) + f.footer.data = flatbuf.GetRootAsFooter(buf, 0) + return nil } func (f *FileReader) Schema() *arrow.Schema { @@ -278,17 +380,17 @@ func (f *FileReader) RecordAt(i int) (arrow.Record, error) { panic("arrow/ipc: record index out of bounds") } - blk, err := f.block(i) + blk, err := f.r.block(f.mem, &f.footer, i) if err != nil { return nil, err } switch { - case !bitutil.IsMultipleOf8(blk.Offset): - return nil, fmt.Errorf("arrow/ipc: invalid file offset=%d for record %d", blk.Offset, i) - 
case !bitutil.IsMultipleOf8(int64(blk.Meta)): - return nil, fmt.Errorf("arrow/ipc: invalid file metadata=%d position for record %d", blk.Meta, i) - case !bitutil.IsMultipleOf8(blk.Body): - return nil, fmt.Errorf("arrow/ipc: invalid file body=%d position for record %d", blk.Body, i) + case !bitutil.IsMultipleOf8(blk.Offset()): + return nil, fmt.Errorf("arrow/ipc: invalid file offset=%d for record %d", blk.Offset(), i) + case !bitutil.IsMultipleOf8(int64(blk.Meta())): + return nil, fmt.Errorf("arrow/ipc: invalid file metadata=%d position for record %d", blk.Meta(), i) + case !bitutil.IsMultipleOf8(blk.Body()): + return nil, fmt.Errorf("arrow/ipc: invalid file body=%d position for record %d", blk.Body(), i) } msg, err := blk.NewMessage() @@ -301,7 +403,7 @@ func (f *FileReader) RecordAt(i int) (arrow.Record, error) { return nil, fmt.Errorf("arrow/ipc: message %d is not a Record", i) } - return newRecord(f.schema, &f.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), f.swapEndianness, f.mem), nil + return newRecord(f.schema, &f.memo, msg.meta, msg.body, f.swapEndianness, f.mem), nil } // Read reads the current record from the underlying stream and an error, if any. @@ -323,7 +425,7 @@ func (f *FileReader) ReadAt(i int64) (arrow.Record, error) { return f.Record(int(i)) } -func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker, swapEndianness bool, mem memory.Allocator) arrow.Record { +func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer, body *memory.Buffer, swapEndianness bool, mem memory.Allocator) arrow.Record { var ( msg = flatbuf.GetRootAsMessage(meta.Bytes(), 0) md flatbuf.RecordBatch @@ -340,10 +442,10 @@ func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer, ctx := &arrayLoaderContext{ src: ipcSource{ - meta: &md, - r: body, - codec: codec, - mem: mem, + meta: &md, + rawBytes: body, + codec: codec, + mem: mem, }, memo: memo, max: kMaxNestingDepth, @@ -351,9 +453,9 @@ func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer, } pos := dictutils.NewFieldPos() - cols := make([]arrow.Array, len(schema.Fields())) - for i, field := range schema.Fields() { - data := ctx.loadArray(field.Type) + cols := make([]arrow.Array, schema.NumFields()) + for i := 0; i < schema.NumFields(); i++ { + data := ctx.loadArray(schema.Field(i).Type) defer data.Release() if err := dictutils.ResolveFieldDict(memo, data, pos.Child(int32(i)), mem); err != nil { @@ -372,10 +474,10 @@ func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer, } type ipcSource struct { - meta *flatbuf.RecordBatch - r ReadAtSeeker - codec decompressor - mem memory.Allocator + meta *flatbuf.RecordBatch + rawBytes *memory.Buffer + codec decompressor + mem memory.Allocator } func (src *ipcSource) buffer(i int) *memory.Buffer { @@ -388,34 +490,23 @@ func (src *ipcSource) buffer(i int) *memory.Buffer { return memory.NewBufferBytes(nil) } - raw := memory.NewResizableBuffer(src.mem) + var raw *memory.Buffer if src.codec == nil { - raw.Resize(int(buf.Length())) - _, err := src.r.ReadAt(raw.Bytes(), buf.Offset()) - if err != nil { - panic(err) - } + raw = memory.SliceBuffer(src.rawBytes, int(buf.Offset()), int(buf.Length())) } else { - sr := io.NewSectionReader(src.r, buf.Offset(), buf.Length()) - var uncompressedSize uint64 + body := src.rawBytes.Bytes()[buf.Offset() : buf.Offset()+buf.Length()] + uncompressedSize := int64(binary.LittleEndian.Uint64(body[:8])) - err := binary.Read(sr, binary.LittleEndian, 
&uncompressedSize) - if err != nil { - panic(err) - } - - var r io.Reader = sr // check for an uncompressed buffer - if int64(uncompressedSize) != -1 { + if uncompressedSize != -1 { + raw = memory.NewResizableBuffer(src.mem) raw.Resize(int(uncompressedSize)) - src.codec.Reset(sr) - r = src.codec + src.codec.Reset(bytes.NewReader(body[8:])) + if _, err := io.ReadFull(src.codec, raw.Bytes()); err != nil { + panic(err) + } } else { - raw.Resize(int(buf.Length() - 8)) - } - - if _, err = io.ReadFull(r, raw.Bytes()); err != nil { - panic(err) + raw = memory.SliceBuffer(src.rawBytes, int(buf.Offset())+8, int(buf.Length())-8) } } @@ -430,13 +521,18 @@ func (src *ipcSource) fieldMetadata(i int) *flatbuf.FieldNode { return &node } +func (src *ipcSource) variadicCount(i int) int64 { + return src.meta.VariadicBufferCounts(i) +} + type arrayLoaderContext struct { - src ipcSource - ifield int - ibuffer int - max int - memo *dictutils.Memo - version MetadataVersion + src ipcSource + ifield int + ibuffer int + ivariadic int + max int + memo *dictutils.Memo + version MetadataVersion } func (ctx *arrayLoaderContext) field() *flatbuf.FieldNode { @@ -451,6 +547,12 @@ func (ctx *arrayLoaderContext) buffer() *memory.Buffer { return buf } +func (ctx *arrayLoaderContext) variadic() int64 { + v := ctx.src.variadicCount(ctx.ivariadic) + ctx.ivariadic++ + return v +} + func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) arrow.ArrayData { switch dt := dt.(type) { case *arrow.NullType: @@ -465,7 +567,7 @@ func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) arrow.ArrayData { *arrow.Int8Type, *arrow.Int16Type, *arrow.Int32Type, *arrow.Int64Type, *arrow.Uint8Type, *arrow.Uint16Type, *arrow.Uint32Type, *arrow.Uint64Type, *arrow.Float16Type, *arrow.Float32Type, *arrow.Float64Type, - *arrow.Decimal128Type, *arrow.Decimal256Type, + arrow.DecimalType, *arrow.Time32Type, *arrow.Time64Type, *arrow.TimestampType, *arrow.Date32Type, *arrow.Date64Type, @@ -476,6 +578,9 @@ func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) arrow.ArrayData { case *arrow.BinaryType, *arrow.StringType, *arrow.LargeStringType, *arrow.LargeBinaryType: return ctx.loadBinary(dt) + case arrow.BinaryViewDataType: + return ctx.loadBinaryView(dt) + case *arrow.FixedSizeBinaryType: return ctx.loadFixedSizeBinary(dt) @@ -507,7 +612,7 @@ func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) arrow.ArrayData { case *arrow.RunEndEncodedType: field, buffers := ctx.loadCommon(dt.ID(), 1) - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) runEnds := ctx.loadChild(dt.RunEnds()) defer runEnds.Release() @@ -569,7 +674,7 @@ func (ctx *arrayLoaderContext) loadPrimitive(dt arrow.DataType) arrow.ArrayData buffers = append(buffers, ctx.buffer()) } - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) } @@ -577,7 +682,19 @@ func (ctx *arrayLoaderContext) loadPrimitive(dt arrow.DataType) arrow.ArrayData func (ctx *arrayLoaderContext) loadBinary(dt arrow.DataType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 3) buffers = append(buffers, ctx.buffer(), ctx.buffer()) - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) + + return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadBinaryView(dt arrow.DataType) arrow.ArrayData { + nVariadicBufs := ctx.variadic() + field, buffers := ctx.loadCommon(dt.ID(), 2+int(nVariadicBufs)) + 
buffers = append(buffers, ctx.buffer()) + for i := 0; i < int(nVariadicBufs); i++ { + buffers = append(buffers, ctx.buffer()) + } + defer memory.ReleaseBuffers(buffers) return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) } @@ -585,7 +702,7 @@ func (ctx *arrayLoaderContext) loadBinary(dt arrow.DataType) arrow.ArrayData { func (ctx *arrayLoaderContext) loadFixedSizeBinary(dt *arrow.FixedSizeBinaryType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 2) buffers = append(buffers, ctx.buffer()) - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) } @@ -593,7 +710,7 @@ func (ctx *arrayLoaderContext) loadFixedSizeBinary(dt *arrow.FixedSizeBinaryType func (ctx *arrayLoaderContext) loadMap(dt *arrow.MapType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 2) buffers = append(buffers, ctx.buffer()) - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) sub := ctx.loadChild(dt.Elem()) defer sub.Release() @@ -604,7 +721,7 @@ func (ctx *arrayLoaderContext) loadMap(dt *arrow.MapType) arrow.ArrayData { func (ctx *arrayLoaderContext) loadList(dt arrow.ListLikeType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 2) buffers = append(buffers, ctx.buffer()) - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) sub := ctx.loadChild(dt.Elem()) defer sub.Release() @@ -615,7 +732,7 @@ func (ctx *arrayLoaderContext) loadList(dt arrow.ListLikeType) arrow.ArrayData { func (ctx *arrayLoaderContext) loadListView(dt arrow.VarLenListLikeType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 3) buffers = append(buffers, ctx.buffer(), ctx.buffer()) - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) sub := ctx.loadChild(dt.Elem()) defer sub.Release() @@ -625,7 +742,7 @@ func (ctx *arrayLoaderContext) loadListView(dt arrow.VarLenListLikeType) arrow.A func (ctx *arrayLoaderContext) loadFixedSizeList(dt *arrow.FixedSizeListType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 1) - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) sub := ctx.loadChild(dt.Elem()) defer sub.Release() @@ -635,9 +752,9 @@ func (ctx *arrayLoaderContext) loadFixedSizeList(dt *arrow.FixedSizeListType) ar func (ctx *arrayLoaderContext) loadStruct(dt *arrow.StructType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 1) - defer releaseBuffers(buffers) + defer memory.ReleaseBuffers(buffers) - subs := make([]arrow.ArrayData, len(dt.Fields())) + subs := make([]arrow.ArrayData, dt.NumFields()) for i, f := range dt.Fields() { subs[i] = ctx.loadChild(f.Type) } @@ -678,8 +795,8 @@ func (ctx *arrayLoaderContext) loadUnion(dt arrow.UnionType) arrow.ArrayData { } } - defer releaseBuffers(buffers) - subs := make([]arrow.ArrayData, len(dt.Fields())) + defer memory.ReleaseBuffers(buffers) + subs := make([]arrow.ArrayData, dt.NumFields()) for i, f := range dt.Fields() { subs[i] = ctx.loadChild(f.Type) } @@ -691,7 +808,7 @@ func (ctx *arrayLoaderContext) loadUnion(dt arrow.UnionType) arrow.ArrayData { return array.NewData(dt, int(field.Length()), buffers, subs, 0, 0) } -func readDictionary(memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker, swapEndianness bool, mem memory.Allocator) (dictutils.Kind, error) { +func readDictionary(memo *dictutils.Memo, meta *memory.Buffer, body *memory.Buffer, swapEndianness bool, mem memory.Allocator) (dictutils.Kind, error) { var ( msg = 
flatbuf.GetRootAsMessage(meta.Bytes(), 0) md flatbuf.DictionaryBatch @@ -704,6 +821,7 @@ func readDictionary(memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker bodyCompress := data.Compression(nil) if bodyCompress != nil { codec = getDecompressor(bodyCompress.Codec()) + defer codec.Close() } id := md.Id() @@ -716,10 +834,10 @@ func readDictionary(memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker ctx := &arrayLoaderContext{ src: ipcSource{ - meta: &data, - codec: codec, - r: body, - mem: mem, + meta: &data, + codec: codec, + rawBytes: body, + mem: mem, }, memo: memo, max: kMaxNestingDepth, @@ -742,10 +860,43 @@ func readDictionary(memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker return dictutils.KindReplacement, nil } -func releaseBuffers(buffers []*memory.Buffer) { - for _, b := range buffers { - if b != nil { - b.Release() - } +type mappedFileBlock struct { + offset int64 + meta int32 + body int64 + + data []byte +} + +func (blk mappedFileBlock) Offset() int64 { return blk.offset } +func (blk mappedFileBlock) Meta() int32 { return blk.meta } +func (blk mappedFileBlock) Body() int64 { return blk.body } + +func (blk mappedFileBlock) section() []byte { + return blk.data[blk.offset : blk.offset+int64(blk.meta)+blk.body] +} + +func (blk mappedFileBlock) NewMessage() (*Message, error) { + var ( + body *memory.Buffer + meta *memory.Buffer + buf = blk.section() + ) + + metaBytes := buf[:blk.meta] + + prefix := 0 + switch binary.LittleEndian.Uint32(metaBytes) { + case 0: + case kIPCContToken: + prefix = 8 + default: + // ARROW-6314: backwards compatibility for reading old IPC + // messages produced prior to version 0.15.0 + prefix = 4 } + + meta = memory.NewBufferBytes(metaBytes[prefix:]) + body = memory.NewBufferBytes(buf[blk.meta : int64(blk.meta)+blk.body]) + return NewMessage(meta, body), nil } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_writer.go similarity index 76% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_writer.go index 12384225..b57cafbc 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_writer.go @@ -21,11 +21,11 @@ import ( "fmt" "io" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal/dictutils" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" ) // PayloadWriter is an interface for injecting a different payloadwriter @@ -37,23 +37,17 @@ type PayloadWriter interface { Close() error } -type pwriter struct { - w io.WriteSeeker - pos int64 +type fileWriter struct { + streamWriter schema *arrow.Schema - dicts []fileBlock - recs []fileBlock + dicts []dataBlock + recs []dataBlock } -func (w *pwriter) Start() error { +func (w *fileWriter) Start() error { var err error - err = w.updatePos() - if err != nil { - return fmt.Errorf("arrow/ipc: could not update position while in start: %w", err) - } - // only necessary to align to 8-byte boundary at the start of the file _, err = w.Write(Magic) if err != nil { 
@@ -65,22 +59,17 @@ func (w *pwriter) Start() error { return fmt.Errorf("arrow/ipc: could not align start block: %w", err) } - return err + return w.streamWriter.Start() } -func (w *pwriter) WritePayload(p Payload) error { - blk := fileBlock{Offset: w.pos, Meta: 0, Body: p.size} +func (w *fileWriter) WritePayload(p Payload) error { + blk := fileBlock{offset: w.pos, meta: 0, body: p.size} n, err := writeIPCPayload(w, p) if err != nil { return err } - blk.Meta = int32(n) - - err = w.updatePos() - if err != nil { - return fmt.Errorf("arrow/ipc: could not update position while in write-payload: %w", err) - } + blk.meta = int32(n) switch flatbuf.MessageHeader(p.msg) { case flatbuf.MessageHeaderDictionaryBatch: @@ -92,27 +81,18 @@ func (w *pwriter) WritePayload(p Payload) error { return nil } -func (w *pwriter) Close() error { +func (w *fileWriter) Close() error { var err error - // write file footer - err = w.updatePos() - if err != nil { - return fmt.Errorf("arrow/ipc: could not update position while in close: %w", err) + if err = w.streamWriter.Close(); err != nil { + return err } pos := w.pos - err = writeFileFooter(w.schema, w.dicts, w.recs, w) - if err != nil { + if err = writeFileFooter(w.schema, w.dicts, w.recs, w); err != nil { return fmt.Errorf("arrow/ipc: could not write file footer: %w", err) } - // write file footer length - err = w.updatePos() // not strictly needed as we passed w to writeFileFooter... - if err != nil { - return fmt.Errorf("arrow/ipc: could not compute file footer length: %w", err) - } - size := w.pos - pos if size <= 0 { return fmt.Errorf("arrow/ipc: invalid file footer size (size=%d)", size) @@ -133,13 +113,7 @@ func (w *pwriter) Close() error { return nil } -func (w *pwriter) updatePos() error { - var err error - w.pos, err = w.w.Seek(0, io.SeekCurrent) - return err -} - -func (w *pwriter) align(align int32) error { +func (w *fileWriter) align(align int32) error { remainder := paddedLength(w.pos, align) - w.pos if remainder == 0 { return nil @@ -149,12 +123,6 @@ func (w *pwriter) align(align int32) error { return err } -func (w *pwriter) Write(p []byte) (int, error) { - n, err := w.w.Write(p) - w.pos += int64(n) - return n, err -} - func writeIPCPayload(w io.Writer, p Payload) (int, error) { n, err := writeMessage(p.meta, kArrowIPCAlignment, w) if err != nil { @@ -259,18 +227,12 @@ func (ps payloads) Release() { // FileWriter is an Arrow file writer. type FileWriter struct { - w io.WriteSeeker + w io.Writer mem memory.Allocator - header struct { - started bool - offset int64 - } - - footer struct { - written bool - } + headerStarted bool + footerWritten bool pw PayloadWriter @@ -278,6 +240,7 @@ type FileWriter struct { mapper dictutils.Mapper codec flatbuf.CompressionType compressNP int + compressors []compressor minSpaceSavings *float64 // map of the last written dictionaries by id @@ -288,7 +251,7 @@ type FileWriter struct { } // NewFileWriter opens an Arrow file using the provided writer w. -func NewFileWriter(w io.WriteSeeker, opts ...Option) (*FileWriter, error) { +func NewFileWriter(w io.Writer, opts ...Option) (*FileWriter, error) { var ( cfg = newConfig(opts...) 
err error @@ -296,20 +259,15 @@ func NewFileWriter(w io.WriteSeeker, opts ...Option) (*FileWriter, error) { f := FileWriter{ w: w, - pw: &pwriter{w: w, schema: cfg.schema, pos: -1}, + pw: &fileWriter{streamWriter: streamWriter{w: w}, schema: cfg.schema}, mem: cfg.alloc, schema: cfg.schema, codec: cfg.codec, compressNP: cfg.compressNP, minSpaceSavings: cfg.minSpaceSavings, + compressors: make([]compressor, cfg.compressNP), } - pos, err := f.w.Seek(0, io.SeekCurrent) - if err != nil { - return nil, fmt.Errorf("arrow/ipc: could not seek current position: %w", err) - } - f.header.offset = pos - return &f, err } @@ -319,7 +277,7 @@ func (f *FileWriter) Close() error { return fmt.Errorf("arrow/ipc: could not write empty file: %w", err) } - if f.footer.written { + if f.footerWritten { return nil } @@ -327,7 +285,7 @@ func (f *FileWriter) Close() error { if err != nil { return fmt.Errorf("arrow/ipc: could not close payload writer: %w", err) } - f.footer.written = true + f.footerWritten = true return nil } @@ -345,7 +303,9 @@ func (f *FileWriter) Write(rec arrow.Record) error { const allow64b = true var ( data = Payload{msg: MessageRecordBatch} - enc = newRecordEncoder(f.mem, 0, kMaxNestingDepth, allow64b, f.codec, f.compressNP, f.minSpaceSavings) + enc = newRecordEncoder( + f.mem, 0, kMaxNestingDepth, allow64b, f.codec, f.compressNP, f.minSpaceSavings, f.compressors, + ) ) defer data.Release() @@ -363,14 +323,14 @@ func (f *FileWriter) Write(rec arrow.Record) error { } func (f *FileWriter) checkStarted() error { - if !f.header.started { + if !f.headerStarted { return f.start() } return nil } func (f *FileWriter) start() error { - f.header.started = true + f.headerStarted = true err := f.pw.Start() if err != nil { return err diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/ipc.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/ipc.go index 6c04b6f5..c4589da6 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/ipc.go @@ -19,10 +19,10 @@ package ipc import ( "io" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/arrio" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/arrio" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" ) const ( @@ -79,6 +79,7 @@ func newConfig(opts ...Option) *config { alloc: memory.NewGoAllocator(), codec: -1, // uncompressed ensureNativeEndian: true, + compressNP: 1, } for _, opt := range opts { @@ -132,9 +133,12 @@ func WithZstd() Option { // WithCompressConcurrency specifies a number of goroutines to spin up for // concurrent compression of the body buffers when writing compress IPC records. // If n <= 1 then compression will be done serially without goroutine -// parallelization. Default is 0. +// parallelization. Default is 1. 
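A minimal sketch of how this concurrency option is typically combined with a compression codec when constructing a writer (illustrative only, not part of the vendored file; it relies on the constructors and options visible in this diff, plus ipc.WithSchema):

package example

import (
	"io"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/ipc"
)

// writeCompressed is an illustrative helper: it writes one record to out using
// zstd-compressed record-batch bodies with 4 compression goroutines.
func writeCompressed(out io.Writer, schema *arrow.Schema, rec arrow.Record) error {
	w, err := ipc.NewFileWriter(out, // out is any io.Writer; a Seeker is no longer required
		ipc.WithSchema(schema),
		ipc.WithZstd(),
		ipc.WithCompressConcurrency(4),
	)
	if err != nil {
		return err
	}
	if err := w.Write(rec); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}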
func WithCompressConcurrency(n int) Option { return func(cfg *config) { + if n <= 0 { + n = 1 + } cfg.compressNP = n } } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/message.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/message.go index c5d0ec68..f989cf2a 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/message.go @@ -22,20 +22,20 @@ import ( "io" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" ) // MetadataVersion represents the Arrow metadata version. type MetadataVersion flatbuf.MetadataVersion const ( - MetadataV1 = MetadataVersion(flatbuf.MetadataVersionV1) // version for Arrow-0.1.0 - MetadataV2 = MetadataVersion(flatbuf.MetadataVersionV2) // version for Arrow-0.2.0 - MetadataV3 = MetadataVersion(flatbuf.MetadataVersionV3) // version for Arrow-0.3.0 to 0.7.1 - MetadataV4 = MetadataVersion(flatbuf.MetadataVersionV4) // version for >= Arrow-0.8.0 - MetadataV5 = MetadataVersion(flatbuf.MetadataVersionV5) // version for >= Arrow-1.0.0, backward compatible with v4 + MetadataV1 = MetadataVersion(flatbuf.MetadataVersionV1) // version for Arrow Format-0.1.0 + MetadataV2 = MetadataVersion(flatbuf.MetadataVersionV2) // version for Arrow Format-0.2.0 + MetadataV3 = MetadataVersion(flatbuf.MetadataVersionV3) // version for Arrow Format-0.3.0 to 0.7.1 + MetadataV4 = MetadataVersion(flatbuf.MetadataVersionV4) // version for >= Arrow Format-0.8.0 + MetadataV5 = MetadataVersion(flatbuf.MetadataVersionV5) // version for >= Arrow Format-1.0.0, backward compatible with v4 ) func (m MetadataVersion) String() string { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/metadata.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/metadata.go index 9bab47d6..b83c1a84 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/metadata.go @@ -23,11 +23,11 @@ import ( "io" "sort" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/endian" - "github.com/apache/arrow/go/v14/arrow/internal/dictutils" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" flatbuffers "github.com/google/flatbuffers/go" ) @@ -63,19 +63,23 @@ type bufferMetadata struct { } type fileBlock struct { - Offset int64 - Meta int32 - Body int64 + offset int64 + meta int32 + body int64 r io.ReaderAt mem memory.Allocator } -func fileBlocksToFB(b *flatbuffers.Builder, blocks []fileBlock, start startVecFunc) flatbuffers.UOffsetT { +func (blk fileBlock) Offset() int64 { return blk.offset } +func (blk fileBlock) Meta() int32 { return 
blk.meta } +func (blk fileBlock) Body() int64 { return blk.body } + +func fileBlocksToFB(b *flatbuffers.Builder, blocks []dataBlock, start startVecFunc) flatbuffers.UOffsetT { start(b, len(blocks)) for i := len(blocks) - 1; i >= 0; i-- { blk := blocks[i] - flatbuf.CreateBlock(b, blk.Offset, blk.Meta, blk.Body) + flatbuf.CreateBlock(b, blk.Offset(), blk.Meta(), blk.Body()) } return b.EndVector(len(blocks)) @@ -91,7 +95,7 @@ func (blk fileBlock) NewMessage() (*Message, error) { ) meta = memory.NewResizableBuffer(blk.mem) - meta.Resize(int(blk.Meta)) + meta.Resize(int(blk.meta)) defer meta.Release() buf = meta.Bytes() @@ -112,12 +116,12 @@ func (blk fileBlock) NewMessage() (*Message, error) { } // drop buf-size already known from blk.Meta - meta = memory.SliceBuffer(meta, prefix, int(blk.Meta)-prefix) + meta = memory.SliceBuffer(meta, prefix, int(blk.meta)-prefix) defer meta.Release() body = memory.NewResizableBuffer(blk.mem) defer body.Release() - body.Resize(int(blk.Body)) + body.Resize(int(blk.body)) buf = body.Bytes() _, err = io.ReadFull(r, buf) if err != nil { @@ -128,7 +132,7 @@ func (blk fileBlock) NewMessage() (*Message, error) { } func (blk fileBlock) section() io.Reader { - return io.NewSectionReader(blk.r, blk.Offset, int64(blk.Meta)+blk.Body) + return io.NewSectionReader(blk.r, blk.offset, int64(blk.meta)+blk.body) } func unitFromFB(unit flatbuf.TimeUnit) arrow.TimeUnit { @@ -281,20 +285,12 @@ func (fv *fieldVisitor) visit(field arrow.Field) { fv.dtype = flatbuf.TypeFloatingPoint fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) - case *arrow.Decimal128Type: + case arrow.DecimalType: fv.dtype = flatbuf.TypeDecimal flatbuf.DecimalStart(fv.b) - flatbuf.DecimalAddPrecision(fv.b, dt.Precision) - flatbuf.DecimalAddScale(fv.b, dt.Scale) - flatbuf.DecimalAddBitWidth(fv.b, 128) - fv.offset = flatbuf.DecimalEnd(fv.b) - - case *arrow.Decimal256Type: - fv.dtype = flatbuf.TypeDecimal - flatbuf.DecimalStart(fv.b) - flatbuf.DecimalAddPrecision(fv.b, dt.Precision) - flatbuf.DecimalAddScale(fv.b, dt.Scale) - flatbuf.DecimalAddBitWidth(fv.b, 256) + flatbuf.DecimalAddPrecision(fv.b, dt.GetPrecision()) + flatbuf.DecimalAddScale(fv.b, dt.GetScale()) + flatbuf.DecimalAddBitWidth(fv.b, int32(dt.BitWidth())) fv.offset = flatbuf.DecimalEnd(fv.b) case *arrow.FixedSizeBinaryType: @@ -323,6 +319,16 @@ func (fv *fieldVisitor) visit(field arrow.Field) { flatbuf.LargeUtf8Start(fv.b) fv.offset = flatbuf.LargeUtf8End(fv.b) + case *arrow.BinaryViewType: + fv.dtype = flatbuf.TypeBinaryView + flatbuf.BinaryViewStart(fv.b) + fv.offset = flatbuf.BinaryViewEnd(fv.b) + + case *arrow.StringViewType: + fv.dtype = flatbuf.TypeUtf8View + flatbuf.Utf8ViewStart(fv.b) + fv.offset = flatbuf.Utf8ViewEnd(fv.b) + case *arrow.Date32Type: fv.dtype = flatbuf.TypeDate flatbuf.DateStart(fv.b) @@ -363,7 +369,7 @@ func (fv *fieldVisitor) visit(field arrow.Field) { case *arrow.StructType: fv.dtype = flatbuf.TypeStruct_ - offsets := make([]flatbuffers.UOffsetT, len(dt.Fields())) + offsets := make([]flatbuffers.UOffsetT, dt.NumFields()) for i, field := range dt.Fields() { offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo) } @@ -462,7 +468,7 @@ func (fv *fieldVisitor) visit(field arrow.Field) { case arrow.UnionType: fv.dtype = flatbuf.TypeUnion - offsets := make([]flatbuffers.UOffsetT, len(dt.Fields())) + offsets := make([]flatbuffers.UOffsetT, dt.NumFields()) for i, field := range dt.Fields() { offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo) } @@ -713,6 +719,12 @@ func concreteTypeFromFB(typ 
flatbuf.Type, data flatbuffers.Table, children []arr case flatbuf.TypeLargeUtf8: return arrow.BinaryTypes.LargeString, nil + case flatbuf.TypeUtf8View: + return arrow.BinaryTypes.StringView, nil + + case flatbuf.TypeBinaryView: + return arrow.BinaryTypes.BinaryView, nil + case flatbuf.TypeBool: return arrow.FixedWidthTypes.Boolean, nil @@ -931,6 +943,10 @@ func floatToFB(b *flatbuffers.Builder, bw int32) flatbuffers.UOffsetT { func decimalFromFB(data flatbuf.Decimal) (arrow.DataType, error) { switch data.BitWidth() { + case 32: + return &arrow.Decimal32Type{Precision: data.Precision(), Scale: data.Scale()}, nil + case 64: + return &arrow.Decimal64Type{Precision: data.Precision(), Scale: data.Scale()}, nil case 128: return &arrow.Decimal128Type{Precision: data.Precision(), Scale: data.Scale()}, nil case 256: @@ -1084,10 +1100,10 @@ func schemaFromFB(schema *flatbuf.Schema, memo *dictutils.Memo) (*arrow.Schema, } func schemaToFB(b *flatbuffers.Builder, schema *arrow.Schema, memo *dictutils.Mapper) flatbuffers.UOffsetT { - fields := make([]flatbuffers.UOffsetT, len(schema.Fields())) + fields := make([]flatbuffers.UOffsetT, schema.NumFields()) pos := dictutils.NewFieldPos() - for i, field := range schema.Fields() { - fields[i] = fieldToFB(b, pos.Child(int32(i)), field, memo) + for i := 0; i < schema.NumFields(); i++ { + fields[i] = fieldToFB(b, pos.Child(int32(i)), schema.Field(i), memo) } flatbuf.SchemaStartFieldsVector(b, len(fields)) @@ -1144,7 +1160,7 @@ func writeSchemaMessage(schema *arrow.Schema, mem memory.Allocator, dict *dictut return writeMessageFB(b, mem, flatbuf.MessageHeaderSchema, schemaFB, 0) } -func writeFileFooter(schema *arrow.Schema, dicts, recs []fileBlock, w io.Writer) error { +func writeFileFooter(schema *arrow.Schema, dicts, recs []dataBlock, w io.Writer) error { var ( b = flatbuffers.NewBuilder(1024) memo dictutils.Mapper @@ -1168,15 +1184,15 @@ func writeFileFooter(schema *arrow.Schema, dicts, recs []fileBlock, w io.Writer) return err } -func writeRecordMessage(mem memory.Allocator, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType) *memory.Buffer { +func writeRecordMessage(mem memory.Allocator, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType, variadicCounts []int64) *memory.Buffer { b := flatbuffers.NewBuilder(0) - recFB := recordToFB(b, size, bodyLength, fields, meta, codec) + recFB := recordToFB(b, size, bodyLength, fields, meta, codec, variadicCounts) return writeMessageFB(b, mem, flatbuf.MessageHeaderRecordBatch, recFB, bodyLength) } -func writeDictionaryMessage(mem memory.Allocator, id int64, isDelta bool, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType) *memory.Buffer { +func writeDictionaryMessage(mem memory.Allocator, id int64, isDelta bool, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType, variadicCounts []int64) *memory.Buffer { b := flatbuffers.NewBuilder(0) - recFB := recordToFB(b, size, bodyLength, fields, meta, codec) + recFB := recordToFB(b, size, bodyLength, fields, meta, codec, variadicCounts) flatbuf.DictionaryBatchStart(b) flatbuf.DictionaryBatchAddId(b, id) @@ -1186,7 +1202,7 @@ func writeDictionaryMessage(mem memory.Allocator, id int64, isDelta bool, size, return writeMessageFB(b, mem, flatbuf.MessageHeaderDictionaryBatch, dictFB, bodyLength) } -func recordToFB(b *flatbuffers.Builder, size, bodyLength int64, fields []fieldMetadata, meta 
[]bufferMetadata, codec flatbuf.CompressionType) flatbuffers.UOffsetT { +func recordToFB(b *flatbuffers.Builder, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType, variadicCounts []int64) flatbuffers.UOffsetT { fieldsFB := writeFieldNodes(b, fields, flatbuf.RecordBatchStartNodesVector) metaFB := writeBuffers(b, meta, flatbuf.RecordBatchStartBuffersVector) var bodyCompressFB flatbuffers.UOffsetT @@ -1194,10 +1210,24 @@ func recordToFB(b *flatbuffers.Builder, size, bodyLength int64, fields []fieldMe bodyCompressFB = writeBodyCompression(b, codec) } + var vcFB *flatbuffers.UOffsetT + if len(variadicCounts) > 0 { + flatbuf.RecordBatchStartVariadicBufferCountsVector(b, len(variadicCounts)) + for i := len(variadicCounts) - 1; i >= 0; i-- { + b.PrependInt64(variadicCounts[i]) + } + vcFBVal := b.EndVector(len(variadicCounts)) + vcFB = &vcFBVal + } + flatbuf.RecordBatchStart(b) flatbuf.RecordBatchAddLength(b, size) flatbuf.RecordBatchAddNodes(b, fieldsFB) flatbuf.RecordBatchAddBuffers(b, metaFB) + if vcFB != nil { + flatbuf.RecordBatchAddVariadicBufferCounts(b, *vcFB) + } + if codec != -1 { flatbuf.RecordBatchAddCompression(b, bodyCompressFB) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/reader.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/reader.go index 1f684c1f..2a4f859b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/reader.go @@ -17,19 +17,19 @@ package ipc import ( - "bytes" "errors" "fmt" "io" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/endian" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/internal/dictutils" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/utils" ) // Reader reads records from an io.Reader. 
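For orientation, a brief sketch of consuming an IPC stream with this Reader (illustrative only, not part of the vendored file; r is any io.Reader carrying an Arrow IPC stream):

package example

import (
	"io"

	"github.com/apache/arrow-go/v18/arrow/ipc"
)

// readAll is an illustrative helper: it drains an Arrow IPC stream,
// releasing the reader when done.
func readAll(r io.Reader) error {
	rdr, err := ipc.NewReader(r)
	if err != nil {
		return err
	}
	defer rdr.Release()

	for rdr.Next() {
		rec := rdr.Record() // only valid until the next call to Next
		_ = rec.NumRows()
		// process rec ...
	}
	return rdr.Err()
}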
@@ -60,7 +60,7 @@ type Reader struct { func NewReaderFromMessageReader(r MessageReader, opts ...Option) (reader *Reader, err error) { defer func() { if pErr := recover(); pErr != nil { - err = fmt.Errorf("arrow/ipc: unknown error while reading: %v", pErr) + err = utils.FormatRecoveredError("arrow/ipc: unknown error while reading", pErr) } }() cfg := newConfig() @@ -200,7 +200,7 @@ func (r *Reader) getInitialDicts() bool { if msg.Type() != MessageDictionaryBatch { r.err = fmt.Errorf("arrow/ipc: IPC stream did not have the expected (%d) dictionaries at the start of the stream", numDicts) } - if _, err := readDictionary(&r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem); err != nil { + if _, err := readDictionary(&r.memo, msg.meta, msg.body, r.swapEndianness, r.mem); err != nil { r.done = true r.err = err return false @@ -213,7 +213,7 @@ func (r *Reader) getInitialDicts() bool { func (r *Reader) next() bool { defer func() { if pErr := recover(); pErr != nil { - r.err = fmt.Errorf("arrow/ipc: unknown error while reading: %v", pErr) + r.err = utils.FormatRecoveredError("arrow/ipc: unknown error while reading", pErr) } }() if r.schema == nil { @@ -232,7 +232,7 @@ func (r *Reader) next() bool { msg, r.err = r.r.Message() for msg != nil && msg.Type() == MessageDictionaryBatch { - if _, r.err = readDictionary(&r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem); r.err != nil { + if _, r.err = readDictionary(&r.memo, msg.meta, msg.body, r.swapEndianness, r.mem); r.err != nil { r.done = true return false } @@ -251,7 +251,7 @@ func (r *Reader) next() bool { return false } - r.rec = newRecord(r.schema, &r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem) + r.rec = newRecord(r.schema, &r.memo, msg.meta, msg.body, r.swapEndianness, r.mem) return true } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/writer.go similarity index 74% rename from vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go rename to vendor/github.com/apache/arrow-go/v18/arrow/ipc/writer.go index a97f47ef..96f082fb 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/writer.go @@ -26,28 +26,29 @@ import ( "sync" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/internal" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/internal/dictutils" - "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/utils" ) -type swriter struct { +type streamWriter struct { w io.Writer pos int64 } -func (w *swriter) Start() error { return nil } -func (w *swriter) Close() error { +func (w *streamWriter) Start() error { return nil } +func (w *streamWriter) Close() error { _, err := w.Write(kEOS[:]) return err } -func (w *swriter) WritePayload(p Payload) error { 
+func (w *streamWriter) WritePayload(p Payload) error { _, err := writeIPCPayload(w, p) if err != nil { return err @@ -55,7 +56,7 @@ func (w *swriter) WritePayload(p Payload) error { return nil } -func (w *swriter) Write(p []byte) (int, error) { +func (w *streamWriter) Write(p []byte) (int, error) { n, err := w.w.Write(p) w.pos += int64(n) return n, err @@ -85,6 +86,7 @@ type Writer struct { mapper dictutils.Mapper codec flatbuf.CompressionType compressNP int + compressors []compressor minSpaceSavings *float64 // map of the last written dictionaries by id @@ -106,6 +108,7 @@ func NewWriterWithPayloadWriter(pw PayloadWriter, opts ...Option) *Writer { compressNP: cfg.compressNP, minSpaceSavings: cfg.minSpaceSavings, emitDictDeltas: cfg.emitDictDeltas, + compressors: make([]compressor, cfg.compressNP), } } @@ -115,10 +118,12 @@ func NewWriter(w io.Writer, opts ...Option) *Writer { return &Writer{ w: w, mem: cfg.alloc, - pw: &swriter{w: w}, + pw: &streamWriter{w: w}, schema: cfg.schema, codec: cfg.codec, emitDictDeltas: cfg.emitDictDeltas, + compressNP: cfg.compressNP, + compressors: make([]compressor, cfg.compressNP), } } @@ -150,26 +155,39 @@ func (w *Writer) Close() error { func (w *Writer) Write(rec arrow.Record) (err error) { defer func() { if pErr := recover(); pErr != nil { - err = fmt.Errorf("arrow/ipc: unknown error while writing: %v", pErr) + err = utils.FormatRecoveredError("arrow/ipc: unknown error while writing", pErr) } }() + incomingSchema := rec.Schema() + if !w.started { + if w.schema == nil { + w.schema = incomingSchema + } err := w.start() if err != nil { return err } } - schema := rec.Schema() - if schema == nil || !schema.Equal(w.schema) { + if incomingSchema == nil || !incomingSchema.Equal(w.schema) { return errInconsistentSchema } const allow64b = true var ( data = Payload{msg: MessageRecordBatch} - enc = newRecordEncoder(w.mem, 0, kMaxNestingDepth, allow64b, w.codec, w.compressNP, w.minSpaceSavings) + enc = newRecordEncoder( + w.mem, + 0, + kMaxNestingDepth, + allow64b, + w.codec, + w.compressNP, + w.minSpaceSavings, + w.compressors, + ) ) defer data.Release() @@ -277,7 +295,7 @@ type dictEncoder struct { } func (d *dictEncoder) encodeMetadata(p *Payload, isDelta bool, id, nrows int64) error { - p.meta = writeDictionaryMessage(d.mem, id, isDelta, nrows, p.size, d.fields, d.meta, d.codec) + p.meta = writeDictionaryMessage(d.mem, id, isDelta, nrows, p.size, d.fields, d.meta, d.codec, d.variadicCounts) return nil } @@ -300,18 +318,29 @@ func (d *dictEncoder) Encode(p *Payload, id int64, isDelta bool, dict arrow.Arra type recordEncoder struct { mem memory.Allocator - fields []fieldMetadata - meta []bufferMetadata + fields []fieldMetadata + meta []bufferMetadata + variadicCounts []int64 depth int64 start int64 allow64b bool codec flatbuf.CompressionType compressNP int + compressors []compressor minSpaceSavings *float64 } -func newRecordEncoder(mem memory.Allocator, startOffset, maxDepth int64, allow64b bool, codec flatbuf.CompressionType, compressNP int, minSpaceSavings *float64) *recordEncoder { +func newRecordEncoder( + mem memory.Allocator, + startOffset, + maxDepth int64, + allow64b bool, + codec flatbuf.CompressionType, + compressNP int, + minSpaceSavings *float64, + compressors []compressor, +) *recordEncoder { return &recordEncoder{ mem: mem, start: startOffset, @@ -319,6 +348,7 @@ func newRecordEncoder(mem memory.Allocator, startOffset, maxDepth int64, allow64 allow64b: allow64b, codec: codec, compressNP: compressNP, + compressors: compressors, minSpaceSavings: 
minSpaceSavings, } } @@ -338,6 +368,13 @@ func (w *recordEncoder) reset() { w.fields = make([]fieldMetadata, 0) } +func (w *recordEncoder) getCompressor(id int) compressor { + if w.compressors[id] == nil { + w.compressors[id] = getCompressor(w.codec) + } + return w.compressors[id] +} + func (w *recordEncoder) compressBodyBuffers(p *Payload) error { compress := func(idx int, codec compressor) error { if p.body[idx] == nil || p.body[idx].Len() == 0 { @@ -376,7 +413,7 @@ func (w *recordEncoder) compressBodyBuffers(p *Payload) error { } if w.compressNP <= 1 { - codec := getCompressor(w.codec) + codec := w.getCompressor(0) for idx := range p.body { if err := compress(idx, codec); err != nil { return err @@ -393,11 +430,11 @@ func (w *recordEncoder) compressBodyBuffers(p *Payload) error { ) defer cancel() - for i := 0; i < w.compressNP; i++ { + for workerID := 0; workerID < w.compressNP; workerID++ { wg.Add(1) - go func() { + go func(id int) { defer wg.Done() - codec := getCompressor(w.codec) + codec := w.getCompressor(id) for { select { case idx, ok := <-ch: @@ -416,7 +453,7 @@ func (w *recordEncoder) compressBodyBuffers(p *Payload) error { return } } - }() + }(workerID) } for idx := range p.body { @@ -566,7 +603,7 @@ func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { // non-zero offset: slice the buffer offset := int64(data.Offset()) * typeWidth // send padding if available - len := minI64(bitutil.CeilByte64(arrLen*typeWidth), int64(values.Len())-offset) + len := min(bitutil.CeilByte64(arrLen*typeWidth), int64(values.Len())-offset) values = memory.NewBufferBytes(values.Bytes()[offset : offset+len]) default: if values != nil { @@ -590,9 +627,13 @@ func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { case needTruncate(int64(data.Offset()), values, totalDataBytes): // slice data buffer to include the range we need now. 
var ( - beg = arr.ValueOffset64(0) - len = minI64(paddedLength(totalDataBytes, kArrowAlignment), int64(totalDataBytes)) + beg int64 = 0 + len = min(paddedLength(totalDataBytes, kArrowAlignment), int64(totalDataBytes)) ) + if arr.Len() > 0 { + beg = arr.ValueOffset64(0) + } + values = memory.NewBufferBytes(data.Buffers()[2].Bytes()[beg : beg+len]) default: if values != nil { @@ -602,6 +643,33 @@ func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { p.body = append(p.body, voffsets) p.body = append(p.body, values) + case arrow.BinaryViewDataType: + data := arr.Data() + values := data.Buffers()[1] + arrLen := int64(arr.Len()) + typeWidth := int64(arrow.ViewHeaderSizeBytes) + minLength := paddedLength(arrLen*typeWidth, kArrowAlignment) + + switch { + case needTruncate(int64(data.Offset()), values, minLength): + // non-zero offset: slice the buffer + offset := data.Offset() * int(typeWidth) + // send padding if available + len := int(min(bitutil.CeilByte64(arrLen*typeWidth), int64(values.Len()-offset))) + values = memory.SliceBuffer(values, offset, len) + default: + if values != nil { + values.Retain() + } + } + p.body = append(p.body, values) + + w.variadicCounts = append(w.variadicCounts, int64(len(data.Buffers())-2)) + for _, b := range data.Buffers()[2:] { + b.Retain() + p.body = append(p.body, b) + } + case *arrow.StructType: w.depth-- arr := arr.(*array.Struct) @@ -718,42 +786,22 @@ func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { w.depth++ case *arrow.ListViewType, *arrow.LargeListViewType: - data := arr.Data() arr := arr.(array.VarLenListLike) - offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits() - rngOff, rngLen := array.RangeOfValuesUsed(arr) - voffsets := w.getValueOffsetsAtBaseValue(arr, rngOff) - p.body = append(p.body, voffsets) - vsizes := data.Buffers()[2] - if vsizes != nil { - if data.Offset() != 0 || vsizes.Len() > offsetTraits.BytesRequired(arr.Len()) { - beg := offsetTraits.BytesRequired(data.Offset()) - end := beg + offsetTraits.BytesRequired(data.Len()) - vsizes = memory.NewBufferBytes(vsizes.Bytes()[beg:end]) - } else { - vsizes.Retain() - } - } + voffsets, minOffset, maxEnd := w.getZeroBasedListViewOffsets(arr) + vsizes := w.getListViewSizes(arr) + + p.body = append(p.body, voffsets) p.body = append(p.body, vsizes) w.depth-- var ( - values = arr.ListValues() - mustRelease = false - values_offset = int64(rngOff) - values_end = int64(rngOff + rngLen) + values = arr.ListValues() ) - defer func() { - if mustRelease { - values.Release() - } - }() - if arr.Len() > 0 && values_end < int64(values.Len()) { - // must also slice the values - values = array.NewSlice(values, values_offset, values_end) - mustRelease = true + if minOffset != 0 || maxEnd < int64(values.Len()) { + values = array.NewSlice(values, minOffset, maxEnd) + defer values.Release() } err := w.visit(p, values) @@ -813,19 +861,35 @@ func (w *recordEncoder) getZeroBasedValueOffsets(arr arrow.Array) *memory.Buffer return nil } + dataTypeWidth := arr.DataType().Layout().Buffers[1].ByteWidth + // if we have a non-zero offset, then the value offsets do not start at // zero. 
we must a) create a new offsets array with shifted offsets and // b) slice the values array accordingly - // + hasNonZeroOffset := data.Offset() != 0 + // or if there are more value offsets than values (the array has been sliced) // we need to trim off the trailing offsets - needsTruncateAndShift := data.Offset() != 0 || offsetBytesNeeded < voffsets.Len() + hasMoreOffsetsThanValues := offsetBytesNeeded < voffsets.Len() + + // or if the offsets do not start from the zero index, we need to shift them + // and slice the values array + var firstOffset int64 + if dataTypeWidth == 8 { + firstOffset = arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[0] + } else { + firstOffset = int64(arrow.Int32Traits.CastFromBytes(voffsets.Bytes())[0]) + } + offsetsDoNotStartFromZero := firstOffset != 0 + + // determine whether the offsets array should be shifted + needsTruncateAndShift := hasNonZeroOffset || hasMoreOffsetsThanValues || offsetsDoNotStartFromZero if needsTruncateAndShift { shiftedOffsets := memory.NewResizableBuffer(w.mem) shiftedOffsets.Resize(offsetBytesNeeded) - switch arr.DataType().Layout().Buffers[1].ByteWidth { + switch dataTypeWidth { case 8: dest := arrow.Int64Traits.CastFromBytes(shiftedOffsets.Bytes()) offsets := arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()+1] @@ -854,61 +918,92 @@ func (w *recordEncoder) getZeroBasedValueOffsets(arr arrow.Array) *memory.Buffer return voffsets } -// Truncates the offsets if needed and shifts the values if minOffset > 0. -// The offsets returned are corrected assuming the child values are truncated -// and now start at minOffset. -// -// This function only works on offset buffers of ListViews and LargeListViews. -// TODO(felipecrv): Unify this with getZeroBasedValueOffsets. 
-func (w *recordEncoder) getValueOffsetsAtBaseValue(arr arrow.Array, minOffset int) *memory.Buffer { - data := arr.Data() - voffsets := data.Buffers()[1] - offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits() - offsetBytesNeeded := offsetTraits.BytesRequired(data.Len()) +func getZeroBasedListViewOffsets[OffsetT int32 | int64](mem memory.Allocator, arr array.VarLenListLike) (valueOffsets *memory.Buffer, minOffset, maxEnd OffsetT) { + requiredBytes := int(unsafe.Sizeof(minOffset)) * arr.Len() + if arr.Data().Offset() == 0 { + // slice offsets to used extent, in case we have truncated slice + minOffset, maxEnd = 0, OffsetT(arr.ListValues().Len()) + valueOffsets = arr.Data().Buffers()[1] + if valueOffsets.Len() > requiredBytes { + valueOffsets = memory.SliceBuffer(valueOffsets, 0, requiredBytes) + } else { + valueOffsets.Retain() + } + return + } - if voffsets == nil || voffsets.Len() == 0 { - return nil + // non-zero offset, it's likely that the smallest offset is not zero + // we must a) create a new offsets array with shifted offsets and + // b) slice the values array accordingly + + valueOffsets = memory.NewResizableBuffer(mem) + valueOffsets.Resize(requiredBytes) + if arr.Len() > 0 { + // max value of int32/int64 based on type + minOffset = (^OffsetT(0)) << ((8 * unsafe.Sizeof(minOffset)) - 1) + for i := 0; i < arr.Len(); i++ { + start, end := arr.ValueOffsets(i) + minOffset = utils.Min(minOffset, OffsetT(start)) + maxEnd = utils.Max(maxEnd, OffsetT(end)) + } } - needsTruncate := data.Offset() != 0 || offsetBytesNeeded < voffsets.Len() - needsShift := minOffset > 0 + offsets := arrow.GetData[OffsetT](arr.Data().Buffers()[1].Bytes())[arr.Data().Offset():] + destOffset := arrow.GetData[OffsetT](valueOffsets.Bytes()) + for i := 0; i < arr.Len(); i++ { + destOffset[i] = offsets[i] - minOffset + } + return +} - if needsTruncate || needsShift { - shiftedOffsets := memory.NewResizableBuffer(w.mem) - shiftedOffsets.Resize(offsetBytesNeeded) +func getListViewSizes[OffsetT int32 | int64](arr array.VarLenListLike) *memory.Buffer { + var z OffsetT + requiredBytes := int(unsafe.Sizeof(z)) * arr.Len() + sizes := arr.Data().Buffers()[2] - switch arr.DataType().Layout().Buffers[1].ByteWidth { - case 8: - dest := arrow.Int64Traits.CastFromBytes(shiftedOffsets.Bytes()) - offsets := arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()] + if arr.Data().Offset() != 0 || sizes.Len() > requiredBytes { + // slice offsets to used extent, in case we have truncated slice + offsetBytes := arr.Data().Offset() * int(unsafe.Sizeof(z)) + sizes = memory.SliceBuffer(sizes, offsetBytes, requiredBytes) + } else { + sizes.Retain() + } + return sizes +} - if minOffset > 0 { - for i, o := range offsets { - dest[i] = o - int64(minOffset) - } - } else { - copy(dest, offsets) - } - default: - debug.Assert(arr.DataType().Layout().Buffers[1].ByteWidth == 4, "invalid offset bytewidth") - dest := arrow.Int32Traits.CastFromBytes(shiftedOffsets.Bytes()) - offsets := arrow.Int32Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()] +func (w *recordEncoder) getZeroBasedListViewOffsets(arr array.VarLenListLike) (*memory.Buffer, int64, int64) { + if arr.Len() == 0 { + return nil, 0, 0 + } - if minOffset > 0 { - for i, o := range offsets { - dest[i] = o - int32(minOffset) - } - } else { - copy(dest, offsets) - } - } + var ( + outOffsets *memory.Buffer + minOff, maxEnd int64 + ) - voffsets = shiftedOffsets - } else { - voffsets.Retain() + switch v := 
arr.(type) { + case *array.ListView: + voffsets, outOff, outEnd := getZeroBasedListViewOffsets[int32](w.mem, v) + outOffsets = voffsets + minOff, maxEnd = int64(outOff), int64(outEnd) + case *array.LargeListView: + outOffsets, minOff, maxEnd = getZeroBasedListViewOffsets[int64](w.mem, v) } + return outOffsets, minOff, maxEnd +} - return voffsets +func (w *recordEncoder) getListViewSizes(arr array.VarLenListLike) *memory.Buffer { + if arr.Len() == 0 { + return nil + } + + switch v := arr.(type) { + case *array.ListView: + return getListViewSizes[int32](v) + case *array.LargeListView: + return getListViewSizes[int64](v) + } + return nil } func (w *recordEncoder) rebaseDenseUnionValueOffsets(arr *array.DenseUnion, offsets, lengths []int32) *memory.Buffer { @@ -933,7 +1028,7 @@ func (w *recordEncoder) rebaseDenseUnionValueOffsets(arr *array.DenseUnion, offs } else { shiftedOffsets[i] = unshiftedOffsets[i] - offsets[c] } - lengths[c] = maxI32(lengths[c], shiftedOffsets[i]+1) + lengths[c] = max(lengths[c], shiftedOffsets[i]+1) } return shiftedOffsetsBuf } @@ -946,7 +1041,7 @@ func (w *recordEncoder) Encode(p *Payload, rec arrow.Record) error { } func (w *recordEncoder) encodeMetadata(p *Payload, nrows int64) error { - p.meta = writeRecordMessage(w.mem, nrows, p.size, w.fields, w.meta, w.codec) + p.meta = writeRecordMessage(w.mem, nrows, p.size, w.fields, w.meta, w.codec, w.variadicCounts) return nil } @@ -976,7 +1071,7 @@ func getTruncatedBuffer(offset, length int64, byteWidth int32, buf *memory.Buffe paddedLen := paddedLength(length*int64(byteWidth), kArrowAlignment) if offset != 0 || paddedLen < int64(buf.Len()) { - return memory.SliceBuffer(buf, int(offset*int64(byteWidth)), int(minI64(paddedLen, int64(buf.Len())))) + return memory.SliceBuffer(buf, int(offset*int64(byteWidth)), int(min(paddedLen, int64(buf.Len())))) } buf.Retain() return buf @@ -989,16 +1084,37 @@ func needTruncate(offset int64, buf *memory.Buffer, minLength int64) bool { return offset != 0 || minLength < int64(buf.Len()) } -func minI64(a, b int64) int64 { - if a < b { - return a +// GetRecordBatchPayload produces the ipc payload for a given record batch. +// The resulting payload itself must be released by the caller via the Release +// method after it is no longer needed. +func GetRecordBatchPayload(batch arrow.Record, opts ...Option) (Payload, error) { + cfg := newConfig(opts...) + var ( + data = Payload{msg: MessageRecordBatch} + enc = newRecordEncoder( + cfg.alloc, + 0, + kMaxNestingDepth, + true, + cfg.codec, + cfg.compressNP, + cfg.minSpaceSavings, + make([]compressor, cfg.compressNP), + ) + ) + + err := enc.Encode(&data, batch) + if err != nil { + return Payload{}, err } - return b + + return data, nil } -func maxI32(a, b int32) int32 { - if a > b { - return a - } - return b +// GetSchemaPayload produces the ipc payload for a given schema. 
+func GetSchemaPayload(schema *arrow.Schema, mem memory.Allocator) Payload { + var mapper dictutils.Mapper + mapper.ImportSchema(schema) + ps := payloadFromSchema(schema, mem, &mapper) + return ps[0] } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/Makefile b/vendor/github.com/apache/arrow-go/v18/arrow/memory/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/Makefile rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/Makefile diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/allocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/allocator.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/allocator.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/allocator.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/buffer.go similarity index 86% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/buffer.go index 2ddb3f82..04722225 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/buffer.go @@ -19,7 +19,7 @@ package memory import ( "sync/atomic" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) // Buffer is a wrapper type for a buffer of bytes. @@ -33,6 +33,18 @@ type Buffer struct { parent *Buffer } +// NewBufferWithAllocator returns a buffer with the mutable flag set +// as false. The intention here is to allow wrapping a byte slice along +// with an allocator as a buffer to track the lifetime via refcounts +// in order to call Free when the refcount goes to zero. +// +// The primary example this is used for, is currently importing data +// through the c data interface and tracking the lifetime of the +// imported buffers. +func NewBufferWithAllocator(data []byte, mem Allocator) *Buffer { + return &Buffer{refCount: 1, buf: data, length: len(data), mem: mem} +} + // NewBufferBytes creates a fixed-size buffer from the specified data. func NewBufferBytes(data []byte) *Buffer { return &Buffer{refCount: 0, buf: data, length: len(data)} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator.go index 85ee4452..57f3d922 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator.go @@ -14,15 +14,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
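The writer.go hunk above exports two new helpers, GetRecordBatchPayload and GetSchemaPayload, for producing IPC payloads without going through a full Writer. A minimal sketch of how calling code might use them, assuming only the arrow-go/v18 APIs visible in this diff plus the standard array.RecordBuilder; the single int64 column and row values are illustrative:

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/array"
	"github.com/apache/arrow-go/v18/arrow/ipc"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()
	schema := arrow.NewSchema([]arrow.Field{{Name: "x", Type: arrow.PrimitiveTypes.Int64}}, nil)

	bld := array.NewRecordBuilder(mem, schema)
	defer bld.Release()
	bld.Field(0).(*array.Int64Builder).AppendValues([]int64{1, 2, 3}, nil)
	rec := bld.NewRecord()
	defer rec.Release()

	// The schema payload carries the serialized schema message.
	schemaPayload := ipc.GetSchemaPayload(schema, mem)
	defer schemaPayload.Release()

	// Per the new doc comment, the record-batch payload must be released by the caller.
	recPayload, err := ipc.GetRecordBatchPayload(rec, ipc.WithAllocator(mem))
	if err != nil {
		panic(err)
	}
	defer recPayload.Release()

	fmt.Println("built payloads for", rec.NumRows(), "rows")
}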
-// +build cgo -// +build ccalloc +//go:build cgo && ccalloc +// +build cgo,ccalloc package memory import ( "runtime" - cga "github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc" + cga "github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc" ) // CgoArrowAllocator is an allocator which exposes the C++ memory pool class diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator_defaults.go similarity index 92% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator_defaults.go index 501431a0..0a2e9a34 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator_defaults.go @@ -14,9 +14,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build cgo -// +build ccalloc -// +build !cclog +//go:build cgo && ccalloc && !cclog +// +build cgo,ccalloc,!cclog package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator_logging.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator_logging.go index 01ad6b39..fe2e3a94 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/cgo_allocator_logging.go @@ -14,9 +14,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
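Stepping back to the memory/buffer.go hunk above: the new NewBufferWithAllocator constructor wraps an existing byte slice so that refcounting decides when the backing allocator's Free is called, which is what the C data interface import path needs. A minimal sketch of that lifetime pattern, assuming only the exported memory API shown in this diff (the 128-byte size is illustrative):

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()

	// Bytes obtained straight from the allocator, standing in for memory that
	// will be shared across the C data interface.
	raw := mem.Allocate(128)

	// The wrapped buffer starts with a refcount of one; the final Release
	// hands raw back to mem via Free.
	buf := memory.NewBufferWithAllocator(raw, mem)
	fmt.Println("wrapped", buf.Len(), "bytes")

	buf.Retain()  // an extra owner keeps the memory alive
	buf.Release() // back to one owner
	buf.Release() // refcount reaches zero; the allocator frees the bytes
}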
-// +build cgo -// +build ccalloc -// +build cclog +//go:build cgo && ccalloc && cclog +// +build cgo,ccalloc,cclog package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/checked_allocator.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/checked_allocator.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_allocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/default_allocator.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/default_allocator.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/default_allocator.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/default_mallocator.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/default_mallocator.go index 12ad0846..4eefc1e9 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/default_mallocator.go @@ -19,7 +19,7 @@ package memory import ( - "github.com/apache/arrow/go/v14/arrow/memory/mallocator" + "github.com/apache/arrow-go/v18/arrow/memory/mallocator" ) // DefaultAllocator is a default implementation of Allocator and can be used anywhere diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/doc.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/doc.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/doc.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/go_allocator.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/go_allocator.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.cc b/vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/allocator.cc similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.cc rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/allocator.cc diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/allocator.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/allocator.go index 48f34d86..e7cc0491 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/allocator.go @@ -25,7 +25,6 @@ package cgoalloc // #include "allocator.h" import "C" import ( - "reflect" "unsafe" ) @@ -35,20 +34,14 @@ type CGOMemPool = C.ArrowMemoryPool // CgoPoolAlloc allocates a block of memory of length 'size' using the memory // pool that is passed in. 
func CgoPoolAlloc(pool CGOMemPool, size int) []byte { - var ret []byte if size == 0 { - return ret + return []byte{} } var out *C.uint8_t C.arrow_pool_allocate(pool, C.int64_t(size), (**C.uint8_t)(unsafe.Pointer(&out))) - s := (*reflect.SliceHeader)(unsafe.Pointer(&ret)) - s.Data = uintptr(unsafe.Pointer(out)) - s.Len = size - s.Cap = size - - return ret + return unsafe.Slice((*byte)(unsafe.Pointer(out)), size) } // CgoPoolRealloc calls 'reallocate' on the block of memory passed in which must @@ -59,16 +52,10 @@ func CgoPoolRealloc(pool CGOMemPool, size int, b []byte) []byte { } oldSize := C.int64_t(len(b)) - data := (*C.uint8_t)(unsafe.Pointer(&b[0])) + data := (*C.uint8_t)(unsafe.SliceData(b)) C.arrow_pool_reallocate(pool, oldSize, C.int64_t(size), &data) - var ret []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&ret)) - s.Data = uintptr(unsafe.Pointer(data)) - s.Len = size - s.Cap = size - - return ret + return unsafe.Slice((*byte)(unsafe.Pointer(data)), size) } // CgoPoolFree uses the indicated memory pool to free a block of memory. The diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.h b/vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/allocator.h similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.h rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/allocator.h diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/helpers.h b/vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/helpers.h similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/helpers.h rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc/helpers.h diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/doc.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/mallocator/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/doc.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/mallocator/doc.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/mallocator/mallocator.go similarity index 55% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/mallocator/mallocator.go index 18e0377c..994cfdb2 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/mallocator/mallocator.go @@ -19,25 +19,23 @@ package mallocator // #include // #include -// -// void* realloc_and_initialize(void* ptr, size_t old_len, size_t new_len) { -// void* new_ptr = realloc(ptr, new_len); -// if (new_ptr && new_len > old_len) { -// memset(new_ptr + old_len, 0, new_len - old_len); -// } -// return new_ptr; -// } import "C" import ( - "reflect" + "sync" "sync/atomic" "unsafe" ) +func roundToPowerOf2(v, round uintptr) uintptr { + forceCarry := round - 1 + truncateMask := ^forceCarry + return (v + forceCarry) & truncateMask +} + // Mallocator is an allocator which defers to libc malloc. // -// The priamry reason to use this is when exporting data across the C Data +// The primary reason to use this is when exporting data across the C Data // Interface. 
CGO requires that pointers to Go memory are not stored in C // memory, which is exactly what the C Data Interface would otherwise // require. By allocating with Mallocator up front, we can safely export the @@ -46,9 +44,22 @@ import ( // The build tag 'mallocator' will also make this the default allocator. type Mallocator struct { allocatedBytes uint64 + // We want to align allocations, but since we only get/return []byte, + // we need to remember the "real" address for Free somehow + realAllocations sync.Map + alignment int } -func NewMallocator() *Mallocator { return &Mallocator{} } +func NewMallocator() *Mallocator { return &Mallocator{alignment: 64} } + +func NewMallocatorWithAlignment(alignment int) *Mallocator { + if alignment < 1 { + panic("mallocator: invalid alignment (must be positive)") + } else if alignment > 1 && (alignment&(alignment-1)) != 0 { + panic("mallocator: invalid alignment (must be power of 2)") + } + return &Mallocator{alignment: alignment} +} func (alloc *Mallocator) Allocate(size int) []byte { // Use calloc to zero-initialize memory. @@ -59,41 +70,62 @@ func (alloc *Mallocator) Allocate(size int) []byte { if size < 0 { panic("mallocator: negative size") } - ptr, err := C.calloc(C.size_t(size), 1) + paddedSize := C.size_t(size + alloc.alignment) + ptr, err := C.calloc(paddedSize, 1) if err != nil { - panic(err) + // under some circumstances and allocation patterns, we can end up in a scenario + // where for some reason calloc return ENOMEM even though there is definitely memory + // available for use. So we attempt to fallback to simply doing malloc + memset in + // this case. If malloc returns a nil pointer, then we know we're out of memory + // and will surface the error. + if ptr = C.malloc(paddedSize); ptr == nil { + panic(err) + } + C.memset(ptr, 0, paddedSize) } else if ptr == nil { panic("mallocator: out of memory") } + + buf := unsafe.Slice((*byte)(ptr), paddedSize) + aligned := roundToPowerOf2(uintptr(ptr), uintptr(alloc.alignment)) + alloc.realAllocations.Store(aligned, uintptr(ptr)) atomic.AddUint64(&alloc.allocatedBytes, uint64(size)) - return unsafe.Slice((*byte)(ptr), size) + + if uintptr(ptr) != aligned { + shift := aligned - uintptr(ptr) + return buf[shift : uintptr(size)+shift : uintptr(size)+shift] + } + return buf[:size:size] } func (alloc *Mallocator) Free(b []byte) { - sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - C.free(unsafe.Pointer(sh.Data)) + sz := len(b) + ptr := getPtr(b) + realAddr, loaded := alloc.realAllocations.LoadAndDelete(uintptr(ptr)) + if !loaded { + // double-free? 
+ return + } + realPtr := unsafe.Pointer(realAddr.(uintptr)) + C.free(realPtr) // Subtract sh.Len via two's complement (since atomic doesn't offer subtract) - atomic.AddUint64(&alloc.allocatedBytes, ^(uint64(sh.Len) - 1)) + atomic.AddUint64(&alloc.allocatedBytes, ^(uint64(sz) - 1)) } func (alloc *Mallocator) Reallocate(size int, b []byte) []byte { if size < 0 { panic("mallocator: negative size") } - sh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - ptr, err := C.realloc_and_initialize(unsafe.Pointer(sh.Data), C.size_t(sh.Cap), C.size_t(size)) - if err != nil { - panic(err) - } else if ptr == nil && size != 0 { - panic("mallocator: out of memory") - } - delta := size - len(b) - if delta >= 0 { - atomic.AddUint64(&alloc.allocatedBytes, uint64(delta)) - } else { - atomic.AddUint64(&alloc.allocatedBytes, ^(uint64(-delta) - 1)) + + if cap(b) >= size { + diff := size - len(b) + atomic.AddUint64(&alloc.allocatedBytes, uint64(diff)) + return b[:size] } - return unsafe.Slice((*byte)(ptr), size) + newBuf := alloc.Allocate(size) + copy(newBuf, b) + alloc.Free(b) + return newBuf } func (alloc *Mallocator) AllocatedBytes() int64 { diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/memory/mallocator/mallocator_util.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/mallocator/mallocator_util.go new file mode 100644 index 00000000..0ab5f8f5 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/mallocator/mallocator_util.go @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build go1.20 || tinygo + +package mallocator + +import "unsafe" + +func getPtr(b []byte) unsafe.Pointer { + return unsafe.Pointer(unsafe.SliceData(b)) +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_amd64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_amd64.go index 58356d64..895ddc07 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_amd64.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
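The mallocator hunks above make every allocation aligned: calloc is padded by the configured alignment, the real start address is remembered in a sync.Map so Free can find it again, a malloc+memset fallback covers spurious ENOMEM from calloc, and Reallocate is now Allocate+copy+Free. A small usage sketch against the exported API added here (cgo is required to build this package; the 64-byte figure mirrors the new default alignment):

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow/memory/mallocator"
)

func main() {
	// The alignment must be a positive power of two, otherwise the
	// constructor panics; NewMallocator() now defaults to 64.
	alloc := mallocator.NewMallocatorWithAlignment(64)

	buf := alloc.Allocate(100)          // zero-initialized, 64-byte aligned
	fmt.Println(alloc.AllocatedBytes()) // 100

	buf = alloc.Reallocate(256, buf)              // grows via Allocate+copy+Free
	fmt.Println(len(buf), alloc.AllocatedBytes()) // 256 256

	alloc.Free(buf)
	fmt.Println(alloc.AllocatedBytes()) // 0
}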
+//go:build !noasm // +build !noasm package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_arm64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_arm64.go index 3db5d110..52603349 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_arm64.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !noasm // +build !noasm package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_avx2_amd64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_avx2_amd64.go index 2bd851ea..39fb3a5f 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_avx2_amd64.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !noasm // +build !noasm package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_js_wasm.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_js_wasm.go index 9b94d99f..5cc0c84d 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_js_wasm.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build wasm // +build wasm package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_neon_arm64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_neon_arm64.go index 6cb0400c..806ca575 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_neon_arm64.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !noasm // +build !noasm package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_neon_arm64.s similarity index 82% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_neon_arm64.s index 18655cc7..18b0af5c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_neon_arm64.s @@ -11,6 +11,10 @@ TEXT ·_memset_neon(SB), $0-24 MOVD len+8(FP), R1 MOVD c+16(FP), R2 + // The Go ABI saves the frame pointer register one word below the + // caller's frame. Make room so we don't overwrite it. Needs to stay + // 16-byte aligned + SUB $16, RSP WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! WORD $0x8b010008 // add x8, x0, x1 WORD $0xeb00011f // cmp x8, x0 @@ -40,4 +44,6 @@ LBB0_6: BNE LBB0_6 LBB0_7: WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_noasm.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_noasm.go index bf8846fa..44f19c09 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_noasm.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build noasm // +build noasm package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_sse4_amd64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_sse4_amd64.go index 716c0d27..1711a1ee 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_sse4_amd64.go @@ -14,6 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
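A recurring change in the cgoalloc and mallocator hunks earlier in this diff is replacing the deprecated reflect.SliceHeader round-trips with unsafe.Slice and unsafe.SliceData when viewing C-owned memory as a []byte. A standalone sketch of just that pattern (bytesAt is an illustrative helper, not part of the library; unsafe.SliceData needs Go 1.20+, matching the build tag on the new mallocator_util.go):

package main

import (
	"fmt"
	"unsafe"
)

// bytesAt views n bytes starting at p as a []byte without copying,
// the same construction CgoPoolAlloc/CgoPoolRealloc now use.
func bytesAt(p unsafe.Pointer, n int) []byte {
	return unsafe.Slice((*byte)(p), n)
}

func main() {
	backing := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	view := bytesAt(unsafe.Pointer(&backing[0]), len(backing))

	// The view aliases the original storage; SliceData recovers the pointer
	// the way the new getPtr helper does for Mallocator.Free.
	fmt.Println(view, unsafe.SliceData(view) == &backing[0])
}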
+//go:build !noasm // +build !noasm package memory diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.s b/vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.s rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/memory_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go b/vendor/github.com/apache/arrow-go/v18/arrow/memory/util.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go rename to vendor/github.com/apache/arrow-go/v18/arrow/memory/util.go index 3b0d3a5c..6cc7ec91 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/memory/util.go @@ -35,3 +35,11 @@ func isMultipleOfPowerOf2(v int, d int) bool { func addressOf(b []byte) uintptr { return uintptr(unsafe.Pointer(&b[0])) } + +func ReleaseBuffers(buffers []*Buffer) { + for _, b := range buffers { + if b != nil { + b.Release() + } + } +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/numeric.schema.json b/vendor/github.com/apache/arrow-go/v18/arrow/numeric.schema.json similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/numeric.schema.json rename to vendor/github.com/apache/arrow-go/v18/arrow/numeric.schema.json diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata b/vendor/github.com/apache/arrow-go/v18/arrow/numeric.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata rename to vendor/github.com/apache/arrow-go/v18/arrow/numeric.tmpldata diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/record.go b/vendor/github.com/apache/arrow-go/v18/arrow/record.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/record.go rename to vendor/github.com/apache/arrow-go/v18/arrow/record.go index d98c7732..4fd6b13c 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/record.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/record.go @@ -16,7 +16,7 @@ package arrow -import "github.com/apache/arrow/go/v14/internal/json" +import "github.com/apache/arrow-go/v18/internal/json" // Record is a collection of equal-length arrays matching a particular Schema. // Also known as a RecordBatch in the spec and in some implementations. 
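The memory/util.go hunk above adds a small exported ReleaseBuffers helper that nil-checks and releases every buffer in a slice. A minimal sketch of the kind of call site it is meant for, assuming only the memory APIs visible in this diff (the buffer contents are illustrative):

package main

import (
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()

	// A scratch set of buffers where some slots may legitimately stay nil.
	bufs := []*memory.Buffer{
		memory.NewResizableBuffer(mem),
		nil,
		memory.NewBufferBytes([]byte("hello")),
	}
	bufs[0].Resize(64)

	// One call releases every non-nil buffer, mirroring the loop added in util.go.
	memory.ReleaseBuffers(bufs)
}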
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/append.go b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/append.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/append.go rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/append.go index 9bcfe3e2..0525bc81 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/append.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/append.go @@ -21,11 +21,11 @@ package scalar import ( "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/float16" "golang.org/x/exp/constraints" ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/binary.go b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/binary.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/binary.go rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/binary.go index 3c041210..6b11eb3e 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/binary.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/binary.go @@ -21,8 +21,8 @@ import ( "fmt" "unicode/utf8" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/memory" ) type BinaryScalar interface { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/compare.go b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/compare.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/compare.go rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/compare.go index be7fa4d0..e69eb9eb 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/compare.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/compare.go @@ -16,7 +16,7 @@ package scalar -import "github.com/apache/arrow/go/v14/arrow" +import "github.com/apache/arrow-go/v18/arrow" // Equals returns true if two scalars are equal, which means they have the same // datatype, validity and value. 
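The scalar hunks above are mostly the import-path move to arrow-go/v18; the compare.go doc comment spells out that Equals requires matching datatype, validity and value. A small sketch of that behaviour, assuming the generated numeric constructors (NewInt32Scalar, NewInt64Scalar) from the scalar package:

package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow/scalar"
)

func main() {
	a := scalar.NewInt32Scalar(42)
	b := scalar.NewInt32Scalar(42)
	c := scalar.NewInt64Scalar(42)

	// Same datatype, validity and value.
	fmt.Println(scalar.Equals(a, b)) // true

	// Same value but different datatype (int32 vs int64), so not equal.
	fmt.Println(scalar.Equals(a, c)) // false
}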
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/nested.go b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/nested.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/nested.go rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/nested.go index 87e84210..5bca34a0 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/nested.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/nested.go @@ -21,10 +21,10 @@ import ( "errors" "fmt" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" "golang.org/x/xerrors" ) @@ -132,7 +132,7 @@ func NewLargeListScalarData(val arrow.ArrayData) *LargeList { } func makeMapType(typ *arrow.StructType) *arrow.MapType { - debug.Assert(len(typ.Fields()) == 2, "must pass struct with only 2 fields for MapScalar") + debug.Assert(typ.NumFields() == 2, "must pass struct with only 2 fields for MapScalar") return arrow.MapOf(typ.Field(0).Type, typ.Field(1).Type) } @@ -265,7 +265,7 @@ func (s *Struct) Validate() (err error) { } st := s.Type.(*arrow.StructType) - num := len(st.Fields()) + num := st.NumFields() if len(s.Value) != num { return fmt.Errorf("non-null %s scalar should have %d child values, got %d", s.Type, num, len(s.Value)) } @@ -303,7 +303,7 @@ func (s *Struct) ValidateFull() (err error) { } st := s.Type.(*arrow.StructType) - num := len(st.Fields()) + num := st.NumFields() if len(s.Value) != num { return fmt.Errorf("non-null %s scalar should have %d child values, got %d", s.Type, num, len(s.Value)) } @@ -571,8 +571,8 @@ func (s *SparseUnion) Release() { func (s *SparseUnion) Validate() (err error) { dt := s.Type.(*arrow.SparseUnionType) - if len(dt.Fields()) != len(s.Value) { - return fmt.Errorf("sparse union scalar value had %d fields but type has %d fields", len(dt.Fields()), len(s.Value)) + if dt.NumFields() != len(s.Value) { + return fmt.Errorf("sparse union scalar value had %d fields but type has %d fields", dt.NumFields(), len(s.Value)) } if s.TypeCode < 0 || int(s.TypeCode) >= len(dt.ChildIDs()) || dt.ChildIDs()[s.TypeCode] == arrow.InvalidUnionChildID { @@ -593,8 +593,8 @@ func (s *SparseUnion) Validate() (err error) { func (s *SparseUnion) ValidateFull() (err error) { dt := s.Type.(*arrow.SparseUnionType) - if len(dt.Fields()) != len(s.Value) { - return fmt.Errorf("sparse union scalar value had %d fields but type has %d fields", len(dt.Fields()), len(s.Value)) + if dt.NumFields() != len(s.Value) { + return fmt.Errorf("sparse union scalar value had %d fields but type has %d fields", dt.NumFields(), len(s.Value)) } if s.TypeCode < 0 || int(s.TypeCode) >= len(dt.ChildIDs()) || dt.ChildIDs()[s.TypeCode] == arrow.InvalidUnionChildID { @@ -643,7 +643,7 @@ func NewSparseUnionScalar(val []Scalar, code arrow.UnionTypeCode, dt *arrow.Spar func NewSparseUnionScalarFromValue(val Scalar, idx int, dt *arrow.SparseUnionType) *SparseUnion { code := dt.TypeCodes()[idx] - values := make([]Scalar, len(dt.Fields())) + values := make([]Scalar, dt.NumFields()) for i, f := range dt.Fields() { if i == idx { values[i] = val diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen.go 
similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen.go index 90ae2a70..cb8513c5 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen.go @@ -24,9 +24,9 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" ) type Int8 struct { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go.tmpldata b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen.go.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go.tmpldata rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen.go.tmpldata diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen_test.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen_test.go.tmpl similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen_test.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen_test.go.tmpl index c975cc9d..37f1b867 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen_test.go.tmpl +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/numeric.gen_test.go.tmpl @@ -19,8 +19,8 @@ package scalar_test import ( "testing" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/scalar" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/parse.go b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/parse.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/parse.go rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/parse.go index fcffe1ea..3ac57738 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/parse.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/parse.go @@ -25,12 +25,12 @@ import ( "strings" "time" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/float16" + "github.com/apache/arrow-go/v18/arrow/memory" ) type TypeToScalar interface { @@ -329,7 +329,7 @@ func fromListScalar(s ListScalar, v reflect.Value) error { } case *array.Map: // only implementing slice of metadata for now - if v.Type().Elem() != 
reflect.PtrTo(reflect.TypeOf(arrow.Metadata{})) { + if v.Type().Elem() != reflect.PointerTo(reflect.TypeOf(arrow.Metadata{})) { return fmt.Errorf("unimplemented fromListScalar type %s to %s", arr.DataType(), v.Type().String()) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/scalar.go b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/scalar.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/scalar.go rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/scalar.go index 395771fa..0f8ec616 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/scalar.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/scalar.go @@ -26,16 +26,16 @@ import ( "strconv" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/array" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/encoded" - "github.com/apache/arrow/go/v14/arrow/endian" - "github.com/apache/arrow/go/v14/arrow/float16" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/decimal128" + "github.com/apache/arrow-go/v18/arrow/decimal256" + "github.com/apache/arrow-go/v18/arrow/encoded" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/float16" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/memory" "golang.org/x/xerrors" ) @@ -512,7 +512,7 @@ func init() { arrow.LIST: func(dt arrow.DataType) Scalar { return &List{scalar: scalar{dt, false}} }, arrow.STRUCT: func(dt arrow.DataType) Scalar { typ := dt.(*arrow.StructType) - values := make([]Scalar, len(typ.Fields())) + values := make([]Scalar, typ.NumFields()) for i, f := range typ.Fields() { values[i] = MakeNullScalar(f.Type) } @@ -520,10 +520,10 @@ func init() { }, arrow.SPARSE_UNION: func(dt arrow.DataType) Scalar { typ := dt.(*arrow.SparseUnionType) - if len(typ.Fields()) == 0 { + if typ.NumFields() == 0 { panic("cannot make scalar of empty union type") } - values := make([]Scalar, len(typ.Fields())) + values := make([]Scalar, typ.NumFields()) for i, f := range typ.Fields() { values[i] = MakeNullScalar(f.Type) } @@ -531,7 +531,7 @@ func init() { }, arrow.DENSE_UNION: func(dt arrow.DataType) Scalar { typ := dt.(*arrow.DenseUnionType) - if len(typ.Fields()) == 0 { + if typ.NumFields() == 0 { panic("cannot make scalar of empty union type") } return NewDenseUnionScalar(MakeNullScalar(typ.Fields()[0].Type), typ.TypeCodes()[0], typ) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/temporal.go b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/temporal.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/arrow/scalar/temporal.go rename to vendor/github.com/apache/arrow-go/v18/arrow/scalar/temporal.go index 880416f7..42a46738 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/scalar/temporal.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/scalar/temporal.go @@ -22,7 +22,7 @@ import ( "time" "unsafe" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) func temporalToString(s TemporalScalar) string { diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/schema.go 
b/vendor/github.com/apache/arrow-go/v18/arrow/schema.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/schema.go rename to vendor/github.com/apache/arrow-go/v18/arrow/schema.go index a7fa4341..65702e7b 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/schema.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/schema.go @@ -21,7 +21,7 @@ import ( "sort" "strings" - "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/endian" ) type Metadata struct { @@ -259,8 +259,8 @@ func (s *Schema) AddField(i int, field Field) (*Schema, error) { func (s *Schema) String() string { o := new(strings.Builder) - fmt.Fprintf(o, "schema:\n fields: %d\n", len(s.Fields())) - for i, f := range s.Fields() { + fmt.Fprintf(o, "schema:\n fields: %d\n", s.NumFields()) + for i, f := range s.fields { if i > 0 { o.WriteString("\n") } @@ -282,7 +282,7 @@ func (s *Schema) Fingerprint() string { var b strings.Builder b.WriteString("S{") - for _, f := range s.Fields() { + for _, f := range s.fields { fieldFingerprint := f.Fingerprint() if fieldFingerprint == "" { return "" diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/table.go b/vendor/github.com/apache/arrow-go/v18/arrow/table.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/arrow/table.go rename to vendor/github.com/apache/arrow-go/v18/arrow/table.go index 5a68085f..6d19d9f1 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/table.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/table.go @@ -20,7 +20,7 @@ import ( "fmt" "sync/atomic" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) // Table represents a logical sequence of chunked arrays of equal length. It is @@ -39,6 +39,8 @@ type Table interface { Retain() Release() + + fmt.Stringer } // Column is an immutable column data structure consisting of @@ -47,7 +49,7 @@ type Table interface { // To get strongly typed data from a Column, you need to iterate the // chunks and type assert each individual Array. 
For example: // -// switch column.DataType().ID { +// switch column.DataType().ID() { // case arrow.INT32: // for _, c := range column.Data().Chunks() { // arr := c.(*array.Int32) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/tools.go b/vendor/github.com/apache/arrow-go/v18/arrow/tools.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/tools.go rename to vendor/github.com/apache/arrow-go/v18/arrow/tools.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_string.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_string.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_string.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_string.go index ee3ccb7e..6e5a943d 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_string.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_string.go @@ -51,11 +51,13 @@ func _() { _ = x[BINARY_VIEW-40] _ = x[LIST_VIEW-41] _ = x[LARGE_LIST_VIEW-42] + _ = x[DECIMAL32-43] + _ = x[DECIMAL64-44] } -const _Type_name = "NULLBOOLUINT8INT8UINT16INT16UINT32INT32UINT64INT64FLOAT16FLOAT32FLOAT64STRINGBINARYFIXED_SIZE_BINARYDATE32DATE64TIMESTAMPTIME32TIME64INTERVAL_MONTHSINTERVAL_DAY_TIMEDECIMAL128DECIMAL256LISTSTRUCTSPARSE_UNIONDENSE_UNIONDICTIONARYMAPEXTENSIONFIXED_SIZE_LISTDURATIONLARGE_STRINGLARGE_BINARYLARGE_LISTINTERVAL_MONTH_DAY_NANORUN_END_ENCODEDSTRING_VIEWBINARY_VIEWLIST_VIEWLARGE_LIST_VIEW" +const _Type_name = "NULLBOOLUINT8INT8UINT16INT16UINT32INT32UINT64INT64FLOAT16FLOAT32FLOAT64STRINGBINARYFIXED_SIZE_BINARYDATE32DATE64TIMESTAMPTIME32TIME64INTERVAL_MONTHSINTERVAL_DAY_TIMEDECIMAL128DECIMAL256LISTSTRUCTSPARSE_UNIONDENSE_UNIONDICTIONARYMAPEXTENSIONFIXED_SIZE_LISTDURATIONLARGE_STRINGLARGE_BINARYLARGE_LISTINTERVAL_MONTH_DAY_NANORUN_END_ENCODEDSTRING_VIEWBINARY_VIEWLIST_VIEWLARGE_LIST_VIEWDECIMAL32DECIMAL64" -var _Type_index = [...]uint16{0, 4, 8, 13, 17, 23, 28, 34, 39, 45, 50, 57, 64, 71, 77, 83, 100, 106, 112, 121, 127, 133, 148, 165, 175, 185, 189, 195, 207, 218, 228, 231, 240, 255, 263, 275, 287, 297, 320, 335, 346, 357, 366, 381} +var _Type_index = [...]uint16{0, 4, 8, 13, 17, 23, 28, 34, 39, 45, 50, 57, 64, 71, 77, 83, 100, 106, 112, 121, 127, 133, 148, 165, 175, 185, 189, 195, 207, 218, 228, 231, 240, 255, 263, 275, 287, 297, 320, 335, 346, 357, 366, 381, 390, 399} func (i Type) String() string { if i < 0 || i >= Type(len(_Type_index)-1) { diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/type_traits.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits.go new file mode 100644 index 00000000..7185ef25 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits.go @@ -0,0 +1,161 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package arrow + +import ( + "reflect" + "unsafe" + + "github.com/apache/arrow-go/v18/arrow/decimal" + "github.com/apache/arrow-go/v18/arrow/float16" + "golang.org/x/exp/constraints" +) + +// IntType is a type constraint for raw values represented as signed +// integer types by We aren't just using constraints.Signed +// because we don't want to include the raw `int` type here whose size +// changes based on the architecture (int32 on 32-bit architectures and +// int64 on 64-bit architectures). +// +// This will also cover types like MonthInterval or the time types +// as their underlying types are int32 and int64 which will get covered +// by using the ~ +type IntType interface { + ~int8 | ~int16 | ~int32 | ~int64 +} + +// UintType is a type constraint for raw values represented as unsigned +// integer types by We aren't just using constraints.Unsigned +// because we don't want to include the raw `uint` type here whose size +// changes based on the architecture (uint32 on 32-bit architectures and +// uint64 on 64-bit architectures). We also don't want to include uintptr +type UintType interface { + ~uint8 | ~uint16 | ~uint32 | ~uint64 +} + +// FloatType is a type constraint for raw values for representing +// floating point values in This consists of constraints.Float and +// float16.Num +type FloatType interface { + float16.Num | constraints.Float +} + +// NumericType is a type constraint for just signed/unsigned integers +// and float32/float64. +type NumericType interface { + IntType | UintType | constraints.Float +} + +// FixedWidthType is a type constraint for raw values in Arrow that +// can be represented as FixedWidth byte slices. Specifically this is for +// using Go generics to easily re-type a byte slice to a properly-typed +// slice. Booleans are excluded here since they are represented by Arrow +// as a bitmap and thus the buffer can't be just reinterpreted as a []bool +type FixedWidthType interface { + IntType | UintType | + FloatType | decimal.DecimalTypes | + DayTimeInterval | MonthDayNanoInterval +} + +type TemporalType interface { + Date32 | Date64 | Time32 | Time64 | + Timestamp | Duration | DayTimeInterval | + MonthInterval | MonthDayNanoInterval +} + +func reinterpretSlice[Out, T any](b []T) []Out { + if cap(b) == 0 { + return nil + } + out := (*Out)(unsafe.Pointer(&b[:1][0])) + + lenBytes := len(b) * int(unsafe.Sizeof(b[0])) + capBytes := cap(b) * int(unsafe.Sizeof(b[0])) + + lenOut := lenBytes / int(unsafe.Sizeof(*out)) + capOut := capBytes / int(unsafe.Sizeof(*out)) + + return unsafe.Slice(out, capOut)[:lenOut] +} + +// GetValues reinterprets the data.Buffers()[i] to a slice of T with len=data.Len(). +// +// If the buffer is nil, nil will be returned. +// +// NOTE: the buffer's length must be a multiple of Sizeof(T). +func GetValues[T FixedWidthType](data ArrayData, i int) []T { + if data.Buffers()[i] == nil || data.Buffers()[i].Len() == 0 { + return nil + } + return reinterpretSlice[T](data.Buffers()[i].Bytes())[data.Offset() : data.Offset()+data.Len()] +} + +// GetOffsets reinterprets the data.Buffers()[i] to a slice of T with len=data.Len()+1. +// +// NOTE: the buffer's length must be a multiple of Sizeof(T). +func GetOffsets[T int32 | int64](data ArrayData, i int) []T { + return reinterpretSlice[T](data.Buffers()[i].Bytes())[data.Offset() : data.Offset()+data.Len()+1] +} + +// GetBytes reinterprets a slice of T to a slice of bytes. 
+func GetBytes[T FixedWidthType | ViewHeader](in []T) []byte { + return reinterpretSlice[byte](in) +} + +// GetData reinterprets a slice of bytes to a slice of T. +// +// NOTE: the buffer's length must be a multiple of Sizeof(T). +func GetData[T FixedWidthType | ViewHeader](in []byte) []T { + return reinterpretSlice[T](in) +} + +var typMap = map[reflect.Type]DataType{ + reflect.TypeOf(false): FixedWidthTypes.Boolean, + reflect.TypeOf(int8(0)): PrimitiveTypes.Int8, + reflect.TypeOf(int16(0)): PrimitiveTypes.Int16, + reflect.TypeOf(int32(0)): PrimitiveTypes.Int32, + reflect.TypeOf(int64(0)): PrimitiveTypes.Int64, + reflect.TypeOf(uint8(0)): PrimitiveTypes.Uint8, + reflect.TypeOf(uint16(0)): PrimitiveTypes.Uint16, + reflect.TypeOf(uint32(0)): PrimitiveTypes.Uint32, + reflect.TypeOf(uint64(0)): PrimitiveTypes.Uint64, + reflect.TypeOf(float32(0)): PrimitiveTypes.Float32, + reflect.TypeOf(float64(0)): PrimitiveTypes.Float64, + reflect.TypeOf(string("")): BinaryTypes.String, + reflect.TypeOf(Date32(0)): FixedWidthTypes.Date32, + reflect.TypeOf(Date64(0)): FixedWidthTypes.Date64, + reflect.TypeOf(true): FixedWidthTypes.Boolean, + reflect.TypeOf(float16.Num{}): FixedWidthTypes.Float16, + reflect.TypeOf([]byte{}): BinaryTypes.Binary, +} + +// GetDataType returns the appropriate DataType for the given type T +// only for non-parametric types. This uses a map and reflection internally +// so don't call this in a tight loop, instead call this once and then use +// a closure with the result. +func GetDataType[T NumericType | bool | string | []byte | float16.Num]() DataType { + var z T + return typMap[reflect.TypeOf(z)] +} + +// GetType returns the appropriate Type type T, only for non-parametric +// types. This uses a map and reflection internally so don't call this in +// a tight loop, instead call it once and then use a closure with the result. 
+func GetType[T NumericType | bool | string]() Type { + var z T + return typMap[reflect.TypeOf(z)].ID() +} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_boolean.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_boolean.go index 6a46bdec..98d27330 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_boolean.go @@ -17,7 +17,7 @@ package arrow import ( - "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/bitutil" ) type booleanTraits struct{} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal128.go similarity index 66% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal128.go index d2d3aae3..6e416cd6 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal128.go @@ -17,11 +17,10 @@ package arrow import ( - "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/decimal" + "github.com/apache/arrow-go/v18/arrow/endian" ) // Decimal128 traits @@ -29,7 +28,7 @@ var Decimal128Traits decimal128Traits const ( // Decimal128SizeBytes specifies the number of bytes required to store a single decimal128 in memory - Decimal128SizeBytes = int(unsafe.Sizeof(decimal128.Num{})) + Decimal128SizeBytes = int(unsafe.Sizeof(decimal.Decimal128{})) ) type decimal128Traits struct{} @@ -38,7 +37,7 @@ type decimal128Traits struct{} func (decimal128Traits) BytesRequired(n int) int { return Decimal128SizeBytes * n } // PutValue -func (decimal128Traits) PutValue(b []byte, v decimal128.Num) { +func (decimal128Traits) PutValue(b []byte, v decimal.Decimal128) { endian.Native.PutUint64(b[:8], uint64(v.LowBits())) endian.Native.PutUint64(b[8:], uint64(v.HighBits())) } @@ -46,18 +45,14 @@ func (decimal128Traits) PutValue(b []byte, v decimal128.Num) { // CastFromBytes reinterprets the slice b to a slice of type uint16. // // NOTE: len(b) must be a multiple of Uint16SizeBytes. -func (decimal128Traits) CastFromBytes(b []byte) []decimal128.Num { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*decimal128.Num)(unsafe.Pointer(h.Data)), cap(b)/Decimal128SizeBytes)[:len(b)/Decimal128SizeBytes] +func (decimal128Traits) CastFromBytes(b []byte) []decimal.Decimal128 { + return GetData[decimal.Decimal128](b) } // CastToBytes reinterprets the slice b to a slice of bytes. -func (decimal128Traits) CastToBytes(b []decimal128.Num) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Decimal128SizeBytes)[:len(b)*Decimal128SizeBytes] +func (decimal128Traits) CastToBytes(b []decimal.Decimal128) []byte { + return GetBytes(b) } // Copy copies src to dst. 
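
The decimal128 traits above are the first of many call sites rewritten to delegate to the new generic helpers from type_traits.go. A hedged usage sketch of the exported `GetBytes`/`GetData`/`GetDataType` (assuming the vendored module path; the byte slice aliases the input, nothing is copied):

```go
package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
)

func main() {
	vals := []int32{1, 2, 3}

	// GetBytes re-types the backing array of vals as raw bytes (no copy);
	// GetData goes the other way.
	raw := arrow.GetBytes(vals)
	back := arrow.GetData[int32](raw)
	fmt.Println(len(raw), back) // 12 [1 2 3]

	// GetDataType maps a Go type to its Arrow DataType for non-parametric types.
	fmt.Println(arrow.GetDataType[int32]()) // int32
}
```
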
-func (decimal128Traits) Copy(dst, src []decimal128.Num) { copy(dst, src) } +func (decimal128Traits) Copy(dst, src []decimal.Decimal128) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal256.go similarity index 60% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal256.go index 256ed68f..b196c2e7 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal256.go @@ -17,25 +17,24 @@ package arrow import ( - "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/decimal" + "github.com/apache/arrow-go/v18/arrow/endian" ) // Decimal256 traits var Decimal256Traits decimal256Traits const ( - Decimal256SizeBytes = int(unsafe.Sizeof(decimal256.Num{})) + Decimal256SizeBytes = int(unsafe.Sizeof(decimal.Decimal256{})) ) type decimal256Traits struct{} func (decimal256Traits) BytesRequired(n int) int { return Decimal256SizeBytes * n } -func (decimal256Traits) PutValue(b []byte, v decimal256.Num) { +func (decimal256Traits) PutValue(b []byte, v decimal.Decimal256) { for i, a := range v.Array() { start := i * 8 endian.Native.PutUint64(b[start:], a) @@ -43,16 +42,12 @@ func (decimal256Traits) PutValue(b []byte, v decimal256.Num) { } // CastFromBytes reinterprets the slice b to a slice of decimal256 -func (decimal256Traits) CastFromBytes(b []byte) []decimal256.Num { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*decimal256.Num)(unsafe.Pointer(h.Data)), cap(b)/Decimal256SizeBytes)[:len(b)/Decimal256SizeBytes] +func (decimal256Traits) CastFromBytes(b []byte) []decimal.Decimal256 { + return GetData[decimal.Decimal256](b) } -func (decimal256Traits) CastToBytes(b []decimal256.Num) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Decimal256SizeBytes)[:len(b)*Decimal256SizeBytes] +func (decimal256Traits) CastToBytes(b []decimal.Decimal256) []byte { + return GetBytes(b) } -func (decimal256Traits) Copy(dst, src []decimal256.Num) { copy(dst, src) } +func (decimal256Traits) Copy(dst, src []decimal.Decimal256) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal32.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal32.go new file mode 100644 index 00000000..ebca65f6 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal32.go @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "unsafe" + + "github.com/apache/arrow-go/v18/arrow/decimal" + "github.com/apache/arrow-go/v18/arrow/endian" +) + +// Decimal32 traits +var Decimal32Traits decimal32Traits + +const ( + // Decimal32SizeBytes specifies the number of bytes required to store a single decimal32 in memory + Decimal32SizeBytes = int(unsafe.Sizeof(decimal.Decimal32(0))) +) + +type decimal32Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (decimal32Traits) BytesRequired(n int) int { return Decimal32SizeBytes * n } + +// PutValue +func (decimal32Traits) PutValue(b []byte, v decimal.Decimal32) { + endian.Native.PutUint32(b[:4], uint32(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint16. +// +// NOTE: len(b) must be a multiple of Uint16SizeBytes. +func (decimal32Traits) CastFromBytes(b []byte) []decimal.Decimal32 { + return GetData[decimal.Decimal32](b) +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (decimal32Traits) CastToBytes(b []decimal.Decimal32) []byte { + return GetBytes(b) +} + +// Copy copies src to dst. +func (decimal32Traits) Copy(dst, src []decimal.Decimal32) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal64.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal64.go new file mode 100644 index 00000000..bd07883a --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_decimal64.go @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "unsafe" + + "github.com/apache/arrow-go/v18/arrow/decimal" + "github.com/apache/arrow-go/v18/arrow/endian" +) + +// Decimal64 traits +var Decimal64Traits decimal64Traits + +const ( + // Decimal64SizeBytes specifies the number of bytes required to store a single decimal64 in memory + Decimal64SizeBytes = int(unsafe.Sizeof(decimal.Decimal64(0))) +) + +type decimal64Traits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (decimal64Traits) BytesRequired(n int) int { return Decimal64SizeBytes * n } + +// PutValue +func (decimal64Traits) PutValue(b []byte, v decimal.Decimal64) { + endian.Native.PutUint64(b[:8], uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type uint16. +// +// NOTE: len(b) must be a multiple of Uint16SizeBytes. +func (decimal64Traits) CastFromBytes(b []byte) []decimal.Decimal64 { + return GetData[decimal.Decimal64](b) +} + +// CastToBytes reinterprets the slice b to a slice of bytes. 
+func (decimal64Traits) CastToBytes(b []decimal.Decimal64) []byte { + return GetBytes(b) +} + +// Copy copies src to dst. +func (decimal64Traits) Copy(dst, src []decimal.Decimal64) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_float16.go similarity index 80% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_float16.go index c40363d3..aaba28ad 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_float16.go @@ -17,11 +17,10 @@ package arrow import ( - "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow/endian" - "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/float16" ) // Float16 traits @@ -46,16 +45,12 @@ func (float16Traits) PutValue(b []byte, v float16.Num) { // // NOTE: len(b) must be a multiple of Uint16SizeBytes. func (float16Traits) CastFromBytes(b []byte) []float16.Num { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*float16.Num)(unsafe.Pointer(h.Data)), cap(b)/Float16SizeBytes)[:len(b)/Float16SizeBytes] + return GetData[float16.Num](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (float16Traits) CastToBytes(b []float16.Num) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float16SizeBytes)[:len(b)*Float16SizeBytes] + return GetBytes(b) } // Copy copies src to dst. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_interval.go similarity index 78% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_interval.go index 35e60570..149683c8 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_interval.go @@ -17,11 +17,10 @@ package arrow import ( - "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow/endian" - "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal/debug" ) var ( @@ -57,16 +56,12 @@ func (monthTraits) PutValue(b []byte, v MonthInterval) { // // NOTE: len(b) must be a multiple of MonthIntervalSizeBytes. func (monthTraits) CastFromBytes(b []byte) []MonthInterval { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*MonthInterval)(unsafe.Pointer(h.Data)), cap(b)/MonthIntervalSizeBytes)[:len(b)/MonthIntervalSizeBytes] + return GetData[MonthInterval](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (monthTraits) CastToBytes(b []MonthInterval) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*MonthIntervalSizeBytes)[:len(b)*MonthIntervalSizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -94,16 +89,12 @@ func (daytimeTraits) PutValue(b []byte, v DayTimeInterval) { // // NOTE: len(b) must be a multiple of DayTimeIntervalSizeBytes. 
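
The brand-new decimal32/decimal64 traits mirror the shape of the existing ones. A small round-trip sketch, assuming `decimal.Decimal64` is the integer-backed type that the size computation in the diff implies:

```go
package main

import (
	"fmt"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/decimal"
)

func main() {
	// Write two decimal64 values through the traits, then view the buffer
	// again without copying via the generics-backed CastFromBytes.
	buf := make([]byte, arrow.Decimal64Traits.BytesRequired(2))
	arrow.Decimal64Traits.PutValue(buf, decimal.Decimal64(1234))
	arrow.Decimal64Traits.PutValue(buf[arrow.Decimal64SizeBytes:], decimal.Decimal64(-5))

	vals := arrow.Decimal64Traits.CastFromBytes(buf)
	fmt.Println(len(vals), vals[0] == decimal.Decimal64(1234)) // 2 true
}
```
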
func (daytimeTraits) CastFromBytes(b []byte) []DayTimeInterval { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*DayTimeInterval)(unsafe.Pointer(h.Data)), cap(b)/DayTimeIntervalSizeBytes)[:len(b)/DayTimeIntervalSizeBytes] + return GetData[DayTimeInterval](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (daytimeTraits) CastToBytes(b []DayTimeInterval) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*DayTimeIntervalSizeBytes)[:len(b)*DayTimeIntervalSizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -132,16 +123,12 @@ func (monthDayNanoTraits) PutValue(b []byte, v MonthDayNanoInterval) { // // NOTE: len(b) must be a multiple of MonthDayNanoIntervalSizeBytes. func (monthDayNanoTraits) CastFromBytes(b []byte) []MonthDayNanoInterval { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*MonthDayNanoInterval)(unsafe.Pointer(h.Data)), cap(b)/MonthDayNanoIntervalSizeBytes)[:len(b)/MonthDayNanoIntervalSizeBytes] + return GetData[MonthDayNanoInterval](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (monthDayNanoTraits) CastToBytes(b []MonthDayNanoInterval) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*MonthDayNanoIntervalSizeBytes)[:len(b)*MonthDayNanoIntervalSizeBytes] + return GetBytes(b) } // Copy copies src to dst. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen.go similarity index 75% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen.go index 6edd7529..e88b61bf 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen.go @@ -20,10 +20,9 @@ package arrow import ( "math" - "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/endian" ) var ( @@ -65,16 +64,12 @@ func (int64Traits) PutValue(b []byte, v int64) { // // NOTE: len(b) must be a multiple of Int64SizeBytes. func (int64Traits) CastFromBytes(b []byte) []int64 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*int64)(unsafe.Pointer(h.Data)), cap(b)/Int64SizeBytes)[:len(b)/Int64SizeBytes] + return GetData[int64](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (int64Traits) CastToBytes(b []int64) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int64SizeBytes)[:len(b)*Int64SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -101,16 +96,12 @@ func (uint64Traits) PutValue(b []byte, v uint64) { // // NOTE: len(b) must be a multiple of Uint64SizeBytes. func (uint64Traits) CastFromBytes(b []byte) []uint64 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*uint64)(unsafe.Pointer(h.Data)), cap(b)/Uint64SizeBytes)[:len(b)/Uint64SizeBytes] + return GetData[uint64](b) } // CastToBytes reinterprets the slice b to a slice of bytes. 
func (uint64Traits) CastToBytes(b []uint64) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint64SizeBytes)[:len(b)*Uint64SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -137,16 +128,12 @@ func (float64Traits) PutValue(b []byte, v float64) { // // NOTE: len(b) must be a multiple of Float64SizeBytes. func (float64Traits) CastFromBytes(b []byte) []float64 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*float64)(unsafe.Pointer(h.Data)), cap(b)/Float64SizeBytes)[:len(b)/Float64SizeBytes] + return GetData[float64](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (float64Traits) CastToBytes(b []float64) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float64SizeBytes)[:len(b)*Float64SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -173,16 +160,12 @@ func (int32Traits) PutValue(b []byte, v int32) { // // NOTE: len(b) must be a multiple of Int32SizeBytes. func (int32Traits) CastFromBytes(b []byte) []int32 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*int32)(unsafe.Pointer(h.Data)), cap(b)/Int32SizeBytes)[:len(b)/Int32SizeBytes] + return GetData[int32](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (int32Traits) CastToBytes(b []int32) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int32SizeBytes)[:len(b)*Int32SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -209,16 +192,12 @@ func (uint32Traits) PutValue(b []byte, v uint32) { // // NOTE: len(b) must be a multiple of Uint32SizeBytes. func (uint32Traits) CastFromBytes(b []byte) []uint32 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*uint32)(unsafe.Pointer(h.Data)), cap(b)/Uint32SizeBytes)[:len(b)/Uint32SizeBytes] + return GetData[uint32](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (uint32Traits) CastToBytes(b []uint32) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint32SizeBytes)[:len(b)*Uint32SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -245,16 +224,12 @@ func (float32Traits) PutValue(b []byte, v float32) { // // NOTE: len(b) must be a multiple of Float32SizeBytes. func (float32Traits) CastFromBytes(b []byte) []float32 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*float32)(unsafe.Pointer(h.Data)), cap(b)/Float32SizeBytes)[:len(b)/Float32SizeBytes] + return GetData[float32](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (float32Traits) CastToBytes(b []float32) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float32SizeBytes)[:len(b)*Float32SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -281,16 +256,12 @@ func (int16Traits) PutValue(b []byte, v int16) { // // NOTE: len(b) must be a multiple of Int16SizeBytes. func (int16Traits) CastFromBytes(b []byte) []int16 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*int16)(unsafe.Pointer(h.Data)), cap(b)/Int16SizeBytes)[:len(b)/Int16SizeBytes] + return GetData[int16](b) } // CastToBytes reinterprets the slice b to a slice of bytes. 
func (int16Traits) CastToBytes(b []int16) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int16SizeBytes)[:len(b)*Int16SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -317,16 +288,12 @@ func (uint16Traits) PutValue(b []byte, v uint16) { // // NOTE: len(b) must be a multiple of Uint16SizeBytes. func (uint16Traits) CastFromBytes(b []byte) []uint16 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*uint16)(unsafe.Pointer(h.Data)), cap(b)/Uint16SizeBytes)[:len(b)/Uint16SizeBytes] + return GetData[uint16](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (uint16Traits) CastToBytes(b []uint16) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint16SizeBytes)[:len(b)*Uint16SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -353,16 +320,12 @@ func (int8Traits) PutValue(b []byte, v int8) { // // NOTE: len(b) must be a multiple of Int8SizeBytes. func (int8Traits) CastFromBytes(b []byte) []int8 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*int8)(unsafe.Pointer(h.Data)), cap(b)/Int8SizeBytes)[:len(b)/Int8SizeBytes] + return GetData[int8](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (int8Traits) CastToBytes(b []int8) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int8SizeBytes)[:len(b)*Int8SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -389,16 +352,12 @@ func (uint8Traits) PutValue(b []byte, v uint8) { // // NOTE: len(b) must be a multiple of Uint8SizeBytes. func (uint8Traits) CastFromBytes(b []byte) []uint8 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*uint8)(unsafe.Pointer(h.Data)), cap(b)/Uint8SizeBytes)[:len(b)/Uint8SizeBytes] + return GetData[uint8](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (uint8Traits) CastToBytes(b []uint8) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint8SizeBytes)[:len(b)*Uint8SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -425,16 +384,12 @@ func (time32Traits) PutValue(b []byte, v Time32) { // // NOTE: len(b) must be a multiple of Time32SizeBytes. func (time32Traits) CastFromBytes(b []byte) []Time32 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*Time32)(unsafe.Pointer(h.Data)), cap(b)/Time32SizeBytes)[:len(b)/Time32SizeBytes] + return GetData[Time32](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (time32Traits) CastToBytes(b []Time32) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Time32SizeBytes)[:len(b)*Time32SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -461,16 +416,12 @@ func (time64Traits) PutValue(b []byte, v Time64) { // // NOTE: len(b) must be a multiple of Time64SizeBytes. func (time64Traits) CastFromBytes(b []byte) []Time64 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*Time64)(unsafe.Pointer(h.Data)), cap(b)/Time64SizeBytes)[:len(b)/Time64SizeBytes] + return GetData[Time64](b) } // CastToBytes reinterprets the slice b to a slice of bytes. 
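
Every numeric trait in this generated file gets the same mechanical rewrite: the deprecated `reflect.SliceHeader` juggling is replaced by the generic helpers. The underlying zero-copy idiom, written as a simplified standalone sketch with `unsafe.SliceData` (the vendored helper additionally carries the capacity through):

```go
package main

import (
	"fmt"
	"unsafe"
)

// castToBytes re-types the backing array of a slice as raw bytes without
// copying, using unsafe.Slice plus unsafe.SliceData instead of the removed
// reflect.SliceHeader pattern.
func castToBytes[T any](in []T) []byte {
	if len(in) == 0 {
		return nil
	}
	return unsafe.Slice((*byte)(unsafe.Pointer(unsafe.SliceData(in))),
		len(in)*int(unsafe.Sizeof(in[0])))
}

func main() {
	fmt.Println(len(castToBytes([]float64{1.5, 2.5}))) // 16
}
```
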
func (time64Traits) CastToBytes(b []Time64) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Time64SizeBytes)[:len(b)*Time64SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -497,16 +448,12 @@ func (date32Traits) PutValue(b []byte, v Date32) { // // NOTE: len(b) must be a multiple of Date32SizeBytes. func (date32Traits) CastFromBytes(b []byte) []Date32 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*Date32)(unsafe.Pointer(h.Data)), cap(b)/Date32SizeBytes)[:len(b)/Date32SizeBytes] + return GetData[Date32](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (date32Traits) CastToBytes(b []Date32) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Date32SizeBytes)[:len(b)*Date32SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -533,16 +480,12 @@ func (date64Traits) PutValue(b []byte, v Date64) { // // NOTE: len(b) must be a multiple of Date64SizeBytes. func (date64Traits) CastFromBytes(b []byte) []Date64 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*Date64)(unsafe.Pointer(h.Data)), cap(b)/Date64SizeBytes)[:len(b)/Date64SizeBytes] + return GetData[Date64](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (date64Traits) CastToBytes(b []Date64) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Date64SizeBytes)[:len(b)*Date64SizeBytes] + return GetBytes(b) } // Copy copies src to dst. @@ -569,16 +512,12 @@ func (durationTraits) PutValue(b []byte, v Duration) { // // NOTE: len(b) must be a multiple of DurationSizeBytes. func (durationTraits) CastFromBytes(b []byte) []Duration { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*Duration)(unsafe.Pointer(h.Data)), cap(b)/DurationSizeBytes)[:len(b)/DurationSizeBytes] + return GetData[Duration](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (durationTraits) CastToBytes(b []Duration) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*DurationSizeBytes)[:len(b)*DurationSizeBytes] + return GetBytes(b) } // Copy copies src to dst. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen.go.tmpl similarity index 85% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen.go.tmpl index ffae975c..dd72a5b4 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen.go.tmpl @@ -18,10 +18,9 @@ package arrow import ( "math" - "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/endian" ) var ( @@ -66,16 +65,12 @@ func ({{.name}}Traits) PutValue(b []byte, v {{.Type}}) { // // NOTE: len(b) must be a multiple of {{.Name}}SizeBytes. 
func ({{.name}}Traits) CastFromBytes(b []byte) []{{.Type}} { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*{{.Type}})(unsafe.Pointer(h.Data)), cap(b)/{{.Name}}SizeBytes)[:len(b)/{{.Name}}SizeBytes] + return GetData[{{.Type}}](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func ({{.name}}Traits) CastToBytes(b []{{.Type}}) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*{{.Name}}SizeBytes)[:len(b)*{{.Name}}SizeBytes] + return GetBytes(b) } // Copy copies src to dst. diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen_test.go.tmpl similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen_test.go.tmpl index 96685f31..b3796915 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_numeric.gen_test.go.tmpl @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) {{- range .In}} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_timestamp.go similarity index 82% rename from vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_timestamp.go index 7c393b35..d8149c86 100644 --- a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_timestamp.go @@ -17,10 +17,9 @@ package arrow import ( - "reflect" "unsafe" - "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/endian" ) var TimestampTraits timestampTraits @@ -43,16 +42,12 @@ func (timestampTraits) PutValue(b []byte, v Timestamp) { // // NOTE: len(b) must be a multiple of TimestampSizeBytes. func (timestampTraits) CastFromBytes(b []byte) []Timestamp { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*Timestamp)(unsafe.Pointer(h.Data)), cap(b)/TimestampSizeBytes)[:len(b)/TimestampSizeBytes] + return GetData[Timestamp](b) } // CastToBytes reinterprets the slice b to a slice of bytes. func (timestampTraits) CastToBytes(b []Timestamp) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*TimestampSizeBytes)[:len(b)*TimestampSizeBytes] + return GetBytes(b) } // Copy copies src to dst. diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_view.go similarity index 54% rename from vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go rename to vendor/github.com/apache/arrow-go/v18/arrow/type_traits_view.go index f38eb5c5..7603a323 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go +++ b/vendor/github.com/apache/arrow-go/v18/arrow/type_traits_view.go @@ -14,24 +14,35 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !go1.20 && !tinygo - -package hashing +package arrow import ( - "reflect" "unsafe" + + "github.com/apache/arrow-go/v18/arrow/endian" +) + +var ViewHeaderTraits viewHeaderTraits + +const ( + ViewHeaderSizeBytes = int(unsafe.Sizeof(ViewHeader{})) ) -func hashString(val string, alg uint64) uint64 { - if val == "" { - return Hash([]byte{}, alg) - } - // highly efficient way to get byte slice without copy before - // the introduction of unsafe.StringData in go1.20 - // (https://stackoverflow.com/questions/59209493/how-to-use-unsafe-get-a-byte-slice-from-a-string-without-memory-copy) - const MaxInt32 = 1<<31 - 1 - buf := (*[MaxInt32]byte)(unsafe.Pointer((*reflect.StringHeader)( - unsafe.Pointer(&val)).Data))[: len(val)&MaxInt32 : len(val)&MaxInt32] - return Hash(buf, alg) +type viewHeaderTraits struct{} + +func (viewHeaderTraits) BytesRequired(n int) int { return ViewHeaderSizeBytes * n } + +func (viewHeaderTraits) PutValue(b []byte, v ViewHeader) { + endian.Native.PutUint32(b, uint32(v.size)) + copy(b[4:], v.data[:]) +} + +func (viewHeaderTraits) CastFromBytes(b []byte) (res []ViewHeader) { + return GetData[ViewHeader](b) } + +func (viewHeaderTraits) CastToBytes(b []ViewHeader) (res []byte) { + return GetBytes(b) +} + +func (viewHeaderTraits) Copy(dst, src []ViewHeader) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/unionmode_string.go b/vendor/github.com/apache/arrow-go/v18/arrow/unionmode_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/arrow/unionmode_string.go rename to vendor/github.com/apache/arrow-go/v18/arrow/unionmode_string.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go b/vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_block_counter.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go rename to vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_block_counter.go index 86818bfd..43b2fbb2 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_block_counter.go @@ -21,8 +21,8 @@ import ( "math/bits" "unsafe" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/internal/utils" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/internal/utils" ) func loadWord(byt []byte) uint64 { @@ -165,7 +165,7 @@ func (b *BitBlockCounter) NextWord() BitBlockCount { } // OptionalBitBlockCounter is a useful counter to iterate through a possibly -// non-existent validity bitmap to allow us to write one code path for both +// nonexistent validity bitmap to allow us to write one code path for both // the with-nulls and no-nulls cases without giving up a lot of performance. 
type OptionalBitBlockCounter struct { hasBitmap bool diff --git a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go b/vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_run_reader.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go rename to vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_run_reader.go index a1686a49..0d83f8fb 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_run_reader.go @@ -22,9 +22,9 @@ import ( "math/bits" "unsafe" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/internal/utils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/internal/utils" ) // BitRun represents a run of bits with the same value of length Len diff --git a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go b/vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_set_run_reader.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go rename to vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_set_run_reader.go index a2269ffe..d32dec9c 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/bitutils/bit_set_run_reader.go @@ -20,8 +20,8 @@ import ( "encoding/binary" "math/bits" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/internal/utils" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/internal/utils" ) // IsMultipleOf64 returns whether v is a multiple of 64. 
@@ -113,7 +113,7 @@ func (br *baseSetBitRunReader) Reset(bitmap []byte, startOffset, length int64) { bitOffset := int8(startOffset % 8) if length > 0 && bitOffset != 0 { - br.curNumBits = int32(utils.MinInt(int(length), int(8-bitOffset))) + br.curNumBits = int32(utils.Min(int(length), int(8-bitOffset))) br.curWord = br.loadPartial(bitOffset, int64(br.curNumBits)) } return @@ -124,7 +124,7 @@ func (br *baseSetBitRunReader) Reset(bitmap []byte, startOffset, length int64) { endBitOffset := int8((startOffset + length) % 8) if length > 0 && endBitOffset != 0 { br.pos++ - br.curNumBits = int32(utils.MinInt(int(length), int(endBitOffset))) + br.curNumBits = int32(utils.Min(int(length), int(endBitOffset))) br.curWord = br.loadPartial(8-endBitOffset, int64(br.curNumBits)) } } @@ -219,7 +219,7 @@ func (br *baseSetBitRunReader) skipNextZeros() { if br.remaining > 0 { br.curWord = br.loadPartial(0, br.remaining) br.curNumBits = int32(br.remaining) - nzeros := int32(utils.MinInt(int(br.curNumBits), int(br.countFirstZeros(br.curWord)))) + nzeros := int32(utils.Min(int(br.curNumBits), int(br.countFirstZeros(br.curWord)))) br.curWord = br.consumeBits(br.curWord, nzeros) br.curNumBits -= nzeros br.remaining -= int64(nzeros) diff --git a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go b/vendor/github.com/apache/arrow-go/v18/internal/bitutils/bitmap_generate.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go rename to vendor/github.com/apache/arrow-go/v18/internal/bitutils/bitmap_generate.go index 78219d81..178751d7 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/bitutils/bitmap_generate.go @@ -16,7 +16,7 @@ package bitutils -import "github.com/apache/arrow/go/v14/arrow/bitutil" +import "github.com/apache/arrow-go/v18/arrow/bitutil" // GenerateBits writes sequential bits to a bitmap. 
Bits preceding the // initial start offset are preserved, bits following the bitmap may diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go b/vendor/github.com/apache/arrow-go/v18/internal/hashing/hash_funcs.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go rename to vendor/github.com/apache/arrow-go/v18/internal/hashing/hash_funcs.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go b/vendor/github.com/apache/arrow-go/v18/internal/hashing/hash_string.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go rename to vendor/github.com/apache/arrow-go/v18/internal/hashing/hash_string.go index b772c7d7..c8579c1e 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/hashing/hash_string.go @@ -24,3 +24,7 @@ func hashString(val string, alg uint64) uint64 { buf := unsafe.Slice(unsafe.StringData(val), len(val)) return Hash(buf, alg) } + +func strToBytes(v string) []byte { + return unsafe.Slice(unsafe.StringData(v), len(v)) +} diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/types.tmpldata b/vendor/github.com/apache/arrow-go/v18/internal/hashing/types.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/hashing/types.tmpldata rename to vendor/github.com/apache/arrow-go/v18/internal/hashing/types.tmpldata diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go rename to vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go index cc996552..e99a4f8f 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go @@ -21,9 +21,9 @@ package hashing import ( "math" - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/internal/utils" + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/internal/utils" ) type payloadInt8 struct { diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go.tmpl similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go.tmpl index 25164341..9ba35c72 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl +++ b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.gen.go.tmpl @@ -17,8 +17,8 @@ package hashing import ( - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/internal/utils" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/internal/utils" ) {{range .In}} diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.go similarity index 97% rename from 
vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go rename to vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.go index 81994f0a..fbb8b335 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/hashing/xxh3_memo_table.go @@ -22,7 +22,6 @@ package hashing import ( "bytes" "math" - "reflect" "unsafe" ) @@ -57,7 +56,7 @@ type MemoTable interface { // and a boolean indicating whether or not the value was found in // the table (if false, the value was inserted). An error is returned // if val is not the appropriate type for the table. This function is intended to be used by - // the BinaryMemoTable to prevent uncessary allocations of the data when converting from a []byte to interface{}. + // the BinaryMemoTable to prevent unnecessary allocations of the data when converting from a []byte to interface{}. GetOrInsertBytes(val []byte) (idx int, existed bool, err error) // GetOrInsertNull returns the index of the null value in the table, // inserting one if it hasn't already been inserted. It returns a boolean @@ -67,7 +66,7 @@ type MemoTable interface { // insert one if it doesn't already exist. Will return -1 if it doesn't exist // indicated by a false value for the boolean. GetNull() (idx int, exists bool) - // WriteOut copys the unique values of the memotable out to the byte slice + // WriteOut copies the unique values of the memotable out to the byte slice // provided. Must have allocated enough bytes for all the values. WriteOut(out []byte) // WriteOutSubset is like WriteOut, but only writes a subset of values @@ -183,13 +182,7 @@ func (BinaryMemoTable) valAsByteSlice(val interface{}) []byte { case ByteSlice: return v.Bytes() case string: - var out []byte - h := (*reflect.StringHeader)(unsafe.Pointer(&v)) - s := (*reflect.SliceHeader)(unsafe.Pointer(&out)) - s.Data = h.Data - s.Len = h.Len - s.Cap = h.Len - return out + return strToBytes(v) default: panic("invalid type for binarymemotable") } diff --git a/vendor/github.com/apache/arrow/go/v14/internal/json/json.go b/vendor/github.com/apache/arrow-go/v18/internal/json/json.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/internal/json/json.go rename to vendor/github.com/apache/arrow-go/v18/internal/json/json.go index 319b12c5..b4c4c9f6 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/json/json.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/json/json.go @@ -14,8 +14,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !tinygo -// +build !tinygo +//go:build !tinygo && !arrow_json_stdlib +// +build !tinygo,!arrow_json_stdlib package json diff --git a/vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go b/vendor/github.com/apache/arrow-go/v18/internal/json/json_stdlib.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go rename to vendor/github.com/apache/arrow-go/v18/internal/json/json_stdlib.go index 8e4f447b..3031029d 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/json/json_stdlib.go @@ -14,8 +14,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
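
The `strToBytes` helper added to hash_string.go, now used by `BinaryMemoTable.valAsByteSlice` above, is the modern zero-copy replacement for the deleted `reflect.StringHeader` trick. A standalone sketch of the same idiom (the result aliases the string's memory and must be treated as read-only):

```go
package main

import (
	"fmt"
	"unsafe"
)

// strToBytes aliases a string's bytes as a []byte without copying (Go >= 1.20).
// Never write through the result: strings are immutable.
func strToBytes(v string) []byte {
	return unsafe.Slice(unsafe.StringData(v), len(v))
}

func main() {
	b := strToBytes("hello")
	fmt.Printf("%q %d\n", b, len(b)) // "hello" 5
}
```
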
-//go:build tinygo -// +build tinygo +//go:build tinygo || arrow_json_stdlib +// +build tinygo arrow_json_stdlib package json diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/Makefile b/vendor/github.com/apache/arrow-go/v18/internal/utils/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/Makefile rename to vendor/github.com/apache/arrow-go/v18/internal/utils/Makefile diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/buf_reader.go similarity index 70% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/buf_reader.go index 0b2381da..c222c8bd 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/buf_reader.go @@ -18,11 +18,96 @@ package utils import ( "bufio" + "bytes" "errors" "fmt" "io" ) +type Reader interface { + io.ReadSeeker + io.ReaderAt +} + +// byteReader is a wrapper for bytes.NewReader +type byteReader struct { + r *bytes.Reader + buf []byte + pos int +} + +// NewByteReader creates a new ByteReader instance from the given byte slice. +// It wraps the bytes.NewReader function to implement BufferedReader interface. +func NewByteReader(buf []byte) *byteReader { + r := bytes.NewReader(buf) + return &byteReader{ + r, + buf, + 0, + } +} + +func (r *byteReader) Read(buf []byte) (n int, err error) { + n, err = r.r.Read(buf) + r.pos += n + return +} + +func (r *byteReader) Seek(offset int64, whence int) (pos int64, err error) { + pos, err = r.r.Seek(offset, whence) + r.pos = int(pos) + return +} + +func (r *byteReader) ReadAt(buf []byte, off int64) (n int, err error) { + return r.r.ReadAt(buf, off) +} + +func (r *byteReader) Peek(n int) ([]byte, error) { + if n < 0 { + return nil, fmt.Errorf("arrow/bytereader: %w", bufio.ErrNegativeCount) + } + available := len(r.buf) - r.pos + read := min(n, available) + var err error + if read < n { + err = io.EOF + } + return r.buf[r.pos : r.pos+read], err +} + +func (r *byteReader) Discard(n int) (int, error) { + if n < 0 { + return 0, fmt.Errorf("arrow/bytereader: %w", bufio.ErrNegativeCount) + } + + var ( + err error + newPos = r.pos + n + ) + + if newPos >= len(r.buf) { + newPos = len(r.buf) + n = newPos - r.pos + err = io.EOF + } + + _, seekErr := r.Seek(int64(n), io.SeekCurrent) + if seekErr != nil { + return n, seekErr + } + return n, err +} + +// Outer returns the byteReader itself, since it has already implemented Reader interface. +func (r *byteReader) Outer() Reader { + return r +} + +func (r *byteReader) Reset(Reader) {} + +func (r *byteReader) BufferSize() int { return len(r.buf) } + // bufferedReader is similar to bufio.Reader except // it will expand the buffer if necessary when asked to Peek // more bytes than are in the buffer @@ -30,21 +115,14 @@ type bufferedReader struct { bufferSz int buf []byte r, w int - rd io.Reader + rd Reader err error } // NewBufferedReader returns a buffered reader with similar semantics to bufio.Reader // except Peek will expand the internal buffer if needed rather than return // an error. -func NewBufferedReader(rd io.Reader, sz int) *bufferedReader { - // if rd is already a buffered reader whose buffer is >= the requested size - // then just return it as is. no need to make a new object. 
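
The new `byteReader` gives buf_reader.go an in-memory implementation of the buffered `Reader` interface; its `Peek` returns up to n bytes without advancing the position and reports `io.EOF` when fewer remain. Since the package is internal, here is a standalone sketch that mirrors that contract rather than importing it:

```go
package main

import (
	"fmt"
	"io"
)

// peek mirrors byteReader.Peek: return up to n bytes starting at pos without
// advancing, and report io.EOF when fewer than n are available.
func peek(buf []byte, pos, n int) ([]byte, error) {
	available := len(buf) - pos
	read := min(n, available)
	var err error
	if read < n {
		err = io.EOF
	}
	return buf[pos : pos+read], err
}

func main() {
	data := []byte("arrow")
	b, err := peek(data, 3, 4)
	fmt.Printf("%q %v\n", b, err) // "ow" EOF
}
```
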
- b, ok := rd.(*bufferedReader) - if ok && len(b.buf) >= sz { - return b - } - +func NewBufferedReader(rd Reader, sz int) *bufferedReader { r := &bufferedReader{ rd: rd, } @@ -52,6 +130,14 @@ func NewBufferedReader(rd io.Reader, sz int) *bufferedReader { return r } +func (b *bufferedReader) Outer() Reader { return b.rd } + +func (b *bufferedReader) Reset(rd Reader) { + b.resetBuffer() + b.rd = rd + b.r, b.w = 0, 0 +} + func (b *bufferedReader) resetBuffer() { if b.buf == nil { b.buf = make([]byte, b.bufferSz) @@ -97,6 +183,8 @@ func (b *bufferedReader) readErr() error { return err } +func (b *bufferedReader) BufferSize() int { return b.bufferSz } + // Buffered returns the number of bytes currently buffered func (b *bufferedReader) Buffered() int { return b.w - b.r } diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_default.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/endians_default.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/endians_default.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/endians_default.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_s390x.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/endians_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/endians_s390x.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/endians_s390x.go diff --git a/vendor/github.com/apache/arrow-go/v18/internal/utils/math.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/math.go new file mode 100644 index 00000000..10acf366 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/math.go @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "math" + "math/bits" + + "golang.org/x/exp/constraints" +) + +func Min[T constraints.Ordered](a, b T) T { + if a < b { + return a + } + return b +} + +func Max[T constraints.Ordered](a, b T) T { + if a > b { + return a + } + return b +} + +// Add returns the sum of two integers while checking for [overflow]. +// It returns false (not ok) when the operation overflows. +// +// [overflow]: https://go.dev/ref/spec#Integer_overflow +func Add[T constraints.Signed](a, b T) (T, bool) { + // Overflow occurs when a and b are too positive or too negative. + // That is, when: (a > 0) && (b > 0) && (a > math.Max[T] - b) + // or when: (a < 0) && (b < 0) && (a < math.Min[T] - b) + result := a + b + + // No overflow occurred if the result is larger exactly when b is positive. 
+ return result, (result > a) == (b > 0) +} + +const ( + sqrtMaxInt = 1<<((bits.UintSize>>1)-1) - 1 + sqrtMinInt = -1 << ((bits.UintSize >> 1) - 1) +) + +// Mul returns the product of two integers while checking for [overflow]. +// It returns false (not ok) when the operation overflows. +// +// [overflow]: https://go.dev/ref/spec#Integer_overflow +func Mul(a, b int) (int, bool) { + // Avoid division by zero and calculate nothing when a or b is zero. + if a == 0 || b == 0 { + return 0, true + } + + result := a * b + + // Overflow occurred if the result is positive when exactly one input + // is negative. + if result > 0 == ((a < 0) != (b < 0)) { + return result, false + } + + // Overflow cannot occur when a or b is zero or one. + // Overflow cannot occur when a and b are less positive than sqrt(MaxInt). + // Overflow cannot occur when a and b are less negative than sqrt(MinInt). + if (sqrtMinInt <= a && a <= sqrtMaxInt && + sqrtMinInt <= b && b <= sqrtMaxInt) || a == 1 || b == 1 { + return result, true + } + + // Finally, no overflow occurred if division produces the input. This is + // last because division can be expensive. Dividing by -1 can overflow, + // but we returned early in that case above. + return result, (result/a == b) +} + +const ( + sqrtMaxInt64 = math.MaxInt32 + sqrtMinInt64 = math.MinInt32 +) + +// Mul64 returns the product of two integers while checking for [overflow]. +// It returns false (not ok) when the operation overflows. +// +// [overflow]: https://go.dev/ref/spec#Integer_overflow +func Mul64(a, b int64) (int64, bool) { + // Avoid division by zero and calculate nothing when a or b is zero. + if a == 0 || b == 0 { + return 0, true + } + + result := a * b + + // Overflow occurred if the result is positive when exactly one input + // is negative. + if result > 0 == ((a < 0) != (b < 0)) { + return result, false + } + + // Overflow cannot occur when a or b is zero or one. + // Overflow cannot occur when a and b are less positive than sqrt(MaxInt64). + // Overflow cannot occur when a and b are less negative than sqrt(MinInt64). + if (sqrtMinInt64 <= a && a <= sqrtMaxInt64 && + sqrtMinInt64 <= b && b <= sqrtMaxInt64) || a == 1 || b == 1 { + return result, true + } + + // Finally, no overflow occurred if division produces the input. This is + // last because division can be expensive. Dividing by -1 can overflow, + // but we returned early in that case above. 
+ return result, (result/a == b) +} diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_amd64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_amd64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_arm64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_arm64.go index 7404e95d..d0284966 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_arm64.go @@ -21,8 +21,9 @@ package utils import ( "os" "strings" + + "golang.org/x/sys/cpu" ) -import "golang.org/x/sys/cpu" func init() { // Added ability to enable extension via environment: diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_avx2_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_avx2_amd64.s similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_avx2_amd64.s index fe0c36e0..4c61197a 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_avx2_amd64.s @@ -15,7 +15,7 @@ DATA LCDATA1<>+0x050(SB)/8, $0x8080808080808080 DATA LCDATA1<>+0x058(SB)/8, $0x8080808080808080 GLOBL LCDATA1<>(SB), 8, $96 -TEXT ·_int8_max_min_avx2(SB), $0-32 +TEXT ·_int8_max_min_avx2(SB), $8-32 MOVQ values+0(FP), DI MOVQ length+8(FP), SI @@ -133,7 +133,7 @@ LBB0_5: JNE LBB0_9 JMP LBB0_10 -TEXT ·_uint8_max_min_avx2(SB), $0-32 +TEXT ·_uint8_max_min_avx2(SB), NOSPLIT, $0-32 MOVQ values+0(FP), DI MOVQ length+8(FP), SI @@ -263,7 +263,7 @@ DATA LCDATA2<>+0x050(SB)/8, $0x8000800080008000 DATA LCDATA2<>+0x058(SB)/8, $0x8000800080008000 GLOBL LCDATA2<>(SB), 8, $96 -TEXT ·_int16_max_min_avx2(SB), $0-32 +TEXT ·_int16_max_min_avx2(SB), NOSPLIT, $8-32 MOVQ values+0(FP), DI MOVQ length+8(FP), SI @@ -488,7 +488,7 @@ LBB3_5: DATA LCDATA3<>+0x000(SB)/8, $0x7fffffff80000000 GLOBL LCDATA3<>(SB), 8, $8 -TEXT ·_int32_max_min_avx2(SB), $0-32 +TEXT ·_int32_max_min_avx2(SB), NOSPLIT, $8-32 MOVQ values+0(FP), DI MOVQ length+8(FP), SI @@ -677,7 +677,7 @@ DATA LCDATA4<>+0x000(SB)/8, $0x8000000000000000 DATA LCDATA4<>+0x008(SB)/8, $0x7fffffffffffffff GLOBL LCDATA4<>(SB), 8, $16 -TEXT ·_int64_max_min_avx2(SB), $0-32 +TEXT ·_int64_max_min_avx2(SB), $8-32 MOVQ values+0(FP), DI MOVQ length+8(FP), SI @@ -786,7 +786,7 @@ LBB6_8: DATA 
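
For orientation, the new internal/utils/math.go added above provides generic Min/Max plus overflow-checked Add, Mul and Mul64. Below is a minimal standalone Go sketch of the same overflow-detection technique; the local names (addInt64, mulInt64) and the example values are illustrative only, since the real helpers live in an internal package of arrow-go and cannot be imported by downstream code.

package main

import (
	"fmt"
	"math"
)

// addInt64 mirrors the check in the new utils.Add: after a (possibly wrapping)
// addition, the result is greater than a exactly when b is positive, unless
// the operation overflowed.
func addInt64(a, b int64) (int64, bool) {
	result := a + b
	return result, (result > a) == (b > 0)
}

// mulInt64 mirrors utils.Mul64: a product whose sign disagrees with the signs
// of the inputs signals overflow immediately; operands that fit in 32 bits
// (or a factor of 1) are always safe; otherwise dividing the product by one
// input must recover the other.
func mulInt64(a, b int64) (int64, bool) {
	if a == 0 || b == 0 {
		return 0, true
	}
	result := a * b
	if (result > 0) == ((a < 0) != (b < 0)) {
		return result, false
	}
	if a == 1 || b == 1 ||
		(a >= math.MinInt32 && a <= math.MaxInt32 &&
			b >= math.MinInt32 && b <= math.MaxInt32) {
		return result, true
	}
	return result, result/a == b
}

func main() {
	if _, ok := addInt64(math.MaxInt64, 1); !ok {
		fmt.Println("addition overflowed")
	}
	if p, ok := mulInt64(math.MaxInt32, math.MaxInt32); ok {
		fmt.Println("product fits in int64:", p)
	}
}

As in the vendored code, the division-based check is kept for last because integer division is comparatively expensive, and the divide-by-minus-one case is already handled by the earlier sign test.
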
LCDATA5<>+0x000(SB)/8, $0x8000000000000000 GLOBL LCDATA5<>(SB), 8, $8 -TEXT ·_uint64_max_min_avx2(SB), $0-32 +TEXT ·_uint64_max_min_avx2(SB), $8-32 MOVQ values+0(FP), DI MOVQ length+8(FP), SI diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_neon_arm64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_neon_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_neon_arm64.s similarity index 90% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_neon_arm64.s index b679bb6e..a31c5d2e 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_neon_arm64.s @@ -13,6 +13,10 @@ TEXT ·_int32_max_min_neon(SB), $0-32 MOVD minout+16(FP), R2 MOVD maxout+24(FP), R3 + // The Go ABI saves the frame pointer register one word below the + // caller's frame. Make room so we don't overwrite it. Needs to stay + // 16-byte aligned + SUB $16, RSP WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! WORD $0x7100043f // cmp w1, #1 WORD $0x910003fd // mov x29, sp @@ -32,6 +36,8 @@ LBB0_3: WORD $0xb900006b // str w11, [x3] WORD $0xb900004a // str w10, [x2] WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET LBB0_4: WORD $0x927e7509 // and x9, x8, #0xfffffffc @@ -76,6 +82,8 @@ LBB0_9: WORD $0xb900006b // str w11, [x3] WORD $0xb900004a // str w10, [x2] WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET // func _uint32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) @@ -86,6 +94,10 @@ TEXT ·_uint32_max_min_neon(SB), $0-32 MOVD minout+16(FP), R2 MOVD maxout+24(FP), R3 + // The Go ABI saves the frame pointer register one word below the + // caller's frame. Make room so we don't overwrite it. Needs to stay + // 16-byte aligned + SUB $16, RSP WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! WORD $0x7100043f // cmp w1, #1 WORD $0x910003fd // mov x29, sp @@ -105,6 +117,8 @@ LBB1_3: WORD $0xb900006a // str w10, [x3] WORD $0xb900004b // str w11, [x2] WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET LBB1_4: WORD $0x927e7509 // and x9, x8, #0xfffffffc @@ -149,6 +163,8 @@ LBB1_9: WORD $0xb900006a // str w10, [x3] WORD $0xb900004b // str w11, [x2] WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET // func _int64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer) @@ -159,6 +175,10 @@ TEXT ·_int64_max_min_neon(SB), $0-32 MOVD minout+16(FP), R2 MOVD maxout+24(FP), R3 + // The Go ABI saves the frame pointer register one word below the + // caller's frame. Make room so we don't overwrite it. Needs to stay + // 16-byte aligned + SUB $16, RSP WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! 
WORD $0x7100043f // cmp w1, #1 WORD $0x910003fd // mov x29, sp @@ -178,6 +198,8 @@ LBB2_3: WORD $0xf900006b // str x11, [x3] WORD $0xf900004a // str x10, [x2] WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET LBB2_4: WORD $0x927e7509 // and x9, x8, #0xfffffffc @@ -234,6 +256,8 @@ LBB2_9: WORD $0xf900006b // str x11, [x3] WORD $0xf900004a // str x10, [x2] WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET @@ -245,6 +269,10 @@ TEXT ·_uint64_max_min_neon(SB), $0-32 MOVD minout+16(FP), R2 MOVD maxout+24(FP), R3 + // The Go ABI saves the frame pointer register one word below the + // caller's frame. Make room so we don't overwrite it. Needs to stay + // 16-byte aligned + SUB $16, RSP WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]! WORD $0x7100043f // cmp w1, #1 WORD $0x910003fd // mov x29, sp @@ -264,6 +292,8 @@ LBB3_3: WORD $0xf900006a // str x10, [x3] WORD $0xf900004b // str x11, [x2] WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET LBB3_4: WORD $0x927e7509 // and x9, x8, #0xfffffffc @@ -320,5 +350,7 @@ LBB3_9: WORD $0xf900006a // str x10, [x3] WORD $0xf900004b // str x11, [x2] WORD $0xa8c17bfd // ldp x29, x30, [sp], #16 + // Put the stack pointer back where it was + ADD $16, RSP RET diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_noasm.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_noasm.go index 19c24b59..625f0ea3 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_noasm.go @@ -14,7 +14,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build noasm +//go:build noasm || (!amd64 && !arm64 && !s390x && !ppc64le) package utils diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_ppc64le.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_ppc64le.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_ppc64le.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_ppc64le.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_s390x.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_s390x.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_s390x.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.s b/vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.s rename to vendor/github.com/apache/arrow-go/v18/internal/utils/min_max_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow-go/v18/internal/utils/recovery.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/recovery.go new file mode 100644 index 00000000..58aede47 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/recovery.go @@ -0,0 +1,31 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import "fmt" + +// FormatRecoveredError is used in cases where a panic/recover receives an +// object which is potentially an error that could be wrapped, instead of +// formatted, so that callers can see it. This may be useful, for example, +// with custom Allocators which panic to signal failure; these panics will be +// recovered as wrapped errors, letting the client distinguish them. 
+func FormatRecoveredError(msg string, recovered any) error { + if err, ok := recovered.(error); ok { + return fmt.Errorf("%s: %w", msg, err) + } + return fmt.Errorf("%s: %v", msg, recovered) +} diff --git a/vendor/github.com/apache/arrow-go/v18/internal/utils/ref_count.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/ref_count.go new file mode 100644 index 00000000..9b85f75b --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/ref_count.go @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import "sync/atomic" + +// NewRefCount creates a new atomic counter set to the specified initial value. +func NewRefCount(initial int64) *atomic.Int64 { + var val atomic.Int64 + val.Store(initial) + return &val +} diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go.tmpl b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.tmpldata b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.tmpldata rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints.tmpldata diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go.tmpl b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_amd64.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_amd64.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_arm64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_arm64.go similarity index 100% 
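
The two small files above, recovery.go and ref_count.go, are also new in arrow-go v18. The following is a rough usage sketch of the patterns they support; the helper bodies are restated locally because the internal package is not importable from outside the module, and every name in main (buildBuffer, errAllocFailed, the message strings) is illustrative rather than taken from the library.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errAllocFailed = errors.New("allocation failed")

// formatRecoveredError restates utils.FormatRecoveredError: if the recovered
// value is an error, wrap it with %w so errors.Is / errors.As still work.
func formatRecoveredError(msg string, recovered any) error {
	if err, ok := recovered.(error); ok {
		return fmt.Errorf("%s: %w", msg, err)
	}
	return fmt.Errorf("%s: %v", msg, recovered)
}

// newRefCount restates utils.NewRefCount: an atomic counter seeded with an
// initial value.
func newRefCount(initial int64) *atomic.Int64 {
	var v atomic.Int64
	v.Store(initial)
	return &v
}

// buildBuffer panics, as a custom allocator might, and converts the panic
// into a wrapped error on the way out.
func buildBuffer() (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = formatRecoveredError("buffer build failed", r)
		}
	}()
	panic(errAllocFailed)
}

func main() {
	if err := buildBuffer(); errors.Is(err, errAllocFailed) {
		fmt.Println("panic recovered, cause still matchable:", err)
	}

	refs := newRefCount(1)
	refs.Add(1)            // retain a second reference
	refs.Add(-1)           // release it
	if refs.Add(-1) == 0 { // release the last reference
		fmt.Println("all references released")
	}
}

Wrapping the recovered value with %w rather than formatting it is what keeps errors.Is and errors.As usable on the resulting error, which appears to be the motivation for adding FormatRecoveredError.
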
rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_arm64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_avx2_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.s b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.s rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_def.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_def.go index cc3b0abb..05a9e5b6 100644 --- a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go +++ b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_def.go @@ -19,7 +19,7 @@ package utils import ( "errors" - "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow-go/v18/arrow" ) //go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata -d arch=avx2 transpose_ints_simd.go.tmpl=transpose_ints_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_noasm.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_noasm.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go.tmpl b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_noasm.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_noasm.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_ppc64le.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_ppc64le.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_ppc64le.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_ppc64le.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_s390x.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go.tmpl b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_s390x.go.tmpl similarity index 100% rename from 
vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_s390x.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_simd.go.tmpl b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_simd.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_simd.go.tmpl rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_simd.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.go b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.go rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.s b/vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.s rename to vendor/github.com/apache/arrow-go/v18/internal/utils/transpose_ints_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v14/LICENSE.txt b/vendor/github.com/apache/arrow/go/v14/LICENSE.txt deleted file mode 100644 index 57310329..00000000 --- a/vendor/github.com/apache/arrow/go/v14/LICENSE.txt +++ /dev/null @@ -1,1791 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- - -src/arrow/util (some portions): Apache 2.0, and 3-clause BSD - -Some portions of this module are derived from code in the Chromium project, -copyright (c) Google inc and (c) The Chromium Authors and licensed under the -Apache 2.0 License or the under the 3-clause BSD license: - - Copyright (c) 2013 The Chromium Authors. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. 
nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -This project includes code from Daniel Lemire's FrameOfReference project. - -https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp - -Copyright: 2013 Daniel Lemire -Home page: http://lemire.me/en/ -Project page: https://github.com/lemire/FrameOfReference -License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -This project includes code from the TensorFlow project - -Copyright 2015 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -This project includes code from the NumPy project. - -https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 - -https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c - -Copyright (c) 2005-2017, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -This project includes code from the Boost project - -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -This project includes code from the FlatBuffers project - -Copyright 2014 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -This project includes code from the tslib project - -Copyright 2015 Microsoft Corporation. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- --------------------------------------------------------------------------------- - -This project includes code from the jemalloc project - -https://github.com/jemalloc/jemalloc - -Copyright (C) 2002-2017 Jason Evans . -All rights reserved. -Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice(s), - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice(s), - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- - -This project includes code from the Go project, BSD 3-clause license + PATENTS -weak patent termination clause -(https://github.com/golang/go/blob/master/PATENTS). - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------------------- - -This project includes code from the hs2client - -https://github.com/cloudera/hs2client - -Copyright 2016 Cloudera Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -The script ci/scripts/util_wait_for_it.sh has the following license - -Copyright (c) 2016 Giles Hall - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- - -The script r/configure has the following license (MIT) - -Copyright (c) 2017, Jeroen Ooms and Jim Hester - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- - -cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and -cpp/src/arrow/util/logging-test.cc are adapted from -Ray Project (https://github.com/ray-project/ray) (Apache 2.0). 
- -Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray) - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- -The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h, -cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h, -cpp/src/arrow/vendored/datetime/ios.mm, -cpp/src/arrow/vendored/datetime/tz.cpp are adapted from -Howard Hinnant's date library (https://github.com/HowardHinnant/date) -It is licensed under MIT license. - -The MIT License (MIT) -Copyright (c) 2015, 2016, 2017 Howard Hinnant -Copyright (c) 2016 Adrian Colomitchi -Copyright (c) 2017 Florian Dang -Copyright (c) 2017 Paul Thompson -Copyright (c) 2018 Tomasz Kamiński - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- - -The file cpp/src/arrow/util/utf8.h includes code adapted from the page - https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ -with the following license (MIT) - -Copyright (c) 2008-2009 Bjoern Hoehrmann - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- - -The file cpp/src/arrow/vendored/string_view.hpp has the following license - -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/xxhash/ have the following license -(BSD 2-Clause License) - -xxHash Library -Copyright (c) 2012-2014, Yann Collet -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -You can contact the author at : -- xxHash homepage: http://www.xxhash.com -- xxHash source repository : https://github.com/Cyan4973/xxHash - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/double-conversion/ have the following license -(BSD 3-Clause License) - -Copyright 2006-2011, the V8 project authors. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/uriparser/ have the following license -(BSD 3-Clause License) - -uriparser - RFC 3986 URI parsing library - -Copyright (C) 2007, Weijia Song -Copyright (C) 2007, Sebastian Pipping -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - * Redistributions of source code must retain the above - copyright notice, this list of conditions and the following - disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - - * Neither the name of the nor the names of its - contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -The files under dev/tasks/conda-recipes have the following license - -BSD 3-clause license -Copyright (c) 2015-2018, conda-forge -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/utf8cpp/ have the following license - -Copyright 2006 Nemanja Trifunovic - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -This project includes code from Apache Kudu. - - * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake - -Copyright: 2016 The Apache Software Foundation. -Home page: https://kudu.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -This project includes code from Apache Impala (incubating), formerly -Impala. The Impala code and rights were donated to the ASF as part of the -Incubator process after the initial code imports into Apache Parquet. - -Copyright: 2012 Cloudera, Inc. -Copyright: 2016 The Apache Software Foundation. -Home page: http://impala.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -This project includes code from Apache Aurora. - -* dev/release/{release,changelog,release-candidate} are based on the scripts from - Apache Aurora - -Copyright: 2016 The Apache Software Foundation. -Home page: https://aurora.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -This project includes code from the Google styleguide. - -* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. - -Copyright: 2009 Google Inc. All rights reserved. -Homepage: https://github.com/google/styleguide -License: 3-clause BSD - --------------------------------------------------------------------------------- - -This project includes code from Snappy. - -* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code - from Google's Snappy project. - -Copyright: 2009 Google Inc. All rights reserved. -Homepage: https://github.com/google/snappy -License: 3-clause BSD - --------------------------------------------------------------------------------- - -This project includes code from the manylinux project. - -* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, - requirements.txt} are based on code from the manylinux project. - -Copyright: 2016 manylinux -Homepage: https://github.com/pypa/manylinux -License: The MIT License (MIT) - --------------------------------------------------------------------------------- - -This project includes code from the cymove project: - -* python/pyarrow/includes/common.pxd includes code from the cymove project - -The MIT License (MIT) -Copyright (c) 2019 Omer Ozarslan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE -OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -The projects includes code from the Ursabot project under the dev/archery -directory. - -License: BSD 2-Clause - -Copyright 2019 RStudio, Inc. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -This project include code from CMake. - -* cpp/cmake_modules/FindGTest.cmake is based on code from CMake. - -Copyright: Copyright 2000-2019 Kitware, Inc. and Contributors -Homepage: https://gitlab.kitware.com/cmake/cmake -License: 3-clause BSD - --------------------------------------------------------------------------------- - -This project include code from mingw-w64. - -* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 - -Copyright (c) 2009 - 2013 by the mingw-w64 project -Homepage: https://mingw-w64.org -License: Zope Public License (ZPL) Version 2.1. - ---------------------------------------------------------------------------------- - -This project include code from Google's Asylo project. - -* cpp/src/arrow/result.h is based on status_or.h - -Copyright (c) Copyright 2017 Asylo authors -Homepage: https://asylo.dev/ -License: Apache 2.0 - --------------------------------------------------------------------------------- - -This project includes code from Google's protobuf project - -* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN - -Copyright 2008 Google Inc. All rights reserved. -Homepage: https://developers.google.com/protocol-buffers/ -License: - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. - --------------------------------------------------------------------------------- - -3rdparty dependency LLVM is statically linked in certain binary distributions. -Additionally some sections of source code have been derived from sources in LLVM -and have been clearly labeled as such. LLVM has the following license: - -============================================================================== -LLVM Release License -============================================================================== -University of Illinois/NCSA -Open Source License - -Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign. -All rights reserved. - -Developed by: - - LLVM Team - - University of Illinois at Urbana-Champaign - - http://llvm.org - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal with -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimers. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimers in the - documentation and/or other materials provided with the distribution. - - * Neither the names of the LLVM Team, University of Illinois at - Urbana-Champaign, nor the names of its contributors may be used to - endorse or promote products derived from this Software without specific - prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE -SOFTWARE. - -============================================================================== -Copyrights and Licenses for Third Party Software Distributed with LLVM: -============================================================================== -The LLVM software contains code written by third parties. Such software will -have its own individual LICENSE.TXT file in the directory in which it appears. -This file will describe the copyrights, license, and restrictions which apply -to that code. - -The disclaimer of warranty in the University of Illinois Open Source License -applies to all code in the LLVM Distribution, and nothing in any of the -other licenses gives permission to use the names of the LLVM Team or the -University of Illinois to endorse or promote products derived from this -Software. - -The following pieces of software have additional or alternate copyrights, -licenses, and/or restrictions: - -Program Directory -------- --------- -Google Test llvm/utils/unittest/googletest -OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex} -pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT} -ARM contributions llvm/lib/Target/ARM/LICENSE.TXT -md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h - --------------------------------------------------------------------------------- - -3rdparty dependency gRPC is statically linked in certain binary -distributions, like the python wheels. gRPC has the following license: - -Copyright 2014 gRPC authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -3rdparty dependency Apache Thrift is statically linked in certain binary -distributions, like the python wheels. Apache Thrift has the following license: - -Apache Thrift -Copyright (C) 2006 - 2019, The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -3rdparty dependency Apache ORC is statically linked in certain binary -distributions, like the python wheels. 
Apache ORC has the following license: - -Apache ORC -Copyright 2013-2019 The Apache Software Foundation - -This product includes software developed by The Apache Software -Foundation (http://www.apache.org/). - -This product includes software developed by Hewlett-Packard: -(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -3rdparty dependency zstd is statically linked in certain binary -distributions, like the python wheels. ZSTD has the following license: - -BSD License - -For Zstandard software - -Copyright (c) 2016-present, Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency lz4 is statically linked in certain binary -distributions, like the python wheels. lz4 has the following license: - -LZ4 Library -Copyright (c) 2011-2016, Yann Collet -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency Brotli is statically linked in certain binary -distributions, like the python wheels. Brotli has the following license: - -Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - --------------------------------------------------------------------------------- - -3rdparty dependency snappy is statically linked in certain binary -distributions, like the python wheels. snappy has the following license: - -Copyright 2011, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -=== - -Some of the benchmark data in testdata/ is licensed differently: - - - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and - is licensed under the Creative Commons Attribution 3.0 license - (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ - for more information. - - - kppkn.gtb is taken from the Gaviota chess tablebase set, and - is licensed under the MIT License. See - https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 - for more information. - - - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper - “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA - Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, - which is licensed under the CC-BY license. See - http://www.ploscompbiol.org/static/license for more ifnormation. - - - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project - Gutenberg. The first three have expired copyrights and are in the public - domain; the latter does not have expired copyright, but is still in the - public domain according to the license information - (http://www.gutenberg.org/ebooks/53). - --------------------------------------------------------------------------------- - -3rdparty dependency gflags is statically linked in certain binary -distributions, like the python wheels. gflags has the following license: - -Copyright (c) 2006, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------------------- - -3rdparty dependency glog is statically linked in certain binary -distributions, like the python wheels. glog has the following license: - -Copyright (c) 2008, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -A function gettimeofday in utilities.cc is based on - -http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd - -The license of this code is: - -Copyright (c) 2003-2008, Jouni Malinen and contributors -All Rights Reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name(s) of the above-listed copyright holder(s) nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- --------------------------------------------------------------------------------- - -3rdparty dependency re2 is statically linked in certain binary -distributions, like the python wheels. re2 has the following license: - -Copyright (c) 2009 The RE2 Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - * Neither the name of Google Inc. nor the names of its contributors - may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency c-ares is statically linked in certain binary -distributions, like the python wheels. c-ares has the following license: - -# c-ares license - -Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS -file. - -Copyright 1998 by the Massachusetts Institute of Technology. - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose and without fee is hereby granted, provided that -the above copyright notice appear in all copies and that both that copyright -notice and this permission notice appear in supporting documentation, and that -the name of M.I.T. not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior permission. -M.I.T. makes no representations about the suitability of this software for any -purpose. It is provided "as is" without express or implied warranty. - --------------------------------------------------------------------------------- - -3rdparty dependency zlib is redistributed as a dynamically linked shared -library in certain binary distributions, like the python wheels. In the future -this will likely change to static linkage. zlib has the following license: - -zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.11, January 15th, 2017 - - Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. 
- - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - --------------------------------------------------------------------------------- - -3rdparty dependency openssl is redistributed as a dynamically linked shared -library in certain binary distributions, like the python wheels. openssl -preceding version 3 has the following license: - - LICENSE ISSUES - ============== - - The OpenSSL toolkit stays under a double license, i.e. both the conditions of - the OpenSSL License and the original SSLeay license apply to the toolkit. - See below for the actual license texts. - - OpenSSL License - --------------- - -/* ==================================================================== - * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ - - Original SSLeay License - ----------------------- - -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). - * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ - --------------------------------------------------------------------------------- - -This project includes code from the rtools-backports project. - -* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code - from the rtools-backports project. - -Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. -All rights reserved. -Homepage: https://github.com/r-windows/rtools-backports -License: 3-clause BSD - --------------------------------------------------------------------------------- - -Some code from pandas has been adapted for the pyarrow codebase. pandas is -available under the 3-clause BSD license, which follows: - -pandas license -============== - -Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team -All rights reserved. - -Copyright (c) 2008-2011 AQR Capital Management, LLC -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the copyright holder nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -Some bits from DyND, in particular aspects of the build system, have been -adapted from libdynd and dynd-python under the terms of the BSD 2-clause -license - -The BSD 2-Clause License - - Copyright (C) 2011-12, Dynamic NDArray Developers - All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Dynamic NDArray Developers list: - - * Mark Wiebe - * Continuum Analytics - --------------------------------------------------------------------------------- - -Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted -for PyArrow. Ibis is released under the Apache License, Version 2.0. - --------------------------------------------------------------------------------- - -dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: - -BSD 2-Clause License - -Copyright (c) 2009-present, Homebrew contributors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- - -cpp/src/arrow/vendored/base64.cpp has the following license - -ZLIB License - -Copyright (C) 2004-2017 René Nyffenegger - -This source code is provided 'as-is', without any express or implied -warranty. In no event will the author be held liable for any damages arising -from the use of this software. 
- -Permission is granted to anyone to use this software for any purpose, including -commercial applications, and to alter it and redistribute it freely, subject to -the following restrictions: - -1. The origin of this source code must not be misrepresented; you must not - claim that you wrote the original source code. If you use this source code - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - -2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original source code. - -3. This notice may not be removed or altered from any source distribution. - -René Nyffenegger rene.nyffenegger@adp-gmbh.ch - --------------------------------------------------------------------------------- - -The file cpp/src/arrow/vendored/optional.hpp has the following license - -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -The file cpp/src/arrow/vendored/musl/strptime.c has the following license - -Copyright © 2005-2020 Rich Felker, et al. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go deleted file mode 100644 index 33175316..00000000 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go +++ /dev/null @@ -1,365 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package array - -import ( - "bytes" - "fmt" - "math" - "math/big" - "reflect" - "strings" - "sync/atomic" - - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/decimal128" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" -) - -// A type which represents an immutable sequence of 128-bit decimal values. -type Decimal128 struct { - array - - values []decimal128.Num -} - -func NewDecimal128Data(data arrow.ArrayData) *Decimal128 { - a := &Decimal128{} - a.refCount = 1 - a.setData(data.(*Data)) - return a -} - -func (a *Decimal128) Value(i int) decimal128.Num { return a.values[i] } - -func (a *Decimal128) ValueStr(i int) string { - if a.IsNull(i) { - return NullValueStr - } - return a.GetOneForMarshal(i).(string) -} - -func (a *Decimal128) Values() []decimal128.Num { return a.values } - -func (a *Decimal128) String() string { - o := new(strings.Builder) - o.WriteString("[") - for i := 0; i < a.Len(); i++ { - if i > 0 { - fmt.Fprintf(o, " ") - } - switch { - case a.IsNull(i): - o.WriteString(NullValueStr) - default: - fmt.Fprintf(o, "%v", a.Value(i)) - } - } - o.WriteString("]") - return o.String() -} - -func (a *Decimal128) setData(data *Data) { - a.array.setData(data) - vals := data.buffers[1] - if vals != nil { - a.values = arrow.Decimal128Traits.CastFromBytes(vals.Bytes()) - beg := a.array.data.offset - end := beg + a.array.data.length - a.values = a.values[beg:end] - } -} - -func (a *Decimal128) GetOneForMarshal(i int) interface{} { - if a.IsNull(i) { - return nil - } - - typ := a.DataType().(*arrow.Decimal128Type) - f := (&big.Float{}).SetInt(a.Value(i).BigInt()) - f.Quo(f, big.NewFloat(math.Pow10(int(typ.Scale)))) - return f.Text('g', int(typ.Precision)) -} - -// ["1.23", ] -func (a *Decimal128) MarshalJSON() ([]byte, error) { - vals := make([]interface{}, a.Len()) - for i := 0; i < a.Len(); i++ { - vals[i] = a.GetOneForMarshal(i) - } - return json.Marshal(vals) -} - -func arrayEqualDecimal128(left, right *Decimal128) bool { - for i := 0; i < left.Len(); i++ { - if left.IsNull(i) { - continue - } - if left.Value(i) != right.Value(i) { - return false - } - } - return true -} - -type Decimal128Builder struct { - builder - - dtype *arrow.Decimal128Type - data *memory.Buffer - rawData []decimal128.Num -} - -func 
NewDecimal128Builder(mem memory.Allocator, dtype *arrow.Decimal128Type) *Decimal128Builder { - return &Decimal128Builder{ - builder: builder{refCount: 1, mem: mem}, - dtype: dtype, - } -} - -func (b *Decimal128Builder) Type() arrow.DataType { return b.dtype } - -// Release decreases the reference count by 1. -// When the reference count goes to zero, the memory is freed. -func (b *Decimal128Builder) Release() { - debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") - - if atomic.AddInt64(&b.refCount, -1) == 0 { - if b.nullBitmap != nil { - b.nullBitmap.Release() - b.nullBitmap = nil - } - if b.data != nil { - b.data.Release() - b.data = nil - b.rawData = nil - } - } -} - -func (b *Decimal128Builder) Append(v decimal128.Num) { - b.Reserve(1) - b.UnsafeAppend(v) -} - -func (b *Decimal128Builder) UnsafeAppend(v decimal128.Num) { - bitutil.SetBit(b.nullBitmap.Bytes(), b.length) - b.rawData[b.length] = v - b.length++ -} - -func (b *Decimal128Builder) AppendNull() { - b.Reserve(1) - b.UnsafeAppendBoolToBitmap(false) -} - -func (b *Decimal128Builder) AppendNulls(n int) { - for i := 0; i < n; i++ { - b.AppendNull() - } -} - -func (b *Decimal128Builder) AppendEmptyValue() { - b.Append(decimal128.Num{}) -} - -func (b *Decimal128Builder) AppendEmptyValues(n int) { - for i := 0; i < n; i++ { - b.AppendEmptyValue() - } -} - -func (b *Decimal128Builder) UnsafeAppendBoolToBitmap(isValid bool) { - if isValid { - bitutil.SetBit(b.nullBitmap.Bytes(), b.length) - } else { - b.nulls++ - } - b.length++ -} - -// AppendValues will append the values in the v slice. The valid slice determines which values -// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, -// all values in v are appended and considered valid. -func (b *Decimal128Builder) AppendValues(v []decimal128.Num, valid []bool) { - if len(v) != len(valid) && len(valid) != 0 { - panic("len(v) != len(valid) && len(valid) != 0") - } - - if len(v) == 0 { - return - } - - b.Reserve(len(v)) - if len(v) > 0 { - arrow.Decimal128Traits.Copy(b.rawData[b.length:], v) - } - b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) -} - -func (b *Decimal128Builder) init(capacity int) { - b.builder.init(capacity) - - b.data = memory.NewResizableBuffer(b.mem) - bytesN := arrow.Decimal128Traits.BytesRequired(capacity) - b.data.Resize(bytesN) - b.rawData = arrow.Decimal128Traits.CastFromBytes(b.data.Bytes()) -} - -// Reserve ensures there is enough space for appending n elements -// by checking the capacity and calling Resize if necessary. -func (b *Decimal128Builder) Reserve(n int) { - b.builder.reserve(n, b.Resize) -} - -// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), -// additional memory will be allocated. If n is smaller, the allocated memory may reduced. -func (b *Decimal128Builder) Resize(n int) { - nBuilder := n - if n < minBuilderCapacity { - n = minBuilderCapacity - } - - if b.capacity == 0 { - b.init(n) - } else { - b.builder.resize(nBuilder, b.init) - b.data.Resize(arrow.Decimal128Traits.BytesRequired(n)) - b.rawData = arrow.Decimal128Traits.CastFromBytes(b.data.Bytes()) - } -} - -// NewArray creates a Decimal128 array from the memory buffers used by the builder and resets the Decimal128Builder -// so it can be used to build a new array. 
-func (b *Decimal128Builder) NewArray() arrow.Array { - return b.NewDecimal128Array() -} - -// NewDecimal128Array creates a Decimal128 array from the memory buffers used by the builder and resets the Decimal128Builder -// so it can be used to build a new array. -func (b *Decimal128Builder) NewDecimal128Array() (a *Decimal128) { - data := b.newData() - a = NewDecimal128Data(data) - data.Release() - return -} - -func (b *Decimal128Builder) newData() (data *Data) { - bytesRequired := arrow.Decimal128Traits.BytesRequired(b.length) - if bytesRequired > 0 && bytesRequired < b.data.Len() { - // trim buffers - b.data.Resize(bytesRequired) - } - data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) - b.reset() - - if b.data != nil { - b.data.Release() - b.data = nil - b.rawData = nil - } - - return -} - -func (b *Decimal128Builder) AppendValueFromString(s string) error { - if s == NullValueStr { - b.AppendNull() - return nil - } - val, err := decimal128.FromString(s, b.dtype.Precision, b.dtype.Scale) - if err != nil { - b.AppendNull() - return err - } - b.Append(val) - return nil -} - -func (b *Decimal128Builder) UnmarshalOne(dec *json.Decoder) error { - t, err := dec.Token() - if err != nil { - return err - } - - switch v := t.(type) { - case float64: - val, err := decimal128.FromFloat64(v, b.dtype.Precision, b.dtype.Scale) - if err != nil { - return err - } - b.Append(val) - case string: - val, err := decimal128.FromString(v, b.dtype.Precision, b.dtype.Scale) - if err != nil { - return err - } - b.Append(val) - case json.Number: - val, err := decimal128.FromString(v.String(), b.dtype.Precision, b.dtype.Scale) - if err != nil { - return err - } - b.Append(val) - case nil: - b.AppendNull() - return nil - default: - return &json.UnmarshalTypeError{ - Value: fmt.Sprint(t), - Type: reflect.TypeOf(decimal128.Num{}), - Offset: dec.InputOffset(), - } - } - - return nil -} - -func (b *Decimal128Builder) Unmarshal(dec *json.Decoder) error { - for dec.More() { - if err := b.UnmarshalOne(dec); err != nil { - return err - } - } - return nil -} - -// UnmarshalJSON will add the unmarshalled values to this builder. -// -// If the values are strings, they will get parsed with big.ParseFloat using -// a rounding mode of big.ToNearestAway currently. -func (b *Decimal128Builder) UnmarshalJSON(data []byte) error { - dec := json.NewDecoder(bytes.NewReader(data)) - t, err := dec.Token() - if err != nil { - return err - } - - if delim, ok := t.(json.Delim); !ok || delim != '[' { - return fmt.Errorf("decimal128 builder must unpack from json array, found %s", delim) - } - - return b.Unmarshal(dec) -} - -var ( - _ arrow.Array = (*Decimal128)(nil) - _ Builder = (*Decimal128Builder)(nil) -) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go deleted file mode 100644 index d63544f7..00000000 --- a/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go +++ /dev/null @@ -1,364 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package array - -import ( - "bytes" - "fmt" - "math" - "math/big" - "reflect" - "strings" - "sync/atomic" - - "github.com/apache/arrow/go/v14/arrow" - "github.com/apache/arrow/go/v14/arrow/bitutil" - "github.com/apache/arrow/go/v14/arrow/decimal256" - "github.com/apache/arrow/go/v14/arrow/internal/debug" - "github.com/apache/arrow/go/v14/arrow/memory" - "github.com/apache/arrow/go/v14/internal/json" -) - -// Decimal256 is a type that represents an immutable sequence of 256-bit decimal values. -type Decimal256 struct { - array - - values []decimal256.Num -} - -func NewDecimal256Data(data arrow.ArrayData) *Decimal256 { - a := &Decimal256{} - a.refCount = 1 - a.setData(data.(*Data)) - return a -} - -func (a *Decimal256) Value(i int) decimal256.Num { return a.values[i] } - -func (a *Decimal256) ValueStr(i int) string { - if a.IsNull(i) { - return NullValueStr - } - return a.GetOneForMarshal(i).(string) -} - -func (a *Decimal256) Values() []decimal256.Num { return a.values } - -func (a *Decimal256) String() string { - o := new(strings.Builder) - o.WriteString("[") - for i := 0; i < a.Len(); i++ { - if i > 0 { - fmt.Fprintf(o, " ") - } - switch { - case a.IsNull(i): - o.WriteString(NullValueStr) - default: - fmt.Fprintf(o, "%v", a.Value(i)) - } - } - o.WriteString("]") - return o.String() -} - -func (a *Decimal256) setData(data *Data) { - a.array.setData(data) - vals := data.buffers[1] - if vals != nil { - a.values = arrow.Decimal256Traits.CastFromBytes(vals.Bytes()) - beg := a.array.data.offset - end := beg + a.array.data.length - a.values = a.values[beg:end] - } -} - -func (a *Decimal256) GetOneForMarshal(i int) interface{} { - if a.IsNull(i) { - return nil - } - - typ := a.DataType().(*arrow.Decimal256Type) - f := (&big.Float{}).SetInt(a.Value(i).BigInt()) - f.Quo(f, big.NewFloat(math.Pow10(int(typ.Scale)))) - return f.Text('g', int(typ.Precision)) -} - -func (a *Decimal256) MarshalJSON() ([]byte, error) { - vals := make([]interface{}, a.Len()) - for i := 0; i < a.Len(); i++ { - vals[i] = a.GetOneForMarshal(i) - } - return json.Marshal(vals) -} - -func arrayEqualDecimal256(left, right *Decimal256) bool { - for i := 0; i < left.Len(); i++ { - if left.IsNull(i) { - continue - } - if left.Value(i) != right.Value(i) { - return false - } - } - return true -} - -type Decimal256Builder struct { - builder - - dtype *arrow.Decimal256Type - data *memory.Buffer - rawData []decimal256.Num -} - -func NewDecimal256Builder(mem memory.Allocator, dtype *arrow.Decimal256Type) *Decimal256Builder { - return &Decimal256Builder{ - builder: builder{refCount: 1, mem: mem}, - dtype: dtype, - } -} - -// Release decreases the reference count by 1. -// When the reference count goes to zero, the memory is freed. 
-func (b *Decimal256Builder) Release() { - debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") - - if atomic.AddInt64(&b.refCount, -1) == 0 { - if b.nullBitmap != nil { - b.nullBitmap.Release() - b.nullBitmap = nil - } - if b.data != nil { - b.data.Release() - b.data = nil - b.rawData = nil - } - } -} - -func (b *Decimal256Builder) Append(v decimal256.Num) { - b.Reserve(1) - b.UnsafeAppend(v) -} - -func (b *Decimal256Builder) UnsafeAppend(v decimal256.Num) { - bitutil.SetBit(b.nullBitmap.Bytes(), b.length) - b.rawData[b.length] = v - b.length++ -} - -func (b *Decimal256Builder) AppendNull() { - b.Reserve(1) - b.UnsafeAppendBoolToBitmap(false) -} - -func (b *Decimal256Builder) AppendNulls(n int) { - for i := 0; i < n; i++ { - b.AppendNull() - } -} - -func (b *Decimal256Builder) AppendEmptyValue() { - b.Append(decimal256.Num{}) -} - -func (b *Decimal256Builder) AppendEmptyValues(n int) { - for i := 0; i < n; i++ { - b.AppendEmptyValue() - } -} - -func (b *Decimal256Builder) Type() arrow.DataType { return b.dtype } - -func (b *Decimal256Builder) UnsafeAppendBoolToBitmap(isValid bool) { - if isValid { - bitutil.SetBit(b.nullBitmap.Bytes(), b.length) - } else { - b.nulls++ - } - b.length++ -} - -// AppendValues will append the values in the v slice. The valid slice determines which values -// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, -// all values in v are appended and considered valid. -func (b *Decimal256Builder) AppendValues(v []decimal256.Num, valid []bool) { - if len(v) != len(valid) && len(valid) != 0 { - panic("arrow/array: len(v) != len(valid) && len(valid) != 0") - } - - if len(v) == 0 { - return - } - - b.Reserve(len(v)) - if len(v) > 0 { - arrow.Decimal256Traits.Copy(b.rawData[b.length:], v) - } - b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) -} - -func (b *Decimal256Builder) init(capacity int) { - b.builder.init(capacity) - - b.data = memory.NewResizableBuffer(b.mem) - bytesN := arrow.Decimal256Traits.BytesRequired(capacity) - b.data.Resize(bytesN) - b.rawData = arrow.Decimal256Traits.CastFromBytes(b.data.Bytes()) -} - -// Reserve ensures there is enough space for appending n elements -// by checking the capacity and calling Resize if necessary. -func (b *Decimal256Builder) Reserve(n int) { - b.builder.reserve(n, b.Resize) -} - -// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), -// additional memory will be allocated. If n is smaller, the allocated memory may reduced. -func (b *Decimal256Builder) Resize(n int) { - nBuilder := n - if n < minBuilderCapacity { - n = minBuilderCapacity - } - - if b.capacity == 0 { - b.init(n) - } else { - b.builder.resize(nBuilder, b.init) - b.data.Resize(arrow.Decimal256Traits.BytesRequired(n)) - b.rawData = arrow.Decimal256Traits.CastFromBytes(b.data.Bytes()) - } -} - -// NewArray creates a Decimal256 array from the memory buffers used by the builder and resets the Decimal256Builder -// so it can be used to build a new array. -func (b *Decimal256Builder) NewArray() arrow.Array { - return b.NewDecimal256Array() -} - -// NewDecimal256Array creates a Decimal256 array from the memory buffers used by the builder and resets the Decimal256Builder -// so it can be used to build a new array. 
-func (b *Decimal256Builder) NewDecimal256Array() (a *Decimal256) { - data := b.newData() - a = NewDecimal256Data(data) - data.Release() - return -} - -func (b *Decimal256Builder) newData() (data *Data) { - bytesRequired := arrow.Decimal256Traits.BytesRequired(b.length) - if bytesRequired > 0 && bytesRequired < b.data.Len() { - // trim buffers - b.data.Resize(bytesRequired) - } - data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) - b.reset() - - if b.data != nil { - b.data.Release() - b.data = nil - b.rawData = nil - } - - return -} - -func (b *Decimal256Builder) AppendValueFromString(s string) error { - if s == NullValueStr { - b.AppendNull() - return nil - } - val, err := decimal256.FromString(s, b.dtype.Precision, b.dtype.Scale) - if err != nil { - b.AppendNull() - return err - } - b.Append(val) - return nil -} - -func (b *Decimal256Builder) UnmarshalOne(dec *json.Decoder) error { - t, err := dec.Token() - if err != nil { - return err - } - - switch v := t.(type) { - case float64: - val, err := decimal256.FromFloat64(v, b.dtype.Precision, b.dtype.Scale) - if err != nil { - return err - } - b.Append(val) - case string: - out, err := decimal256.FromString(v, b.dtype.Precision, b.dtype.Scale) - if err != nil { - return err - } - b.Append(out) - case json.Number: - out, err := decimal256.FromString(v.String(), b.dtype.Precision, b.dtype.Scale) - if err != nil { - return err - } - b.Append(out) - case nil: - b.AppendNull() - return nil - default: - return &json.UnmarshalTypeError{ - Value: fmt.Sprint(t), - Type: reflect.TypeOf(decimal256.Num{}), - Offset: dec.InputOffset(), - } - } - - return nil -} - -func (b *Decimal256Builder) Unmarshal(dec *json.Decoder) error { - for dec.More() { - if err := b.UnmarshalOne(dec); err != nil { - return err - } - } - return nil -} - -// UnmarshalJSON will add the unmarshalled values to this builder. -// -// If the values are strings, they will get parsed with big.ParseFloat using -// a rounding mode of big.ToNearestAway currently. -func (b *Decimal256Builder) UnmarshalJSON(data []byte) error { - dec := json.NewDecoder(bytes.NewReader(data)) - t, err := dec.Token() - if err != nil { - return err - } - - if delim, ok := t.(json.Delim); !ok || delim != '[' { - return fmt.Errorf("arrow/array: decimal256 builder must unpack from json array, found %s", delim) - } - - return b.Unmarshal(dec) -} - -var ( - _ arrow.Array = (*Decimal256)(nil) - _ Builder = (*Decimal256Builder)(nil) -) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go deleted file mode 100644 index f5321145..00000000 --- a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go +++ /dev/null @@ -1,201 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by the FlatBuffers compiler. DO NOT EDIT. - -package flatbuf - -import ( - flatbuffers "github.com/google/flatbuffers/go" -) - -/// Timestamp is a 64-bit signed integer representing an elapsed time since a -/// fixed epoch, stored in either of four units: seconds, milliseconds, -/// microseconds or nanoseconds, and is optionally annotated with a timezone. -/// -/// Timestamp values do not include any leap seconds (in other words, all -/// days are considered 86400 seconds long). -/// -/// Timestamps with a non-empty timezone -/// ------------------------------------ -/// -/// If a Timestamp column has a non-empty timezone value, its epoch is -/// 1970-01-01 00:00:00 (January 1st 1970, midnight) in the *UTC* timezone -/// (the Unix epoch), regardless of the Timestamp's own timezone. -/// -/// Therefore, timestamp values with a non-empty timezone correspond to -/// physical points in time together with some additional information about -/// how the data was obtained and/or how to display it (the timezone). -/// -/// For example, the timestamp value 0 with the timezone string "Europe/Paris" -/// corresponds to "January 1st 1970, 00h00" in the UTC timezone, but the -/// application may prefer to display it as "January 1st 1970, 01h00" in -/// the Europe/Paris timezone (which is the same physical point in time). -/// -/// One consequence is that timestamp values with a non-empty timezone -/// can be compared and ordered directly, since they all share the same -/// well-known point of reference (the Unix epoch). -/// -/// Timestamps with an unset / empty timezone -/// ----------------------------------------- -/// -/// If a Timestamp column has no timezone value, its epoch is -/// 1970-01-01 00:00:00 (January 1st 1970, midnight) in an *unknown* timezone. -/// -/// Therefore, timestamp values without a timezone cannot be meaningfully -/// interpreted as physical points in time, but only as calendar / clock -/// indications ("wall clock time") in an unspecified timezone. -/// -/// For example, the timestamp value 0 with an empty timezone string -/// corresponds to "January 1st 1970, 00h00" in an unknown timezone: there -/// is not enough information to interpret it as a well-defined physical -/// point in time. -/// -/// One consequence is that timestamp values without a timezone cannot -/// be reliably compared or ordered, since they may have different points of -/// reference. In particular, it is *not* possible to interpret an unset -/// or empty timezone as the same as "UTC". -/// -/// Conversion between timezones -/// ---------------------------- -/// -/// If a Timestamp column has a non-empty timezone, changing the timezone -/// to a different non-empty value is a metadata-only operation: -/// the timestamp values need not change as their point of reference remains -/// the same (the Unix epoch). -/// -/// However, if a Timestamp column has no timezone value, changing it to a -/// non-empty value requires to think about the desired semantics. -/// One possibility is to assume that the original timestamp values are -/// relative to the epoch of the timezone being set; timestamp values should -/// then adjusted to the Unix epoch (for example, changing the timezone from -/// empty to "Europe/Paris" would require converting the timestamp values -/// from "Europe/Paris" to "UTC", which seems counter-intuitive but is -/// nevertheless correct). 
-/// -/// Guidelines for encoding data from external libraries -/// ---------------------------------------------------- -/// -/// Date & time libraries often have multiple different data types for temporal -/// data. In order to ease interoperability between different implementations the -/// Arrow project has some recommendations for encoding these types into a Timestamp -/// column. -/// -/// An "instant" represents a physical point in time that has no relevant timezone -/// (for example, astronomical data). To encode an instant, use a Timestamp with -/// the timezone string set to "UTC", and make sure the Timestamp values -/// are relative to the UTC epoch (January 1st 1970, midnight). -/// -/// A "zoned date-time" represents a physical point in time annotated with an -/// informative timezone (for example, the timezone in which the data was -/// recorded). To encode a zoned date-time, use a Timestamp with the timezone -/// string set to the name of the timezone, and make sure the Timestamp values -/// are relative to the UTC epoch (January 1st 1970, midnight). -/// -/// (There is some ambiguity between an instant and a zoned date-time with the -/// UTC timezone. Both of these are stored the same in Arrow. Typically, -/// this distinction does not matter. If it does, then an application should -/// use custom metadata or an extension type to distinguish between the two cases.) -/// -/// An "offset date-time" represents a physical point in time combined with an -/// explicit offset from UTC. To encode an offset date-time, use a Timestamp -/// with the timezone string set to the numeric timezone offset string -/// (e.g. "+03:00"), and make sure the Timestamp values are relative to -/// the UTC epoch (January 1st 1970, midnight). -/// -/// A "naive date-time" (also called "local date-time" in some libraries) -/// represents a wall clock time combined with a calendar date, but with -/// no indication of how to map this information to a physical point in time. -/// Naive date-times must be handled with care because of this missing -/// information, and also because daylight saving time (DST) may make -/// some values ambiguous or non-existent. A naive date-time may be -/// stored as a struct with Date and Time fields. However, it may also be -/// encoded into a Timestamp column with an empty timezone. The timestamp -/// values should be computed "as if" the timezone of the date-time values -/// was UTC; for example, the naive date-time "January 1st 1970, 00h00" would -/// be encoded as timestamp value 0. -type Timestamp struct { - _tab flatbuffers.Table -} - -func GetRootAsTimestamp(buf []byte, offset flatbuffers.UOffsetT) *Timestamp { - n := flatbuffers.GetUOffsetT(buf[offset:]) - x := &Timestamp{} - x.Init(buf, n+offset) - return x -} - -func (rcv *Timestamp) Init(buf []byte, i flatbuffers.UOffsetT) { - rcv._tab.Bytes = buf - rcv._tab.Pos = i -} - -func (rcv *Timestamp) Table() flatbuffers.Table { - return rcv._tab -} - -func (rcv *Timestamp) Unit() TimeUnit { - o := flatbuffers.UOffsetT(rcv._tab.Offset(4)) - if o != 0 { - return TimeUnit(rcv._tab.GetInt16(o + rcv._tab.Pos)) - } - return 0 -} - -func (rcv *Timestamp) MutateUnit(n TimeUnit) bool { - return rcv._tab.MutateInt16Slot(4, int16(n)) -} - -/// The timezone is an optional string indicating the name of a timezone, -/// one of: -/// -/// * As used in the Olson timezone database (the "tz database" or -/// "tzdata"), such as "America/New_York". -/// * An absolute timezone offset of the form "+XX:XX" or "-XX:XX", -/// such as "+07:30". 
-/// -/// Whether a timezone string is present indicates different semantics about -/// the data (see above). -func (rcv *Timestamp) Timezone() []byte { - o := flatbuffers.UOffsetT(rcv._tab.Offset(6)) - if o != 0 { - return rcv._tab.ByteVector(o + rcv._tab.Pos) - } - return nil -} - -/// The timezone is an optional string indicating the name of a timezone, -/// one of: -/// -/// * As used in the Olson timezone database (the "tz database" or -/// "tzdata"), such as "America/New_York". -/// * An absolute timezone offset of the form "+XX:XX" or "-XX:XX", -/// such as "+07:30". -/// -/// Whether a timezone string is present indicates different semantics about -/// the data (see above). -func TimestampStart(builder *flatbuffers.Builder) { - builder.StartObject(2) -} -func TimestampAddUnit(builder *flatbuffers.Builder, unit TimeUnit) { - builder.PrependInt16Slot(0, int16(unit), 0) -} -func TimestampAddTimezone(builder *flatbuffers.Builder, timezone flatbuffers.UOffsetT) { - builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(timezone), 0) -} -func TimestampEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { - return builder.EndObject() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go new file mode 100644 index 00000000..6504a218 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go @@ -0,0 +1,18 @@ +package aws + +// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing. +type AccountIDEndpointMode string + +const ( + // AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing + AccountIDEndpointModeUnset AccountIDEndpointMode = "" + + // AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present + AccountIDEndpointModePreferred = "preferred" + + // AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity + AccountIDEndpointModeRequired = "required" + + // AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing + AccountIDEndpointModeDisabled = "disabled" +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go new file mode 100644 index 00000000..4152caad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go @@ -0,0 +1,33 @@ +package aws + +// RequestChecksumCalculation controls request checksum calculation workflow +type RequestChecksumCalculation int + +const ( + // RequestChecksumCalculationUnset is the unset value for RequestChecksumCalculation + RequestChecksumCalculationUnset RequestChecksumCalculation = iota + + // RequestChecksumCalculationWhenSupported indicates request checksum will be calculated + // if the operation supports input checksums + RequestChecksumCalculationWhenSupported + + // RequestChecksumCalculationWhenRequired indicates request checksum will be calculated + // if required by the operation or if user elects to set a checksum algorithm in request + RequestChecksumCalculationWhenRequired +) + +// ResponseChecksumValidation controls response checksum validation workflow +type ResponseChecksumValidation int + +const ( + // ResponseChecksumValidationUnset is the unset value for ResponseChecksumValidation + ResponseChecksumValidationUnset ResponseChecksumValidation = iota + + // ResponseChecksumValidationWhenSupported indicates 
response checksum will be validated + // if the operation supports output checksums + ResponseChecksumValidationWhenSupported + + // ResponseChecksumValidationWhenRequired indicates response checksum will only + // be validated if the operation requires output checksum validation + ResponseChecksumValidationWhenRequired +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index 2264200c..a015cc5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -162,6 +162,36 @@ type Config struct { // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or // the shared config profile attribute request_min_compression_size_bytes RequestMinCompressSizeBytes int64 + + // Controls how a resolved AWS account ID is handled for endpoint routing. + AccountIDEndpointMode AccountIDEndpointMode + + // RequestChecksumCalculation determines when request checksum calculation is performed. + // + // There are two possible values for this setting: + // + // 1. RequestChecksumCalculationWhenSupported (default): The checksum is always calculated + // if the operation supports it, regardless of whether the user sets an algorithm in the request. + // + // 2. RequestChecksumCalculationWhenRequired: The checksum is only calculated if the user + // explicitly sets a checksum algorithm in the request. + // + // This setting is sourced from the environment variable AWS_REQUEST_CHECKSUM_CALCULATION + // or the shared config profile attribute "request_checksum_calculation". + RequestChecksumCalculation RequestChecksumCalculation + + // ResponseChecksumValidation determines when response checksum validation is performed + // + // There are two possible values for this setting: + // + // 1. ResponseChecksumValidationWhenSupported (default): The checksum is always validated + // if the operation supports it, regardless of whether the user sets the validation mode to ENABLED in request. + // + // 2. ResponseChecksumValidationWhenRequired: The checksum is only validated if the user + // explicitly sets the validation mode to ENABLED in the request + // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or + // the shared config profile attribute "response_checksum_validation". + ResponseChecksumValidation ResponseChecksumValidation } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go index 781ac0ae..623890e8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go @@ -172,6 +172,17 @@ func (p *CredentialsCache) getCreds() (Credentials, bool) { return *c, true } +// ProviderSources returns a list of where the underlying credential provider +// has been sourced, if available. Returns empty if the provider doesn't implement +// the interface +func (p *CredentialsCache) ProviderSources() []CredentialSource { + asSource, ok := p.provider.(CredentialProviderSource) + if !ok { + return []CredentialSource{} + } + return asSource.ProviderSources() +} + // Invalidate will invalidate the cached credentials. The next call to Retrieve // will cause the provider's Retrieve method to be called. 
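// Editor's note: the sketch below is illustrative only and is not part of the
// vendored patch. It shows how caller code could set the new aws.Config fields
// introduced in the hunks above (AccountIDEndpointMode, RequestChecksumCalculation,
// ResponseChecksumValidation). Only those field and constant names come from the
// diff; the package, function name, and region value are assumptions.
package example

import "github.com/aws/aws-sdk-go-v2/aws"

// newConfigSketch builds a config that opts into the stricter "when required"
// checksum behaviour and prefers account-ID based endpoint routing.
func newConfigSketch() aws.Config {
	return aws.Config{
		Region:                     "us-east-1",
		AccountIDEndpointMode:      aws.AccountIDEndpointModePreferred,
		RequestChecksumCalculation: aws.RequestChecksumCalculationWhenRequired,
		ResponseChecksumValidation: aws.ResponseChecksumValidationWhenRequired,
	}
}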
func (p *CredentialsCache) Invalidate() { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go index 714d4ad8..4ad2ee44 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -70,6 +70,56 @@ func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) { fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with") } +// CredentialSource is the source of the credential provider. +// A provider can have multiple credential sources: For example, a provider that reads a profile, calls ECS to +// get credentials and then assumes a role using STS will have all these as part of its provider chain. +type CredentialSource int + +const ( + // CredentialSourceUndefined is the sentinel zero value + CredentialSourceUndefined CredentialSource = iota + // CredentialSourceCode credentials resolved from code, cli parameters, session object, or client instance + CredentialSourceCode + // CredentialSourceEnvVars credentials resolved from environment variables + CredentialSourceEnvVars + // CredentialSourceEnvVarsSTSWebIDToken credentials resolved from environment variables for assuming a role with STS using a web identity token + CredentialSourceEnvVarsSTSWebIDToken + // CredentialSourceSTSAssumeRole credentials resolved from STS using AssumeRole + CredentialSourceSTSAssumeRole + // CredentialSourceSTSAssumeRoleSaml credentials resolved from STS using assume role with SAML + CredentialSourceSTSAssumeRoleSaml + // CredentialSourceSTSAssumeRoleWebID credentials resolved from STS using assume role with web identity + CredentialSourceSTSAssumeRoleWebID + // CredentialSourceSTSFederationToken credentials resolved from STS using a federation token + CredentialSourceSTSFederationToken + // CredentialSourceSTSSessionToken credentials resolved from STS using a session token S + CredentialSourceSTSSessionToken + // CredentialSourceProfile credentials resolved from a config file(s) profile with static credentials + CredentialSourceProfile + // CredentialSourceProfileSourceProfile credentials resolved from a source profile in a config file(s) profile + CredentialSourceProfileSourceProfile + // CredentialSourceProfileNamedProvider credentials resolved from a named provider in a config file(s) profile (like EcsContainer) + CredentialSourceProfileNamedProvider + // CredentialSourceProfileSTSWebIDToken credentials resolved from configuration for assuming a role with STS using web identity token in a config file(s) profile + CredentialSourceProfileSTSWebIDToken + // CredentialSourceProfileSSO credentials resolved from an SSO session in a config file(s) profile + CredentialSourceProfileSSO + // CredentialSourceSSO credentials resolved from an SSO session + CredentialSourceSSO + // CredentialSourceProfileSSOLegacy credentials resolved from an SSO session in a config file(s) profile using legacy format + CredentialSourceProfileSSOLegacy + // CredentialSourceSSOLegacy credentials resolved from an SSO session using legacy format + CredentialSourceSSOLegacy + // CredentialSourceProfileProcess credentials resolved from a process in a config file(s) profile + CredentialSourceProfileProcess + // CredentialSourceProcess credentials resolved from a process + CredentialSourceProcess + // CredentialSourceHTTP credentials resolved from an HTTP endpoint + CredentialSourceHTTP + // CredentialSourceIMDS credentials resolved 
from the instance metadata service (IMDS) + CredentialSourceIMDS +) + // A Credentials is the AWS credentials value for individual credential fields. type Credentials struct { // AWS Access key ID @@ -90,6 +140,9 @@ type Credentials struct { // The time the credentials will expire at. Should be ignored if CanExpire // is false. Expires time.Time + + // The ID of the account for the credentials. + AccountID string } // Expired returns if the credentials have expired. @@ -122,6 +175,13 @@ type CredentialsProvider interface { Retrieve(ctx context.Context) (Credentials, error) } +// CredentialProviderSource allows any credential provider to track +// all providers where a credential provider were sourced. For example, if the credentials came from a +// call to a role specified in the profile, this method will give the whole breadcrumb trail +type CredentialProviderSource interface { + ProviderSources() []CredentialSource +} + // CredentialsProviderFunc provides a helper wrapping a function value to // satisfy the CredentialsProvider interface. type CredentialsProviderFunc func(context.Context) (Credentials, error) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go index aa10a9b4..99edbf3e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go @@ -70,6 +70,10 @@ func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found // The SDK will automatically resolve these endpoints per API client using an // internal endpoint resolvers. If you'd like to provide custom endpoint // resolving behavior you can implement the EndpointResolver interface. +// +// Deprecated: This structure was used with the global [EndpointResolver] +// interface, which has been deprecated in favor of service-specific endpoint +// resolution. See the deprecation docs on that interface for more information. type Endpoint struct { // The base URL endpoint the SDK API clients will use to make API calls to. // The SDK will suffix URI path and query elements to this endpoint. @@ -124,6 +128,8 @@ type Endpoint struct { } // EndpointSource is the endpoint source type. +// +// Deprecated: The global [Endpoint] structure is deprecated. type EndpointSource int const ( @@ -161,19 +167,25 @@ func (e *EndpointNotFoundError) Unwrap() error { // API clients will fallback to attempting to resolve the endpoint using its // internal default endpoint resolver. // -// Deprecated: See EndpointResolverWithOptions +// Deprecated: The global endpoint resolution interface is deprecated. The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Setting a value for +// EndpointResolver on aws.Config or service client options will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. type EndpointResolver interface { ResolveEndpoint(service, region string) (Endpoint, error) } // EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface. // -// Deprecated: See EndpointResolverWithOptionsFunc +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. 
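// Editor's note: illustrative sketch only, not part of the vendored patch. It
// exercises the new CredentialsCache.ProviderSources method and the
// CredentialSource values added in the hunks above to check whether credentials
// were ultimately resolved from IMDS. Wrapping an arbitrary provider in
// aws.NewCredentialsCache is an assumption made for the example.
package example

import "github.com/aws/aws-sdk-go-v2/aws"

// usesIMDS reports whether any provider in the chain sourced its credentials
// from the instance metadata service.
func usesIMDS(provider aws.CredentialsProvider) bool {
	cache := aws.NewCredentialsCache(provider)
	for _, src := range cache.ProviderSources() {
		if src == aws.CredentialSourceIMDS {
			return true
		}
	}
	return false
}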
type EndpointResolverFunc func(service, region string) (Endpoint, error) // ResolveEndpoint calls the wrapped function and returns the results. -// -// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { return e(service, region) } @@ -184,11 +196,17 @@ func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, // available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error, // API clients will fallback to attempting to resolve the endpoint using its // internal default endpoint resolver. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverWithOptions interface { ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) } // EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error) // ResolveEndpoint calls the wrapped function and returns the results. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 66d09630..8e930fc6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.24.1" +const goModuleVersion = "1.36.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go index 9bd0dfb1..6d5f0079 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go @@ -139,16 +139,16 @@ func AddRecordResponseTiming(stack *middleware.Stack) error { // raw response within the response metadata. type rawResponseKey struct{} -// addRawResponse middleware adds raw response on to the metadata -type addRawResponse struct{} +// AddRawResponse middleware adds raw response on to the metadata +type AddRawResponse struct{} // ID the identifier for the ClientRequestID -func (m *addRawResponse) ID() string { +func (m *AddRawResponse) ID() string { return "AddRawResponseToMetadata" } // HandleDeserialize adds raw response on the middleware metadata -func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -159,7 +159,7 @@ func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.Des // AddRawResponseToMetadata adds middleware to the middleware stack that // store raw response on to the metadata. 
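// Editor's note: illustrative sketch only, not part of the vendored patch. The
// raw-response middleware above is now exported; one assumed way to attach it
// to every request is through the APIOptions hook on aws.Config, as below. The
// package and function names here are hypothetical.
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
)

// withRawResponse registers AddRawResponseToMetadata so deserialized responses
// keep a reference to the raw response in operation metadata.
func withRawResponse(cfg aws.Config) aws.Config {
	cfg.APIOptions = append(cfg.APIOptions, awsmiddleware.AddRawResponseToMetadata)
	return cfg
}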
func AddRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&addRawResponse{}, middleware.Before) + return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before) } // GetRawResponse returns raw response set on metadata diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go deleted file mode 100644 index b0133f4c..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go +++ /dev/null @@ -1,319 +0,0 @@ -// Package metrics implements metrics gathering for SDK development purposes. -// -// This package is designated as private and is intended for use only by the -// AWS client runtime. The exported API therein is not considered stable and -// is subject to breaking changes without notice. -package metrics - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/aws/smithy-go/middleware" -) - -const ( - // ServiceIDKey is the key for the service ID metric. - ServiceIDKey = "ServiceId" - // OperationNameKey is the key for the operation name metric. - OperationNameKey = "OperationName" - // ClientRequestIDKey is the key for the client request ID metric. - ClientRequestIDKey = "ClientRequestId" - // APICallDurationKey is the key for the API call duration metric. - APICallDurationKey = "ApiCallDuration" - // APICallSuccessfulKey is the key for the API call successful metric. - APICallSuccessfulKey = "ApiCallSuccessful" - // MarshallingDurationKey is the key for the marshalling duration metric. - MarshallingDurationKey = "MarshallingDuration" - // InThroughputKey is the key for the input throughput metric. - InThroughputKey = "InThroughput" - // OutThroughputKey is the key for the output throughput metric. - OutThroughputKey = "OutThroughput" - // RetryCountKey is the key for the retry count metric. - RetryCountKey = "RetryCount" - // HTTPStatusCodeKey is the key for the HTTP status code metric. - HTTPStatusCodeKey = "HttpStatusCode" - // AWSExtendedRequestIDKey is the key for the AWS extended request ID metric. - AWSExtendedRequestIDKey = "AwsExtendedRequestId" - // AWSRequestIDKey is the key for the AWS request ID metric. - AWSRequestIDKey = "AwsRequestId" - // BackoffDelayDurationKey is the key for the backoff delay duration metric. - BackoffDelayDurationKey = "BackoffDelayDuration" - // StreamThroughputKey is the key for the stream throughput metric. - StreamThroughputKey = "Throughput" - // ConcurrencyAcquireDurationKey is the key for the concurrency acquire duration metric. - ConcurrencyAcquireDurationKey = "ConcurrencyAcquireDuration" - // PendingConcurrencyAcquiresKey is the key for the pending concurrency acquires metric. - PendingConcurrencyAcquiresKey = "PendingConcurrencyAcquires" - // SigningDurationKey is the key for the signing duration metric. - SigningDurationKey = "SigningDuration" - // UnmarshallingDurationKey is the key for the unmarshalling duration metric. - UnmarshallingDurationKey = "UnmarshallingDuration" - // TimeToFirstByteKey is the key for the time to first byte metric. - TimeToFirstByteKey = "TimeToFirstByte" - // ServiceCallDurationKey is the key for the service call duration metric. - ServiceCallDurationKey = "ServiceCallDuration" - // EndpointResolutionDurationKey is the key for the endpoint resolution duration metric. - EndpointResolutionDurationKey = "EndpointResolutionDuration" - // AttemptNumberKey is the key for the attempt number metric. 
- AttemptNumberKey = "AttemptNumber" - // MaxConcurrencyKey is the key for the max concurrency metric. - MaxConcurrencyKey = "MaxConcurrency" - // AvailableConcurrencyKey is the key for the available concurrency metric. - AvailableConcurrencyKey = "AvailableConcurrency" -) - -// MetricPublisher provides the interface to provide custom MetricPublishers. -// PostRequestMetrics will be invoked by the MetricCollection middleware to post request. -// PostStreamMetrics will be invoked by ReadCloserWithMetrics to post stream metrics. -type MetricPublisher interface { - PostRequestMetrics(*MetricData) error - PostStreamMetrics(*MetricData) error -} - -// Serializer provides the interface to provide custom Serializers. -// Serialize will transform any input object in its corresponding string representation. -type Serializer interface { - Serialize(obj interface{}) (string, error) -} - -// DefaultSerializer is an implementation of the Serializer interface. -type DefaultSerializer struct{} - -// Serialize uses the default JSON serializer to obtain the string representation of an object. -func (DefaultSerializer) Serialize(obj interface{}) (string, error) { - bytes, err := json.Marshal(obj) - if err != nil { - return "", err - } - return string(bytes), nil -} - -type metricContextKey struct{} - -// MetricContext contains fields to store metric-related information. -type MetricContext struct { - connectionCounter *SharedConnectionCounter - publisher MetricPublisher - data *MetricData -} - -// MetricData stores the collected metric data. -type MetricData struct { - RequestStartTime time.Time - RequestEndTime time.Time - APICallDuration time.Duration - SerializeStartTime time.Time - SerializeEndTime time.Time - MarshallingDuration time.Duration - ResolveEndpointStartTime time.Time - ResolveEndpointEndTime time.Time - EndpointResolutionDuration time.Duration - InThroughput float64 - OutThroughput float64 - RetryCount int - Success uint8 - StatusCode int - ClientRequestID string - ServiceID string - OperationName string - PartitionID string - Region string - RequestContentLength int64 - Stream StreamMetrics - Attempts []AttemptMetrics -} - -// StreamMetrics stores metrics related to streaming data. -type StreamMetrics struct { - ReadDuration time.Duration - ReadBytes int64 - Throughput float64 -} - -// AttemptMetrics stores metrics related to individual attempts. -type AttemptMetrics struct { - ServiceCallStart time.Time - ServiceCallEnd time.Time - ServiceCallDuration time.Duration - FirstByteTime time.Time - TimeToFirstByte time.Duration - ConnRequestedTime time.Time - ConnObtainedTime time.Time - ConcurrencyAcquireDuration time.Duration - CredentialFetchStartTime time.Time - CredentialFetchEndTime time.Time - SignStartTime time.Time - SignEndTime time.Time - SigningDuration time.Duration - DeserializeStartTime time.Time - DeserializeEndTime time.Time - UnMarshallingDuration time.Duration - RetryDelay time.Duration - ResponseContentLength int64 - StatusCode int - RequestID string - ExtendedRequestID string - HTTPClient string - MaxConcurrency int - PendingConnectionAcquires int - AvailableConcurrency int - ActiveRequests int - ReusedConnection bool -} - -// Data returns the MetricData associated with the MetricContext. -func (mc *MetricContext) Data() *MetricData { - return mc.data -} - -// ConnectionCounter returns the SharedConnectionCounter associated with the MetricContext. 
-func (mc *MetricContext) ConnectionCounter() *SharedConnectionCounter { - return mc.connectionCounter -} - -// Publisher returns the MetricPublisher associated with the MetricContext. -func (mc *MetricContext) Publisher() MetricPublisher { - return mc.publisher -} - -// ComputeRequestMetrics calculates and populates derived metrics based on the collected data. -func (md *MetricData) ComputeRequestMetrics() { - - for idx := range md.Attempts { - attempt := &md.Attempts[idx] - attempt.ConcurrencyAcquireDuration = attempt.ConnObtainedTime.Sub(attempt.ConnRequestedTime) - attempt.SigningDuration = attempt.SignEndTime.Sub(attempt.SignStartTime) - attempt.UnMarshallingDuration = attempt.DeserializeEndTime.Sub(attempt.DeserializeStartTime) - attempt.TimeToFirstByte = attempt.FirstByteTime.Sub(attempt.ServiceCallStart) - attempt.ServiceCallDuration = attempt.ServiceCallEnd.Sub(attempt.ServiceCallStart) - } - - md.APICallDuration = md.RequestEndTime.Sub(md.RequestStartTime) - md.MarshallingDuration = md.SerializeEndTime.Sub(md.SerializeStartTime) - md.EndpointResolutionDuration = md.ResolveEndpointEndTime.Sub(md.ResolveEndpointStartTime) - - md.RetryCount = len(md.Attempts) - 1 - - latestAttempt, err := md.LatestAttempt() - - if err != nil { - fmt.Printf("error retrieving attempts data due to: %s. Skipping Throughput metrics", err.Error()) - } else { - - md.StatusCode = latestAttempt.StatusCode - - if md.Success == 1 { - if latestAttempt.ResponseContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { - md.InThroughput = float64(latestAttempt.ResponseContentLength) / latestAttempt.ServiceCallDuration.Seconds() - } - if md.RequestContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { - md.OutThroughput = float64(md.RequestContentLength) / latestAttempt.ServiceCallDuration.Seconds() - } - } - } -} - -// LatestAttempt returns the latest attempt metrics. -// It returns an error if no attempts are initialized. -func (md *MetricData) LatestAttempt() (*AttemptMetrics, error) { - if md.Attempts == nil || len(md.Attempts) == 0 { - return nil, fmt.Errorf("no attempts initialized. NewAttempt() should be called first") - } - return &md.Attempts[len(md.Attempts)-1], nil -} - -// NewAttempt initializes new attempt metrics. -func (md *MetricData) NewAttempt() { - if md.Attempts == nil { - md.Attempts = []AttemptMetrics{} - } - md.Attempts = append(md.Attempts, AttemptMetrics{}) -} - -// SharedConnectionCounter is a counter shared across API calls. -type SharedConnectionCounter struct { - mu sync.Mutex - - activeRequests int - pendingConnectionAcquire int -} - -// ActiveRequests returns the count of active requests. -func (cc *SharedConnectionCounter) ActiveRequests() int { - cc.mu.Lock() - defer cc.mu.Unlock() - - return cc.activeRequests -} - -// PendingConnectionAcquire returns the count of pending connection acquires. -func (cc *SharedConnectionCounter) PendingConnectionAcquire() int { - cc.mu.Lock() - defer cc.mu.Unlock() - - return cc.pendingConnectionAcquire -} - -// AddActiveRequest increments the count of active requests. -func (cc *SharedConnectionCounter) AddActiveRequest() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.activeRequests++ -} - -// RemoveActiveRequest decrements the count of active requests. -func (cc *SharedConnectionCounter) RemoveActiveRequest() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.activeRequests-- -} - -// AddPendingConnectionAcquire increments the count of pending connection acquires. 
-func (cc *SharedConnectionCounter) AddPendingConnectionAcquire() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.pendingConnectionAcquire++ -} - -// RemovePendingConnectionAcquire decrements the count of pending connection acquires. -func (cc *SharedConnectionCounter) RemovePendingConnectionAcquire() { - cc.mu.Lock() - defer cc.mu.Unlock() - - cc.pendingConnectionAcquire-- -} - -// InitMetricContext initializes the metric context with the provided counter and publisher. -// It returns the updated context. -func InitMetricContext( - ctx context.Context, counter *SharedConnectionCounter, publisher MetricPublisher, -) context.Context { - if middleware.GetStackValue(ctx, metricContextKey{}) == nil { - ctx = middleware.WithStackValue(ctx, metricContextKey{}, &MetricContext{ - connectionCounter: counter, - publisher: publisher, - data: &MetricData{ - Attempts: []AttemptMetrics{}, - Stream: StreamMetrics{}, - }, - }) - } - return ctx -} - -// Context returns the metric context from the given context. -// It returns nil if the metric context is not found. -func Context(ctx context.Context) *MetricContext { - mctx := middleware.GetStackValue(ctx, metricContextKey{}) - if mctx == nil { - return nil - } - return mctx.(*MetricContext) -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go index 7ce48c61..128b60a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go @@ -4,6 +4,7 @@ import ( "context" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -11,18 +12,22 @@ import ( func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error { // add error wrapper middleware before operation deserializers so that it can wrap the error response // returned by operation deserializers - return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before) + return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before) } -type requestIDRetriever struct { +// RequestIDRetriever middleware captures the AWS service request ID from the +// raw response. +type RequestIDRetriever struct { } // ID returns the middleware identifier -func (m *requestIDRetriever) ID() string { +func (m *RequestIDRetriever) ID() string { return "RequestIDRetriever" } -func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +// HandleDeserialize pulls the AWS request ID from the response, storing it in +// operation metadata. +func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -41,6 +46,9 @@ func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middlewar if v := resp.Header.Get(h); len(v) != 0 { // set reqID on metadata for successful responses. 
SetRequestIDMetadata(&metadata, v) + + span, _ := tracing.GetSpan(ctx) + span.SetProperty("aws.request_id", v) break } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index af3447dd..6ee3391b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "runtime" + "sort" "strings" "github.com/aws/aws-sdk-go-v2/aws" @@ -30,8 +31,12 @@ const ( FrameworkMetadata AdditionalMetadata ApplicationIdentifier + FeatureMetadata2 ) +// Hardcoded value to specify which version of the user agent we're using +const uaMetadata = "ua/2.1" + func (k SDKAgentKeyType) string() string { switch k { case APIMetadata: @@ -50,6 +55,8 @@ func (k SDKAgentKeyType) string() string { return "lib" case ApplicationIdentifier: return "app" + case FeatureMetadata2: + return "m" case AdditionalMetadata: fallthrough default: @@ -64,12 +71,102 @@ var validChars = map[rune]bool{ '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, } -// requestUserAgent is a build middleware that set the User-Agent for the request. -type requestUserAgent struct { +// UserAgentFeature enumerates tracked SDK features. +type UserAgentFeature string + +// Enumerates UserAgentFeature. +const ( + UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) + + UserAgentFeatureWaiter = "B" + UserAgentFeaturePaginator = "C" + + UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) + UserAgentFeatureRetryModeStandard = "E" + UserAgentFeatureRetryModeAdaptive = "F" + + UserAgentFeatureS3Transfer = "G" + UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) + UserAgentFeatureS3CryptoV2 = "I" // n/a + UserAgentFeatureS3ExpressBucket = "J" + UserAgentFeatureS3AccessGrants = "K" // not yet implemented + + UserAgentFeatureGZIPRequestCompression = "L" + + UserAgentFeatureProtocolRPCV2CBOR = "M" + + UserAgentFeatureAccountIDEndpoint = "O" // DO NOT IMPLEMENT: rules output is not currently defined. SDKs should not parse endpoints for feature information. 
+ UserAgentFeatureAccountIDModePreferred = "P" + UserAgentFeatureAccountIDModeDisabled = "Q" + UserAgentFeatureAccountIDModeRequired = "R" + + UserAgentFeatureRequestChecksumCRC32 = "U" + UserAgentFeatureRequestChecksumCRC32C = "V" + UserAgentFeatureRequestChecksumCRC64 = "W" + UserAgentFeatureRequestChecksumSHA1 = "X" + UserAgentFeatureRequestChecksumSHA256 = "Y" + UserAgentFeatureRequestChecksumWhenSupported = "Z" + UserAgentFeatureRequestChecksumWhenRequired = "a" + UserAgentFeatureResponseChecksumWhenSupported = "b" + UserAgentFeatureResponseChecksumWhenRequired = "c" + + UserAgentFeatureDynamoDBUserAgent = "d" // not yet implemented + + UserAgentFeatureCredentialsCode = "e" + UserAgentFeatureCredentialsJvmSystemProperties = "f" // n/a (this is not a JVM sdk) + UserAgentFeatureCredentialsEnvVars = "g" + UserAgentFeatureCredentialsEnvVarsStsWebIDToken = "h" + UserAgentFeatureCredentialsStsAssumeRole = "i" + UserAgentFeatureCredentialsStsAssumeRoleSaml = "j" // not yet implemented + UserAgentFeatureCredentialsStsAssumeRoleWebID = "k" + UserAgentFeatureCredentialsStsFederationToken = "l" // not yet implemented + UserAgentFeatureCredentialsStsSessionToken = "m" // not yet implemented + UserAgentFeatureCredentialsProfile = "n" + UserAgentFeatureCredentialsProfileSourceProfile = "o" + UserAgentFeatureCredentialsProfileNamedProvider = "p" + UserAgentFeatureCredentialsProfileStsWebIDToken = "q" + UserAgentFeatureCredentialsProfileSso = "r" + UserAgentFeatureCredentialsSso = "s" + UserAgentFeatureCredentialsProfileSsoLegacy = "t" + UserAgentFeatureCredentialsSsoLegacy = "u" + UserAgentFeatureCredentialsProfileProcess = "v" + UserAgentFeatureCredentialsProcess = "w" + UserAgentFeatureCredentialsBoto2ConfigFile = "x" // n/a (this is not boto/Python) + UserAgentFeatureCredentialsAwsSdkStore = "y" // n/a (this is used by .NET based sdk) + UserAgentFeatureCredentialsHTTP = "z" + UserAgentFeatureCredentialsIMDS = "0" +) + +var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{ + aws.CredentialSourceCode: UserAgentFeatureCredentialsCode, + aws.CredentialSourceEnvVars: UserAgentFeatureCredentialsEnvVars, + aws.CredentialSourceEnvVarsSTSWebIDToken: UserAgentFeatureCredentialsEnvVarsStsWebIDToken, + aws.CredentialSourceSTSAssumeRole: UserAgentFeatureCredentialsStsAssumeRole, + aws.CredentialSourceSTSAssumeRoleSaml: UserAgentFeatureCredentialsStsAssumeRoleSaml, + aws.CredentialSourceSTSAssumeRoleWebID: UserAgentFeatureCredentialsStsAssumeRoleWebID, + aws.CredentialSourceSTSFederationToken: UserAgentFeatureCredentialsStsFederationToken, + aws.CredentialSourceSTSSessionToken: UserAgentFeatureCredentialsStsSessionToken, + aws.CredentialSourceProfile: UserAgentFeatureCredentialsProfile, + aws.CredentialSourceProfileSourceProfile: UserAgentFeatureCredentialsProfileSourceProfile, + aws.CredentialSourceProfileNamedProvider: UserAgentFeatureCredentialsProfileNamedProvider, + aws.CredentialSourceProfileSTSWebIDToken: UserAgentFeatureCredentialsProfileStsWebIDToken, + aws.CredentialSourceProfileSSO: UserAgentFeatureCredentialsProfileSso, + aws.CredentialSourceSSO: UserAgentFeatureCredentialsSso, + aws.CredentialSourceProfileSSOLegacy: UserAgentFeatureCredentialsProfileSsoLegacy, + aws.CredentialSourceSSOLegacy: UserAgentFeatureCredentialsSsoLegacy, + aws.CredentialSourceProfileProcess: UserAgentFeatureCredentialsProfileProcess, + aws.CredentialSourceProcess: UserAgentFeatureCredentialsProcess, + aws.CredentialSourceHTTP: UserAgentFeatureCredentialsHTTP, + aws.CredentialSourceIMDS: 
UserAgentFeatureCredentialsIMDS, +} + +// RequestUserAgent is a build middleware that set the User-Agent for the request. +type RequestUserAgent struct { sdkAgent, userAgent *smithyhttp.UserAgentBuilder + features map[UserAgentFeature]struct{} } -// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the +// NewRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the // request. // // User-Agent example: @@ -79,14 +176,16 @@ type requestUserAgent struct { // X-Amz-User-Agent example: // // aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 -func newRequestUserAgent() *requestUserAgent { +func NewRequestUserAgent() *RequestUserAgent { userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() addProductName(userAgent) + addUserAgentMetadata(userAgent) addProductName(sdkAgent) - r := &requestUserAgent{ + r := &RequestUserAgent{ sdkAgent: sdkAgent, userAgent: userAgent, + features: map[UserAgentFeature]struct{}{}, } addSDKMetadata(r) @@ -94,7 +193,7 @@ func newRequestUserAgent() *requestUserAgent { return r } -func addSDKMetadata(r *requestUserAgent) { +func addSDKMetadata(r *RequestUserAgent) { r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) @@ -108,6 +207,10 @@ func addProductName(builder *smithyhttp.UserAgentBuilder) { builder.AddKeyValue(aws.SDKName, aws.SDKVersion) } +func addUserAgentMetadata(builder *smithyhttp.UserAgentBuilder) { + builder.AddKey(uaMetadata) +} + // AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. func AddUserAgentKey(key string) func(*middleware.Stack) error { return func(stack *middleware.Stack) error { @@ -162,18 +265,18 @@ func AddRequestUserAgentMiddleware(stack *middleware.Stack) error { return err } -func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) { - id := (*requestUserAgent)(nil).ID() +func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) { + id := (*RequestUserAgent)(nil).ID() bm, ok := stack.Build.Get(id) if !ok { - bm = newRequestUserAgent() + bm = NewRequestUserAgent() err := stack.Build.Add(bm, middleware.After) if err != nil { return nil, err } } - requestUserAgent, ok := bm.(*requestUserAgent) + requestUserAgent, ok := bm.(*RequestUserAgent) if !ok { return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id) } @@ -182,34 +285,48 @@ func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error } // AddUserAgentKey adds the component identified by name to the User-Agent string. -func (u *requestUserAgent) AddUserAgentKey(key string) { +func (u *RequestUserAgent) AddUserAgentKey(key string) { u.userAgent.AddKey(strings.Map(rules, key)) } // AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) { +func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) { u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value)) } -// AddUserAgentKey adds the component identified by name to the User-Agent string. -func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { +// AddUserAgentFeature adds the feature ID to the tracking list to be emitted +// in the final User-Agent string. 
+func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) { + u.features[feature] = struct{}{} +} + +// AddSDKAgentKey adds the component identified by name to the User-Agent string. +func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { // TODO: should target sdkAgent u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key)) } -// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) { +// AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string. +func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) { // TODO: should target sdkAgent u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value)) } +// AddCredentialsSource adds the credential source as a feature on the User-Agent string +func (u *RequestUserAgent) AddCredentialsSource(source aws.CredentialSource) { + x, ok := credentialSourceToFeature[source] + if ok { + u.AddUserAgentFeature(x) + } +} + // ID the name of the middleware. -func (u *requestUserAgent) ID() string { +func (u *RequestUserAgent) ID() string { return "UserAgent" } // HandleBuild adds or appends the constructed user agent to the request. -func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( +func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( out middleware.BuildOutput, metadata middleware.Metadata, err error, ) { switch req := in.Request.(type) { @@ -224,12 +341,15 @@ func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildI return next.HandleBuild(ctx, in) } -func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { +func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { const userAgent = "User-Agent" + if len(u.features) > 0 { + updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features)) + } updateHTTPHeader(request, userAgent, u.userAgent.Build()) } -func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { +func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { const sdkAgent = "X-Amz-User-Agent" updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build()) } @@ -259,3 +379,13 @@ func rules(r rune) rune { return '-' } } + +func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string { + fs := make([]string, 0, len(features)) + for f := range features { + fs = append(fs, string(f)) + } + + sort.Strings(fs) + return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ",")) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md index 1e1da56b..ddb162b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -1,3 +1,47 @@ +# v1.6.10 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 + +# v1.6.9 (2025-02-14) + +* **Bug Fix**: Remove max limit on event stream messages + +# v1.6.8 (2025-01-24) + +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.6.7 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
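The newly exported RequestUserAgent middleware above also gains feature tracking: feature IDs registered on it are stored in a set, sorted, and emitted as an extra metadata segment ahead of the regular User-Agent content when the request is built. A minimal sketch of driving that API directly (the key/value metadata here is made up; in a real client the middleware is attached by AddRequestUserAgentMiddleware when the operation stack is assembled):

```go
package main

import (
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
)

func main() {
	// Construct the middleware directly for illustration.
	ua := awsmiddleware.NewRequestUserAgent()

	// Track a feature; duplicates collapse because features are kept in a set.
	ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureCredentialsIMDS)
	ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureCredentialsIMDS)

	// Ordinary key/value metadata still works as before (values are illustrative).
	ua.AddUserAgentKeyValue("exporter", "dev")

	fmt.Println(ua.ID()) // "UserAgent" - the middleware registers on the Build step
}
```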
+ +# v1.6.6 (2024-10-04) + +* No change notes available for this release. + +# v1.6.5 (2024-09-20) + +* No change notes available for this release. + +# v1.6.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + +# v1.6.3 (2024-06-28) + +* No change notes available for this release. + +# v1.6.2 (2024-03-29) + +* No change notes available for this release. + +# v1.6.1 (2024-02-21) + +* No change notes available for this release. + +# v1.6.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. + # v1.5.4 (2023-12-07) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go index 6759e90e..01981f46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -3,4 +3,4 @@ package eventstream // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.5.4" +const goModuleVersion = "1.6.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go index f7427da0..1a77654f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go @@ -10,9 +10,6 @@ const preludeLen = 8 const preludeCRCLen = 4 const msgCRCLen = 4 const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen -const maxPayloadLen = 1024 * 1024 * 16 // 16MB -const maxHeadersLen = 1024 * 128 // 128KB -const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen var crc32IEEETable = crc32.MakeTable(crc32.IEEE) @@ -82,28 +79,13 @@ func (p messagePrelude) PayloadLen() uint32 { } func (p messagePrelude) ValidateLens() error { - if p.Length == 0 || p.Length > maxMsgLen { + if p.Length == 0 { return LengthError{ Part: "message prelude", - Want: maxMsgLen, + Want: minMsgLen, Have: int(p.Length), } } - if p.HeadersLen > maxHeadersLen { - return LengthError{ - Part: "message headers", - Want: maxHeadersLen, - Have: int(p.HeadersLen), - } - } - if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { - return LengthError{ - Part: "message payload", - Want: maxPayloadLen, - Have: int(payloadLen), - } - } - return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go new file mode 100644 index 00000000..6669a3dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go @@ -0,0 +1,61 @@ +package query + +import ( + "net/url" + "strconv" +) + +// Array represents the encoding of Query lists and sets. A Query array is a +// representation of a list of values of a fixed type. A serialized array might +// look like the following: +// +// ListName.member.1=foo +// &ListName.member.2=bar +// &Listname.member.3=baz +type Array struct { + // The query values to add the array to. + values url.Values + // The array's prefix, which includes the names of all parent structures + // and ends with the name of the list. For example, the prefix might be + // "ParentStructure.ListName". This prefix will be used to form the full + // keys for each element in the list. For example, an entry might have the + // key "ParentStructure.ListName.member.MemberName.1". 
+ // + // When the array is not flat the prefix will contain the memberName otherwise the memberName is ignored + prefix string + // Elements are stored in values, so we keep track of the list size here. + size int32 + // Empty lists are encoded as "=", if we add a value later we will + // remove this encoding + emptyValue Value +} + +func newArray(values url.Values, prefix string, flat bool, memberName string) *Array { + emptyValue := newValue(values, prefix, flat) + emptyValue.String("") + + if !flat { + // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead + prefix = prefix + keySeparator + memberName + } + + return &Array{ + values: values, + prefix: prefix, + emptyValue: emptyValue, + } +} + +// Value adds a new element to the Query Array. Returns a Value type used to +// encode the array element. +func (a *Array) Value() Value { + if a.size == 0 { + delete(a.values, a.emptyValue.key) + } + + // Query lists start a 1, so adjust the size first + a.size++ + // Lists can't have flat members + // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead + return newValue(a.values, a.prefix+keySeparator+strconv.FormatInt(int64(a.size), 10), false) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go new file mode 100644 index 00000000..2ecf9241 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go @@ -0,0 +1,80 @@ +package query + +import ( + "io" + "net/url" + "sort" +) + +// Encoder is a Query encoder that supports construction of Query body +// values using methods. +type Encoder struct { + // The query values that will be built up to manage encoding. + values url.Values + // The writer that the encoded body will be written to. + writer io.Writer + Value +} + +// NewEncoder returns a new Query body encoder +func NewEncoder(writer io.Writer) *Encoder { + values := url.Values{} + return &Encoder{ + values: values, + writer: writer, + Value: newBaseValue(values), + } +} + +// Encode returns the []byte slice representing the current +// state of the Query encoder. +func (e Encoder) Encode() error { + ws, ok := e.writer.(interface{ WriteString(string) (int, error) }) + if !ok { + // Fall back to less optimal byte slice casting if WriteString isn't available. + ws = &wrapWriteString{writer: e.writer} + } + + // Get the keys and sort them to have a stable output + keys := make([]string, 0, len(e.values)) + for k := range e.values { + keys = append(keys, k) + } + sort.Strings(keys) + isFirstEntry := true + for _, key := range keys { + queryValues := e.values[key] + escapedKey := url.QueryEscape(key) + for _, value := range queryValues { + if !isFirstEntry { + if _, err := ws.WriteString(`&`); err != nil { + return err + } + } else { + isFirstEntry = false + } + if _, err := ws.WriteString(escapedKey); err != nil { + return err + } + if _, err := ws.WriteString(`=`); err != nil { + return err + } + if _, err := ws.WriteString(url.QueryEscape(value)); err != nil { + return err + } + } + } + return nil +} + +// wrapWriteString wraps an io.Writer to provide a WriteString method +// where one is not available. +type wrapWriteString struct { + writer io.Writer +} + +// WriteString writes a string to the wrapped writer by casting it to +// a byte array first. 
+func (w wrapWriteString) WriteString(v string) (int, error) { + return w.writer.Write([]byte(v)) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go new file mode 100644 index 00000000..dea242b8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go @@ -0,0 +1,78 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Map represents the encoding of Query maps. A Query map is a representation +// of a mapping of arbitrary string keys to arbitrary values of a fixed type. +// A Map differs from an Object in that the set of keys is not fixed, in that +// the values must all be of the same type, and that map entries are ordered. +// A serialized map might look like the following: +// +// MapName.entry.1.key=Foo +// &MapName.entry.1.value=spam +// &MapName.entry.2.key=Bar +// &MapName.entry.2.value=eggs +type Map struct { + // The query values to add the map to. + values url.Values + // The map's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.MapName". This prefix will be used to form the full + // keys for each key-value pair of the map. For example, a value might have + // the key "ParentStructure.MapName.1.value". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string + // Whether the map is flat or not. A map that is not flat will produce the + // following entries to the url.Values for a given key-value pair: + // MapName.entry.1.KeyLocationName=mykey + // MapName.entry.1.ValueLocationName=myvalue + // A map that is flat will produce the following: + // MapName.1.KeyLocationName=mykey + // MapName.1.ValueLocationName=myvalue + flat bool + // The location name of the key. In most cases this should be "key". + keyLocationName string + // The location name of the value. In most cases this should be "value". + valueLocationName string + // Elements are stored in values, so we keep track of the list size here. + size int32 +} + +func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map { + return &Map{ + values: values, + prefix: prefix, + flat: flat, + keyLocationName: keyLocationName, + valueLocationName: valueLocationName, + } +} + +// Key adds the given named key to the Query map. +// Returns a Value encoder that should be used to encode a Query value type. 
+func (m *Map) Key(name string) Value { + // Query lists start a 1, so adjust the size first + m.size++ + var key string + var value string + if m.flat { + key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName) + value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName) + } else { + key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName) + value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName) + } + + // The key can only be a string, so we just go ahead and set it here + newValue(m.values, key, false).String(name) + + // Maps can't have flat members + return newValue(m.values, value, false) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go new file mode 100644 index 00000000..36034479 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go @@ -0,0 +1,62 @@ +package query + +import ( + "context" + "fmt" + "io/ioutil" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the +// operation serializer that will convert the query request body to a GET +// operation with the query message in the HTTP request querystring. +func AddAsGetRequestMiddleware(stack *middleware.Stack) error { + return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After) +} + +type asGetRequest struct{} + +func (*asGetRequest) ID() string { return "Query:AsGetRequest" } + +func (m *asGetRequest) HandleSerialize( + ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := input.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request) + } + + req.Method = "GET" + + // If the stream is not set, nothing else to do. + stream := req.GetStream() + if stream == nil { + return next.HandleSerialize(ctx, input) + } + + // Clear the stream since there will not be any body. + req.Header.Del("Content-Type") + req, err = req.SetStream(nil) + if err != nil { + return out, metadata, fmt.Errorf("unable update request body %w", err) + } + input.Request = req + + // Update request query with the body's query string value. + delim := "" + if len(req.URL.RawQuery) != 0 { + delim = "&" + } + + b, err := ioutil.ReadAll(stream) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request body %w", err) + } + req.URL.RawQuery += delim + string(b) + + return next.HandleSerialize(ctx, input) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go new file mode 100644 index 00000000..305a8ace --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go @@ -0,0 +1,68 @@ +package query + +import "net/url" + +// Object represents the encoding of Query structures and unions. A Query +// object is a representation of a mapping of string keys to arbitrary +// values where there is a fixed set of keys whose values each have their +// own known type. A serialized object might look like the following: +// +// ObjectName.Foo=value +// &ObjectName.Bar=5 +type Object struct { + // The query values to add the object to. 
+ values url.Values + // The object's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.ObjectName". This prefix will be used to form the full + // keys for each member of the object. For example, a member might have the + // key "ParentStructure.ObjectName.MemberName". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string +} + +func newObject(values url.Values, prefix string) *Object { + return &Object{ + values: values, + prefix: prefix, + } +} + +// Key adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. +func (o *Object) Key(name string) Value { + return o.key(name, false) +} + +// KeyWithValues adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query list of values. +func (o *Object) KeyWithValues(name string) Value { + return o.keyWithValues(name, false) +} + +// FlatKey adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. The +// value will be flattened if it is a map or array. +func (o *Object) FlatKey(name string) Value { + return o.key(name, true) +} + +func (o *Object) key(name string, flatValue bool) Value { + if o.prefix != "" { + // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead + return newValue(o.values, o.prefix+keySeparator+name, flatValue) + } + return newValue(o.values, name, flatValue) +} + +func (o *Object) keyWithValues(name string, flatValue bool) Value { + if o.prefix != "" { + // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead + return newAppendValue(o.values, o.prefix+keySeparator+name, flatValue) + } + return newAppendValue(o.values, name, flatValue) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go new file mode 100644 index 00000000..8063c592 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go @@ -0,0 +1,117 @@ +package query + +import ( + "math/big" + "net/url" + + "github.com/aws/smithy-go/encoding/httpbinding" +) + +const keySeparator = "." + +// Value represents a Query Value type. +type Value struct { + // The query values to add the value to. + values url.Values + // The value's key, which will form the prefix for complex types. + key string + // Whether the value should be flattened or not if it's a flattenable type. + flat bool + queryValue httpbinding.QueryValue +} + +func newValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, false), + } +} + +func newAppendValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, true), + } +} + +func newBaseValue(values url.Values) Value { + return Value{ + values: values, + queryValue: httpbinding.NewQueryValue(nil, "", false), + } +} + +// Array returns a new Array encoder. 
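Taken together, the Array, Encoder, Map, Object, and Value types form a small builder API for AWS Query request bodies: the Encoder owns the url.Values, Object/Array/Map mint prefixed keys, and Encode writes the keys out sorted and URL-escaped. A rough usage sketch of how the pieces compose (the shape and member names are made up):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
)

func main() {
	var body bytes.Buffer
	enc := query.NewEncoder(&body)

	obj := enc.Object()
	obj.Key("Action").String("ListQueues")

	// Non-flat list: members are serialized as QueueNames.member.N
	names := obj.Key("QueueNames").Array("member")
	names.Value().String("orders")
	names.Value().String("billing")

	if err := enc.Encode(); err != nil {
		panic(err)
	}

	// Keys are sorted before writing, so the body is deterministic:
	// Action=ListQueues&QueueNames.member.1=orders&QueueNames.member.2=billing
	fmt.Println(body.String())
}
```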
+func (qv Value) Array(locationName string) *Array { + return newArray(qv.values, qv.key, qv.flat, locationName) +} + +// Object returns a new Object encoder. +func (qv Value) Object() *Object { + return newObject(qv.values, qv.key) +} + +// Map returns a new Map encoder. +func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { + return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) +} + +// Base64EncodeBytes encodes v as a base64 query string value. +// This is intended to enable compatibility with the JSON encoder. +func (qv Value) Base64EncodeBytes(v []byte) { + qv.queryValue.Blob(v) +} + +// Boolean encodes v as a query string value +func (qv Value) Boolean(v bool) { + qv.queryValue.Boolean(v) +} + +// String encodes v as a query string value +func (qv Value) String(v string) { + qv.queryValue.String(v) +} + +// Byte encodes v as a query string value +func (qv Value) Byte(v int8) { + qv.queryValue.Byte(v) +} + +// Short encodes v as a query string value +func (qv Value) Short(v int16) { + qv.queryValue.Short(v) +} + +// Integer encodes v as a query string value +func (qv Value) Integer(v int32) { + qv.queryValue.Integer(v) +} + +// Long encodes v as a query string value +func (qv Value) Long(v int64) { + qv.queryValue.Long(v) +} + +// Float encodes v as a query string value +func (qv Value) Float(v float32) { + qv.queryValue.Float(v) +} + +// Double encodes v as a query string value +func (qv Value) Double(v float64) { + qv.queryValue.Double(v) +} + +// BigInteger encodes v as a query string value +func (qv Value) BigInteger(v *big.Int) { + qv.queryValue.BigInteger(v) +} + +// BigDecimal encodes v as a query string value +func (qv Value) BigDecimal(v *big.Float) { + qv.queryValue.BigDecimal(v) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go new file mode 100644 index 00000000..1bce78a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go @@ -0,0 +1,85 @@ +package restjson + +import ( + "encoding/json" + "io" + "strings" + + "github.com/aws/smithy-go" +) + +// GetErrorInfo util looks for code, __type, and message members in the +// json body. These members are optionally available, and the function +// returns the value of member if it is available. This function is useful to +// identify the error code, msg in a REST JSON error response. +func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) { + var errInfo struct { + Code string + Type string `json:"__type"` + Message string + } + + err = decoder.Decode(&errInfo) + if err != nil { + if err == io.EOF { + return errorType, message, nil + } + return errorType, message, err + } + + // assign error type + if len(errInfo.Code) != 0 { + errorType = errInfo.Code + } else if len(errInfo.Type) != 0 { + errorType = errInfo.Type + } + + // assign error message + if len(errInfo.Message) != 0 { + message = errInfo.Message + } + + // sanitize error + if len(errorType) != 0 { + errorType = SanitizeErrorCode(errorType) + } + + return errorType, message, nil +} + +// SanitizeErrorCode sanitizes the errorCode string . +// The rule for sanitizing is if a `:` character is present, then take only the +// contents before the first : character in the value. +// If a # character is present, then take only the contents after the +// first # character in the value. 
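Under those sanitization rules a namespaced or annotated code reduces to the bare error name; a short sketch with made-up codes:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
)

func main() {
	// "#" strips a leading namespace, ":" strips a trailing annotation.
	fmt.Println(restjson.SanitizeErrorCode("com.amazonaws.example#AccessDeniedException")) // AccessDeniedException
	fmt.Println(restjson.SanitizeErrorCode("ThrottlingException: Rate exceeded"))          // ThrottlingException
}
```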
+func SanitizeErrorCode(errorCode string) string { + if strings.ContainsAny(errorCode, ":") { + errorCode = strings.SplitN(errorCode, ":", 2)[0] + } + + if strings.ContainsAny(errorCode, "#") { + errorCode = strings.SplitN(errorCode, "#", 2)[1] + } + + return errorCode +} + +// GetSmithyGenericAPIError returns smithy generic api error and an error interface. +// Takes in json decoder, and error Code string as args. The function retrieves error message +// and error code from the decoder body. If errorCode of length greater than 0 is passed in as +// an argument, it is used instead. +func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) { + errorType, message, err := GetErrorInfo(decoder) + if err != nil { + return nil, err + } + + if len(errorCode) == 0 { + errorCode = errorType + } + + return &smithy.GenericAPIError{ + Code: errorCode, + Message: message, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go new file mode 100644 index 00000000..8c783641 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go @@ -0,0 +1,20 @@ +package ratelimit + +import "context" + +// None implements a no-op rate limiter which effectively disables client-side +// rate limiting (also known as "retry quotas"). +// +// GetToken does nothing and always returns a nil error. The returned +// token-release function does nothing, and always returns a nil error. +// +// AddTokens does nothing and always returns a nil error. +var None = &none{} + +type none struct{} + +func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) { + return func() error { return nil }, nil +} + +func (*none) AddTokens(v uint) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go new file mode 100644 index 00000000..bfa5bf7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go @@ -0,0 +1,51 @@ +package retry + +import ( + "context" + + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" +) + +type attemptMetrics struct { + Attempts metrics.Int64Counter + Errors metrics.Int64Counter + + AttemptDuration metrics.Float64Histogram +} + +func newAttemptMetrics(meter metrics.Meter) (*attemptMetrics, error) { + m := &attemptMetrics{} + var err error + + m.Attempts, err = meter.Int64Counter("client.call.attempts", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "{attempt}" + o.Description = "The number of attempts for an individual operation" + }) + if err != nil { + return nil, err + } + m.Errors, err = meter.Int64Counter("client.call.errors", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "{error}" + o.Description = "The number of errors for an operation" + }) + if err != nil { + return nil, err + } + m.AttemptDuration, err = meter.Float64Histogram("client.call.attempt_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "The time it takes to connect to the service, send the request, and get back HTTP status code and headers (including time queued waiting to be sent)" + }) + if err != nil { + return nil, err + } + + return m, nil +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", 
middleware.GetOperationName(ctx)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index dc703d48..52d59b04 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -2,17 +2,22 @@ package retry import ( "context" + "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" "strconv" "strings" "time" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go" + "github.com/aws/aws-sdk-go-v2/aws" awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" smithymiddle "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" "github.com/aws/smithy-go/transport/http" ) @@ -35,10 +40,17 @@ type Attempt struct { // attempts are reached. LogAttempts bool + // A Meter instance for recording retry-related metrics. + OperationMeter metrics.Meter + retryer aws.RetryerV2 requestCloner RequestCloner } +// define the threshold at which we will consider certain kind of errors to be probably +// caused by clock skew +const skewThreshold = 4 * time.Minute + // NewAttemptMiddleware returns a new Attempt retry middleware. func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt { m := &Attempt{ @@ -48,6 +60,10 @@ func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optF for _, fn := range optFns { fn(m) } + if m.OperationMeter == nil { + m.OperationMeter = metrics.NopMeterProvider{}.Meter("") + } + return m } @@ -73,6 +89,11 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn maxAttempts := r.retryer.MaxAttempts() releaseRetryToken := nopRelease + retryMetrics, err := newAttemptMetrics(r.OperationMeter) + if err != nil { + return out, metadata, err + } + for { attemptNum++ attemptInput := in @@ -86,8 +107,29 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn AttemptClockSkew: attemptClockSkew, }) + // Setting clock skew to be used on other context (like signing) + ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) + var attemptResult AttemptResult + + attemptCtx, span := tracing.StartSpan(attemptCtx, "Attempt", func(o *tracing.SpanOptions) { + o.Properties.Set("operation.attempt", attemptNum) + }) + retryMetrics.Attempts.Add(ctx, 1, withOperationMetadata(ctx)) + + start := sdk.NowTime() out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) + elapsed := sdk.NowTime().Sub(start) + + retryMetrics.AttemptDuration.Record(ctx, float64(elapsed)/1e9, withOperationMetadata(ctx)) + if err != nil { + retryMetrics.Errors.Add(ctx, 1, withOperationMetadata(ctx), func(o *metrics.RecordMetricOptions) { + o.Properties.Set("exception.type", errorType(err)) + }) + } + + span.End() + attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) // AttemptResult Retried states that the attempt was not successful, and @@ -185,6 +227,8 @@ func (r *Attempt) handleAttempt( return out, attemptResult, nopRelease, err } + err = wrapAsClockSkew(ctx, err) + //------------------------------ // Is Retryable and Should Retry //------------------------------ @@ -226,13 +270,6 @@ func (r *Attempt) handleAttempt( // that time. 
Potentially early exist if the sleep is canceled via the // context. retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err) - mctx := metrics.Context(ctx) - if mctx != nil { - attempt, err := mctx.Data().LatestAttempt() - if err != nil { - attempt.RetryDelay = retryDelay - } - } if reqErr != nil { return out, attemptResult, releaseRetryToken, reqErr } @@ -247,6 +284,37 @@ func (r *Attempt) handleAttempt( return out, attemptResult, releaseRetryToken, err } +// errors that, if detected when we know there's a clock skew, +// can be retried and have a high chance of success +var possibleSkewCodes = map[string]struct{}{ + "InvalidSignatureException": {}, + "SignatureDoesNotMatch": {}, + "AuthFailure": {}, +} + +var definiteSkewCodes = map[string]struct{}{ + "RequestExpired": {}, + "RequestInTheFuture": {}, + "RequestTimeTooSkewed": {}, +} + +// wrapAsClockSkew checks if this error could be related to a clock skew +// error and if so, wrap the error. +func wrapAsClockSkew(ctx context.Context, err error) error { + var v interface{ ErrorCode() string } + if !errors.As(err, &v) { + return err + } + if _, ok := definiteSkewCodes[v.ErrorCode()]; ok { + return &retryableClockSkewError{Err: err} + } + _, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()] + if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode { + return &retryableClockSkewError{Err: err} + } + return err +} + // MetricsHeader attaches SDK request metric header for retries to the transport type MetricsHeader struct{} @@ -338,3 +406,13 @@ func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresO } return nil } + +// Determines the value of exception.type for metrics purposes. We prefer an +// API-specific error code, otherwise it's just the Go type for the value. +func errorType(err error) string { + var terr smithy.APIError + if errors.As(err, &terr) { + return terr.ErrorCode() + } + return fmt.Sprintf("%T", err) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go index 987affdd..1b485f99 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go @@ -2,6 +2,7 @@ package retry import ( "errors" + "fmt" "net" "net/url" "strings" @@ -115,7 +116,13 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { case errors.As(err, &conErr) && conErr.ConnectionError(): retryable = true + case strings.Contains(err.Error(), "use of closed network connection"): + fallthrough case strings.Contains(err.Error(), "connection reset"): + // The errors "connection reset" and "use of closed network connection" + // are effectively the same. It appears to be the difference between + // sync and async read of TCP RST in the stdlib's net.Conn read loop. + // see #2737 retryable = true case errors.As(err, &urlErr): @@ -199,3 +206,23 @@ func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary { return aws.TrueTernary } + +// retryableClockSkewError marks errors that can be caused by clock skew +// (difference between server time and client time). +// This is returned when there's certain confidence that adjusting the client time +// could allow a retry to succeed +type retryableClockSkewError struct{ Err error } + +func (e *retryableClockSkewError) Error() string { + return fmt.Sprintf("Probable clock skew error: %v", e.Err) +} + +// Unwrap returns the wrapped error. 
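On the consuming side, these retry changes (and the RateLimiter documentation further below, which points at the new ratelimit.None) are reachable through the standard retryer options. A sketch of wiring a customized retryer into config loading, with illustrative values for the attempt cap:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Standard retryer with client-side rate limiting (retry quotas) disabled
	// and an arbitrary attempt cap.
	retryer := func() aws.Retryer {
		return retry.NewStandard(func(o *retry.StandardOptions) {
			o.RateLimiter = ratelimit.None
			o.MaxAttempts = 5
		})
	}

	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRetryer(retryer))
	if err != nil {
		panic(err)
	}
	_ = cfg
}
```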
+func (e *retryableClockSkewError) Unwrap() error { + return e.Err +} + +// RetryableError allows the retryer to retry this request +func (e *retryableClockSkewError) RetryableError() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go index 25abffc8..d5ea9322 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go @@ -123,6 +123,17 @@ type StandardOptions struct { // Provides the rate limiting strategy for rate limiting attempt retries // across all attempts the retryer is being used with. + // + // A RateLimiter operates as a token bucket with a set capacity, where + // attempt failures events consume tokens. A retry attempt that attempts to + // consume more tokens than what's available results in operation failure. + // The default implementation is parameterized as follows: + // - a capacity of 500 (DefaultRetryRateTokens) + // - a retry caused by a timeout costs 10 tokens (DefaultRetryCost) + // - a retry caused by other errors costs 5 tokens (DefaultRetryTimeoutCost) + // - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement) + // + // You can disable rate limiting by setting this field to ratelimit.None. RateLimiter RateLimiter // The cost to deduct from the RateLimiter's token bucket per retry. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go index ca738f23..d99b32ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -4,10 +4,11 @@ package v4 var IgnoredHeaders = Rules{ ExcludeList{ MapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, - "Expect": struct{}{}, + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + "Expect": struct{}{}, + "Transfer-Encoding": struct{}{}, }, }, } @@ -38,7 +39,6 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, @@ -46,7 +46,6 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Grant-Write-Acp": struct{}{}, "X-Amz-Metadata-Directive": struct{}{}, "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, "X-Amz-Server-Side-Encryption": struct{}{}, "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, "X-Amz-Server-Side-Encryption-Context": struct{}{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go index f39a369a..8a46220a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -11,11 +11,11 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" 
"github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -85,12 +85,12 @@ func (m *dynamicPayloadSigningMiddleware) HandleFinalize( } if req.IsHTTPS() { - return (&unsignedPayload{}).HandleFinalize(ctx, in, next) + return (&UnsignedPayload{}).HandleFinalize(ctx, in, next) } - return (&computePayloadSHA256{}).HandleFinalize(ctx, in, next) + return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next) } -// unsignedPayload sets the SigV4 request payload hash to unsigned. +// UnsignedPayload sets the SigV4 request payload hash to unsigned. // // Will not set the Unsigned Payload magic SHA value, if a SHA has already been // stored in the context. (e.g. application pre-computed SHA256 before making @@ -98,21 +98,21 @@ func (m *dynamicPayloadSigningMiddleware) HandleFinalize( // // This middleware does not check the X-Amz-Content-Sha256 header, if that // header is serialized a middleware must translate it into the context. -type unsignedPayload struct{} +type UnsignedPayload struct{} // AddUnsignedPayloadMiddleware adds unsignedPayload to the operation // middleware stack func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&unsignedPayload{}, "ResolveEndpointV2", middleware.After) + return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After) } // ID returns the unsignedPayload identifier -func (m *unsignedPayload) ID() string { +func (m *UnsignedPayload) ID() string { return computePayloadHashMiddlewareID } // HandleFinalize sets the payload hash magic value to the unsigned sentinel. -func (m *unsignedPayload) HandleFinalize( +func (m *UnsignedPayload) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, @@ -123,7 +123,7 @@ func (m *unsignedPayload) HandleFinalize( return next.HandleFinalize(ctx, in) } -// computePayloadSHA256 computes SHA256 payload hash to sign. +// ComputePayloadSHA256 computes SHA256 payload hash to sign. // // Will not set the Unsigned Payload magic SHA value, if a SHA has already been // stored in the context. (e.g. application pre-computed SHA256 before making @@ -131,12 +131,12 @@ func (m *unsignedPayload) HandleFinalize( // // This middleware does not check the X-Amz-Content-Sha256 header, if that // header is serialized a middleware must translate it into the context. -type computePayloadSHA256 struct{} +type ComputePayloadSHA256 struct{} // AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the // operation middleware stack func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&computePayloadSHA256{}, "ResolveEndpointV2", middleware.After) + return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) } // RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the @@ -147,13 +147,13 @@ func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { } // ID is the middleware name -func (m *computePayloadSHA256) ID() string { +func (m *ComputePayloadSHA256) ID() string { return computePayloadHashMiddlewareID } // HandleFinalize computes the payload hash for the request, storing it to the // context. This is a no-op if a caller has previously set that value. 
-func (m *computePayloadSHA256) HandleFinalize( +func (m *ComputePayloadSHA256) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, @@ -162,6 +162,9 @@ func (m *computePayloadSHA256) HandleFinalize( return next.HandleFinalize(ctx, in) } + _, span := tracing.StartSpan(ctx, "ComputePayloadSHA256") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &HashComputationError{ @@ -187,6 +190,7 @@ func (m *computePayloadSHA256) HandleFinalize( ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) + span.End() return next.HandleFinalize(ctx, in) } @@ -196,35 +200,35 @@ func (m *computePayloadSHA256) HandleFinalize( // Use this to disable computing the Payload SHA256 checksum and instead use // UNSIGNED-PAYLOAD for the SHA256 value. func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{}) return err } -// contentSHA256Header sets the X-Amz-Content-Sha256 header value to +// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to // the Payload hash stored in the context. -type contentSHA256Header struct{} +type ContentSHA256Header struct{} // AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the // operation middleware stack func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) + return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) } // RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware // from the operation middleware stack func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Remove((*contentSHA256Header)(nil).ID()) + _, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID()) return err } // ID returns the ContentSHA256HeaderMiddleware identifier -func (m *contentSHA256Header) ID() string { +func (m *ContentSHA256Header) ID() string { return "SigV4ContentSHA256Header" } // HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash // stored in the context. 
-func (m *contentSHA256Header) HandleFinalize( +func (m *ContentSHA256Header) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, @@ -301,22 +305,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} } - mctx := metrics.Context(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchStartTime = sdk.NowTime() - } - } - credentials, err := s.credentialsProvider.Retrieve(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchEndTime = sdk.NowTime() - } - } - if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} } @@ -337,20 +326,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl }) } - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignStartTime = sdk.NowTime() - } - } - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignEndTime = sdk.NowTime() - } - } - if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} } @@ -360,18 +336,21 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl return next.HandleFinalize(ctx, in) } -type streamingEventsPayload struct{} +// StreamingEventsPayload signs input event stream messages. +type StreamingEventsPayload struct{} // AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. func AddStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&streamingEventsPayload{}, middleware.Before) + return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before) } -func (s *streamingEventsPayload) ID() string { +// ID identifies the middleware. +func (s *StreamingEventsPayload) ID() string { return computePayloadHashMiddlewareID } -func (s *streamingEventsPayload) HandleFinalize( +// HandleFinalize marks the input stream to be signed with SigV4. +func (s *StreamingEventsPayload) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, @@ -393,8 +372,9 @@ func GetSignedRequestSignature(r *http.Request) ([]byte, error) { const authHeaderSignatureElem = "Signature=" if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { - ps := strings.Split(auth, ", ") + ps := strings.Split(auth, ",") for _, p := range ps { + p = strings.TrimSpace(p) if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { sig := p[len(authHeaderSignatureElem):] if len(sig) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go index bb61904e..7ed91d5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -1,48 +1,41 @@ -// Package v4 implements signing for AWS V4 signer +// Package v4 implements the AWS signature version 4 algorithm (commonly known +// as SigV4). 
// -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. +// For more information about SigV4, see [Signing AWS API requests] in the IAM +// user guide. // -// # Standalone Signer +// While this implementation CAN work in an external context, it is developed +// primarily for SDK use and you may encounter fringe behaviors around header +// canonicalization. // -// Generally using the signer outside of the SDK should not require any additional +// # Pre-escaping a request URI // -// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires +// AWS v4 signature validation requires that the canonical string's URI path +// component must be the escaped form of the HTTP request's path. +// +// The Go HTTP client will perform escaping automatically on the HTTP request. +// This may cause signature validation errors because the request differs from +// the URI path or query from which the signature was generated. // -// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. +// Because of this, we recommend that you explicitly escape the request when +// using this signer outside of the SDK to prevent possible signature mismatch. +// This can be done by setting URL.Opaque on the request. The signer will +// prefer that value, falling back to the return of URL.EscapedPath if unset. // -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: +// When setting URL.Opaque you must do so in the form of: // // "///" // // // e.g. // "//example.com/some/path" // -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath +// The leading "//" and hostname are required or the escaping will not work +// correctly. // -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. +// The TestStandaloneSign unit test provides a complete example of using the +// signer outside of the SDK and pre-escaping the URI path. // -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. 
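A compact sketch of that standalone flow, following the guidance above: pre-escape the path via URL.Opaque, compute the payload hash, then sign. Hostname, service, region, and credentials are placeholders:

```go
package main

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.com/some/path%20with%20spaces", nil)
	// Pre-escape the path so the canonical request matches what is sent on the
	// wire; the signer prefers URL.Opaque when it is set.
	req.URL.Opaque = "//example.com/some/path%20with%20spaces"

	// SigV4 needs a payload hash; for a GET with no body this is the SHA-256
	// of the empty string.
	sum := sha256.Sum256([]byte{})
	payloadHash := hex.EncodeToString(sum[:])

	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}
	signer := v4.NewSigner()
	if err := signer.SignHTTP(context.TODO(), creds, req, payloadHash,
		"execute-api", "us-east-1", time.Now()); err != nil {
		panic(err)
	}
	// req now carries the Authorization and X-Amz-Date headers.
}
```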
+// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html package v4 import ( @@ -401,7 +394,18 @@ func (s *httpSigner) buildCredentialScope() string { func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { query := url.Values{} unsignedHeaders := http.Header{} + + // A list of headers to be converted to lower case to mitigate a limitation from S3 + lowerCaseHeaders := map[string]string{ + "X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508 + "X-Amz-Request-Payer": "x-amz-request-payer", // see #2764 + } + for k, h := range header { + if newKey, ok := lowerCaseHeaders[k]; ok { + k = newKey + } + if r.IsValid(k) { query[k] = h } else { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go index 26d90719..8d7c35a9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go @@ -1,13 +1,16 @@ package http import ( + "context" "crypto/tls" - "github.com/aws/aws-sdk-go-v2/aws" "net" "net/http" "reflect" "sync" "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go/tracing" ) // Defaults for the HTTPTransportBuilder. @@ -179,7 +182,7 @@ func defaultHTTPTransport() *http.Transport { tr := &http.Transport{ Proxy: http.ProxyFromEnvironment, - DialContext: dialer.DialContext, + DialContext: traceDialContext(dialer.DialContext), TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout, MaxIdleConns: DefaultHTTPTransportMaxIdleConns, MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost, @@ -194,6 +197,35 @@ func defaultHTTPTransport() *http.Transport { return tr } +type dialContext func(ctx context.Context, network, addr string) (net.Conn, error) + +func traceDialContext(dc dialContext) dialContext { + return func(ctx context.Context, network, addr string) (net.Conn, error) { + span, _ := tracing.GetSpan(ctx) + span.SetProperty("net.peer.name", addr) + + conn, err := dc(ctx, network, addr) + if err != nil { + return conn, err + } + + raddr := conn.RemoteAddr() + if raddr == nil { + return conn, err + } + + host, port, err := net.SplitHostPort(raddr.String()) + if err != nil { // don't blow up just because we couldn't parse + span.SetProperty("net.peer.addr", raddr.String()) + } else { + span.SetProperty("net.peer.host", host) + span.SetProperty("net.peer.port", port) + } + + return conn, err + } +} + // shallowCopyStruct creates a shallow copy of the passed in source struct, and // returns that copy of the same struct type. 
func shallowCopyStruct(src interface{}) interface{} { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go index 8fd14cec..a1ad20fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go @@ -12,18 +12,20 @@ import ( func AddResponseErrorMiddleware(stack *middleware.Stack) error { // add error wrapper middleware before request id retriever middleware so that it can wrap the error response // returned by operation deserializers - return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) } -type responseErrorWrapper struct { +// ResponseErrorWrapper wraps operation errors with ResponseError. +type ResponseErrorWrapper struct { } // ID returns the middleware identifier -func (m *responseErrorWrapper) ID() string { +func (m *ResponseErrorWrapper) ID() string { return "ResponseErrorWrapper" } -func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +// HandleDeserialize wraps the stack error with smithyhttp.ResponseError. +func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md new file mode 100644 index 00000000..e7174e02 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -0,0 +1,857 @@ +# v1.29.14 (2025-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.13 (2025-04-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.12 (2025-03-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.11 (2025-03-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.10 (2025-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.9 (2025-03-04.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.8 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.7 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.6 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.5 (2025-02-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.4 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.3 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.2 (2025-01-24) + +* **Bug Fix**: Fix env config naming and usage of deprecated ioutil +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. 
+ +# v1.29.1 (2025-01-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2025-01-15) + +* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2025-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.10 (2025-01-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.9 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2025-01-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-12-19) + +* **Bug Fix**: Fix improper use of printf-style functions. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.4 (2024-11-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.3 (2024-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.2 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.1 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2024-10-16) + +* **Feature**: Adds the LoadOptions hook `WithBaseEndpoint` for setting global endpoint override in-code. + +# v1.27.43 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.42 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.41 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.40 (2024-10-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.39 (2024-09-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.38 (2024-09-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.37 (2024-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.36 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.35 (2024-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.34 (2024-09-16) + +* **Bug Fix**: Read `AWS_CONTAINER_CREDENTIALS_FULL_URI` env variable if set when reading a profile with `credential_source`. 
Also ensure `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` is always read before it + +# v1.27.33 (2024-09-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.32 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.31 (2024-08-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.30 (2024-08-23) + +* **Bug Fix**: Don't fail credentials unit tests if credentials are found on a file + +# v1.27.29 (2024-08-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.28 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.17 (2024-06-03) + +* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.11 (2024-04-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.10 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.9 (2024-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.8 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.7 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.6 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.5 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.4 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.3 (2024-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.1 (2024-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.6 (2024-01-22) + +* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.5 (2024-01-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.4 (2024-01-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.3 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.2 (2023-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.1 (2023-12-08) + +* **Bug Fix**: Correct loading of [services *] sections into shared config. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2023-12-07) + +* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.12 (2023-12-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.11 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.10 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.9 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.8 (2023-11-28.3) + +* **Bug Fix**: Correct resolution of S3Express auth disable toggle. + +# v1.25.7 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.6 (2023-11-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.5 (2023-11-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.3 (2023-11-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.2 (2023-11-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.1 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-11-14) + +* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2023-11-13) + +* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-11-09.2) + +* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.3 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2023-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2023-11-06) + +* No change notes available for this release. + +# v1.22.0 (2023-11-02) + +* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2023-10-24) + +* No change notes available for this release. + +# v1.19.0 (2023-10-16) + +* **Feature**: Modify logic of retrieving user agent appID from env config + +# v1.18.45 (2023-10-12) + +* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.43 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.42 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.41 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.40 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.39 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.38 (2023-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.37 (2023-08-23) + +* No change notes available for this release. 
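The v1.21.0 and v1.22.0 entries above describe driving endpoint overrides and IMDSv1 fallback behaviour purely through environment variables. Below is a minimal sketch of how an application might exercise that, assuming the `AWS_ENDPOINT_URL` and `AWS_EC2_METADATA_V1_DISABLED` variable names that appear in `env_config.go` later in this diff; the localhost endpoint and region values are hypothetical placeholders, and in practice the variables would be set in the shell rather than in code.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Hypothetical values; normally exported in the environment, not set here.
	os.Setenv("AWS_ENDPOINT_URL", "http://localhost:4566") // global endpoint override
	os.Setenv("AWS_EC2_METADATA_V1_DISABLED", "true")      // disable IMDSv1 fallback
	os.Setenv("AWS_REGION", "us-west-2")

	// LoadDefaultConfig picks these values up through its environment config source.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatalf("failed loading config, %v", err)
	}
	fmt.Println("resolved region:", cfg.Region)
}
```

Per the v1.21.0 entry, the same settings can also live in the AWS shared configuration file instead of the environment.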
+ +# v1.18.36 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.35 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.34 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.33 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.32 (2023-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.29 (2023-07-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.28 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.27 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.26 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.25 (2023-05-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.24 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.23 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.22 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.21 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.20 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.19 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.18 (2023-03-16) + +* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
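The v1.18.18 fix above concerns setting RoleARN through a functional option on the STS web identity provider. A hedged sketch of that call pattern follows, assuming the `config.WithWebIdentityRoleCredentialOptions` helper and a placeholder role ARN; a web identity token source (for example `AWS_WEB_IDENTITY_TOKEN_FILE`) must already be configured for the provider to actually resolve credentials.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
)

func main() {
	// The role ARN is a placeholder; setting RoleARN through this functional
	// option is the behaviour addressed by the v1.18.18 bug fix above.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithWebIdentityRoleCredentialOptions(func(o *stscreds.WebIdentityRoleOptions) {
			o.RoleARN = "arn:aws:iam::123456789012:role/example-role"
		}),
	)
	if err != nil {
		log.Fatalf("failed loading config, %v", err)
	}
	_ = cfg // pass cfg to a service client constructor as usual
}
```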
+ +# v1.18.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2023-02-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2023-01-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-01-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2023-01-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2022-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2022-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2022-12-15) + +* **Bug Fix**: Unify logic between shared config and in finding home directory +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.4 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2022-11-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2022-11-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2022-11-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2022-11-11) + +* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 +* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2022-11-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2022-09-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2022-08-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2022-08-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-08-14) + +* **Feature**: Add alternative mechanism for determning the users `$HOME` or `%USERPROFILE%` location when the environment variables are not present. 
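The v1.18.0 entry above adds token refresh via the SSOTokenProvider when the SSO credential provider is in use. A minimal sketch of exercising that path, assuming a shared config profile (the hypothetical "sso-profile") that carries the relevant sso_* settings; the refresh itself happens inside the credential chain that LoadDefaultConfig resolves for the profile.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	ctx := context.TODO()

	// "sso-profile" is a placeholder profile name from the shared config file.
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithSharedConfigProfile("sso-profile"),
	)
	if err != nil {
		log.Fatalf("failed loading config, %v", err)
	}

	// Retrieving credentials goes through the SSO provider (and its token
	// refresh) that was wired up for this profile.
	creds, err := cfg.Credentials.Retrieve(ctx)
	if err != nil {
		log.Fatalf("failed retrieving credentials, %v", err)
	}
	fmt.Println("credential source:", creds.Source)
}
```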
+ +# v1.16.1 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-08-10) + +* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`. + +# v1.15.17 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.16 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.15 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.14 (2022-07-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.13 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.12 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.11 (2022-06-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.10 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.9 (2022-05-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.8 (2022-05-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.7 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.6 (2022-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.5 (2022-05-09) + +* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682) + +# v1.15.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-02-24) + +* **Feature**: Adds support for loading RetryMaxAttempts and RetryMod from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if custome retryer has not been specified. See [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about and how to use these new options. +* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589) +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-01-28) + +* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. 
[#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR. +* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. +* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-01-07) + +* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2021-12-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-12-02) + +* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.3 (2021-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.2 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.1 (2021-11-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.3 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-09-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-09-02) + +* **Feature**: Add support for S3 Multi-Region Access Point ARNs. + +# v1.7.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-07-15) + +* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. 
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: Adds configuration setting for enabling endpoint discovery. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-20) + +* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. +* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go new file mode 100644 index 00000000..09d9b631 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -0,0 +1,228 @@ +package config + +import ( + "context" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// defaultAWSConfigResolvers are a slice of functions that will resolve external +// configuration values into AWS configuration values. +// +// This will setup the AWS configuration's Region, +var defaultAWSConfigResolvers = []awsConfigResolver{ + // Resolves the default configuration the SDK's aws.Config will be + // initialized with. + resolveDefaultAWSConfig, + + // Sets the logger to be used. Could be user provided logger, and client + // logging mode. + resolveLogger, + resolveClientLogMode, + + // Sets the HTTP client and configuration to use for making requests using + // the HTTP transport. 
+ resolveHTTPClient, + resolveCustomCABundle, + + // Sets the endpoint resolving behavior the API Clients will use for making + // requests to. Clients default to their own clients this allows overrides + // to be specified. The resolveEndpointResolver option is deprecated, but + // we still need to set it for backwards compatibility on config + // construction. + resolveEndpointResolver, + resolveEndpointResolverWithOptions, + + // Sets the retry behavior API clients will use within their retry attempt + // middleware. Defaults to unset, allowing API clients to define their own + // retry behavior. + resolveRetryer, + + // Sets the region the API Clients should use for making requests to. + resolveRegion, + resolveEC2IMDSRegion, + resolveDefaultRegion, + + // Sets the additional set of middleware stack mutators that will custom + // API client request pipeline middleware. + resolveAPIOptions, + + // Resolves the DefaultsMode that should be used by SDK clients. If this + // mode is set to DefaultsModeAuto. + // + // Comes after HTTPClient and CustomCABundle to ensure the HTTP client is + // configured if provided before invoking IMDS if mode is auto. Comes + // before resolving credentials so that those subsequent clients use the + // configured auto mode. + resolveDefaultsModeOptions, + + // Sets the resolved credentials the API clients will use for + // authentication. Provides the SDK's default credential chain. + // + // Should probably be the last step in the resolve chain to ensure that all + // other configurations are resolved first in case downstream credentials + // implementations depend on or can be configured with earlier resolved + // configuration options. + resolveCredentials, + + // Sets the resolved bearer authentication token API clients will use for + // httpBearerAuth authentication scheme. + resolveBearerAuthToken, + + // Sets the sdk app ID if present in env var or shared config profile + resolveAppID, + + resolveBaseEndpoint, + + // Sets the DisableRequestCompression if present in env var or shared config profile + resolveDisableRequestCompression, + + // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile + resolveRequestMinCompressSizeBytes, + + // Sets the AccountIDEndpointMode if present in env var or shared config profile + resolveAccountIDEndpointMode, + + // Sets the RequestChecksumCalculation if present in env var or shared config profile + resolveRequestChecksumCalculation, + + // Sets the ResponseChecksumValidation if present in env var or shared config profile + resolveResponseChecksumValidation, +} + +// A Config represents a generic configuration value or set of values. This type +// will be used by the AWSConfigResolvers to extract +// +// General the Config type will use type assertion against the Provider interfaces +// to extract specific data from the Config. +type Config interface{} + +// A loader is used to load external configuration data and returns it as +// a generic Config type. +// +// The loader should return an error if it fails to load the external configuration +// or the configuration data is malformed, or required components missing. +type loader func(context.Context, configs) (Config, error) + +// An awsConfigResolver will extract configuration data from the configs slice +// using the provider interfaces to extract specific functionality. The extracted +// configuration values will be written to the AWS Config value. 
+// +// The resolver should return an error if it fails to extract the data, the +// data is malformed, or incomplete. +type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error + +// configs is a slice of Config values. These values will be used by the +// AWSConfigResolvers to extract external configuration values to populate the +// AWS Config type. +// +// Use AppendFromLoaders to add additional external Config values that are +// loaded from external sources. +// +// Use ResolveAWSConfig after external Config values have been added or loaded +// to extract the loaded configuration values into the AWS Config. +type configs []Config + +// AppendFromLoaders iterates over the slice of loaders passed in calling each +// loader function in order. The external config value returned by the loader +// will be added to the returned configs slice. +// +// If a loader returns an error this method will stop iterating and return +// that error. +func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) { + for _, fn := range loaders { + cfg, err := fn(ctx, cs) + if err != nil { + return nil, err + } + + cs = append(cs, cfg) + } + + return cs, nil +} + +// ResolveAWSConfig returns an AWS configuration populated with values by calling +// the resolvers slice passed in. Each resolver is called in order. Any resolver +// may overwrite the AWS Configuration value of a previous resolver. +// +// If a resolver returns an error this method will return that error, and stop +// iterating over the resolvers. +func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) { + var cfg aws.Config + + for _, fn := range resolvers { + if err := fn(ctx, &cfg, cs); err != nil { + return aws.Config{}, err + } + } + + return cfg, nil +} + +// ResolveConfig calls the provided function, passing the slice of configuration sources. +// This implements the aws.ConfigResolver interface. +func (cs configs) ResolveConfig(f func(configs []interface{}) error) error { + var cfgs []interface{} + for i := range cs { + cfgs = append(cfgs, cs[i]) + } + return f(cfgs) +} + +// LoadDefaultConfig reads the SDK's default external configurations, and +// populates an AWS Config with the values from the external configurations. +// +// An optional variadic set of additional Config values can be provided as input +// that will be prepended to the configs slice. Use this to add custom configuration. +// The custom configurations must satisfy the respective providers for their data +// or the custom data will be ignored by the resolvers and config loaders. +// +// cfg, err := config.LoadDefaultConfig( context.TODO(), +// config.WithSharedConfigProfile("test-profile"), +// ) +// if err != nil { +// panic(fmt.Sprintf("failed loading config, %v", err)) +// } +// +// The default configuration sources are: +// * Environment Variables +// * Shared Configuration and Shared Credentials files.
+func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) { + var options LoadOptions + for _, optFn := range optFns { + if err := optFn(&options); err != nil { + return aws.Config{}, err + } + } + + // assign Load Options to configs + var cfgCpy = configs{options} + + cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options)) + if err != nil { + return aws.Config{}, err + } + + cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers) + if err != nil { + return aws.Config{}, err + } + + return cfg, nil +} + +func resolveConfigLoaders(options *LoadOptions) []loader { + loaders := make([]loader, 2) + loaders[0] = loadEnvConfig + + // specification of a profile should cause a load failure if it doesn't exist + if os.Getenv(awsProfileEnv) != "" || options.SharedConfigProfile != "" { + loaders[1] = loadSharedConfig + } else { + loaders[1] = loadSharedConfigIgnoreNotExist + } + + return loaders +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go new file mode 100644 index 00000000..20b66367 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go @@ -0,0 +1,47 @@ +package config + +import ( + "context" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" +) + +const execEnvVar = "AWS_EXECUTION_ENV" + +// DefaultsModeOptions is the set of options that are used to configure +type DefaultsModeOptions struct { + // The SDK configuration defaults mode. Defaults to legacy if not specified. + // + // Supported modes are: auto, cross-region, in-region, legacy, mobile, standard + Mode aws.DefaultsMode + + // The EC2 Instance Metadata Client that should be used when performing environment + // discovery when aws.DefaultsModeAuto is set. + // + // If not specified the SDK will construct a client if the instance metadata service has not been disabled by + // the AWS_EC2_METADATA_DISABLED environment variable. + IMDSClient *imds.Client +} + +func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) { + getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{}) + // honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection. + select { + case <-ctx.Done(): + return aws.RuntimeEnvironment{}, err + default: + } + + var imdsRegion string + if err == nil { + imdsRegion = getRegionOutput.Region + } + + return aws.RuntimeEnvironment{ + EnvironmentIdentifier: aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)), + Region: envConfig.Region, + EC2InstanceMetadataRegion: imdsRegion, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go new file mode 100644 index 00000000..aab7164e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go @@ -0,0 +1,20 @@ +// Package config provides utilities for loading configuration from multiple +// sources that can be used to configure the SDK's API clients, and utilities. +// +// The config package will load configuration from environment variables, AWS +// shared configuration file (~/.aws/config), and AWS shared credentials file +// (~/.aws/credentials). +// +// Use the LoadDefaultConfig to load configuration from all the SDK's supported +// sources, and resolve credentials using the SDK's default credential chain. 
+// +// LoadDefaultConfig allows for a variadic list of additional Config sources that can +// provide one or more configuration values which can be used to programmatically control the resolution +// of a specific value, or allow for broader range of additional configuration sources not supported by the SDK. +// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will +// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources +// implement the same provider interface, priority will be handled by the order in which the sources were passed in. +// +// A number of helpers (prefixed by “With“) are provided in this package that implement their respective provider +// interface. These helpers should be used for overriding configuration programmatically at runtime. +package config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go new file mode 100644 index 00000000..9db507e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -0,0 +1,918 @@ +package config + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" +) + +// CredentialsSourceName provides a name of the provider when config is +// loaded from environment. +const CredentialsSourceName = "EnvConfigCredentials" + +// Environment variables that will be read for configuration values. +const ( + awsAccessKeyIDEnv = "AWS_ACCESS_KEY_ID" + awsAccessKeyEnv = "AWS_ACCESS_KEY" + + awsSecretAccessKeyEnv = "AWS_SECRET_ACCESS_KEY" + awsSecretKeyEnv = "AWS_SECRET_KEY" + + awsSessionTokenEnv = "AWS_SESSION_TOKEN" + + awsContainerCredentialsFullURIEnv = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + awsContainerCredentialsRelativeURIEnv = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + awsContainerAuthorizationTokenEnv = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + + awsRegionEnv = "AWS_REGION" + awsDefaultRegionEnv = "AWS_DEFAULT_REGION" + + awsProfileEnv = "AWS_PROFILE" + awsDefaultProfileEnv = "AWS_DEFAULT_PROFILE" + + awsSharedCredentialsFileEnv = "AWS_SHARED_CREDENTIALS_FILE" + + awsConfigFileEnv = "AWS_CONFIG_FILE" + + awsCABundleEnv = "AWS_CA_BUNDLE" + + awsWebIdentityTokenFileEnv = "AWS_WEB_IDENTITY_TOKEN_FILE" + + awsRoleARNEnv = "AWS_ROLE_ARN" + awsRoleSessionNameEnv = "AWS_ROLE_SESSION_NAME" + + awsEnableEndpointDiscoveryEnv = "AWS_ENABLE_ENDPOINT_DISCOVERY" + + awsS3UseARNRegionEnv = "AWS_S3_USE_ARN_REGION" + + awsEc2MetadataServiceEndpointModeEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE" + + awsEc2MetadataServiceEndpointEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT" + + awsEc2MetadataDisabledEnv = "AWS_EC2_METADATA_DISABLED" + awsEc2MetadataV1DisabledEnv = "AWS_EC2_METADATA_V1_DISABLED" + + awsS3DisableMultiRegionAccessPointsEnv = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" + + awsUseDualStackEndpointEnv = "AWS_USE_DUALSTACK_ENDPOINT" + + awsUseFIPSEndpointEnv = "AWS_USE_FIPS_ENDPOINT" + + awsDefaultsModeEnv = "AWS_DEFAULTS_MODE" + + awsMaxAttemptsEnv = "AWS_MAX_ATTEMPTS" + awsRetryModeEnv = "AWS_RETRY_MODE" + awsSdkUaAppIDEnv = "AWS_SDK_UA_APP_ID" + + awsIgnoreConfiguredEndpointURLEnv = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" + awsEndpointURLEnv = "AWS_ENDPOINT_URL" + + awsDisableRequestCompressionEnv = "AWS_DISABLE_REQUEST_COMPRESSION" + 
awsRequestMinCompressionSizeBytesEnv = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" + + awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" + + awsAccountIDEnv = "AWS_ACCOUNT_ID" + awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE" + + awsRequestChecksumCalculation = "AWS_REQUEST_CHECKSUM_CALCULATION" + awsResponseChecksumValidation = "AWS_RESPONSE_CHECKSUM_VALIDATION" +) + +var ( + credAccessEnvKeys = []string{ + awsAccessKeyIDEnv, + awsAccessKeyEnv, + } + credSecretEnvKeys = []string{ + awsSecretAccessKeyEnv, + awsSecretKeyEnv, + } + regionEnvKeys = []string{ + awsRegionEnv, + awsDefaultRegionEnv, + } + profileEnvKeys = []string{ + awsProfileEnv, + awsDefaultProfileEnv, + } +) + +// EnvConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type EnvConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Credentials aws.Credentials + + // ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials + // using the endpointcreds.Provider + ContainerCredentialsEndpoint string + + // ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve + // credentials from the container endpoint. + ContainerCredentialsRelativePath string + + // ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization + // header when attempting to retrieve credentials from the container credentials endpoint. + ContainerAuthorizationToken string + + // Region value will instruct the SDK where to make service API requests to. If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-west-2 + // AWS_DEFAULT_REGION=us-west-2 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // AWS_DEFAULT_PROFILE=my_profile + SharedConfigProfile string + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. 
+ // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the config. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery aws.EndpointDiscoveryEnableState + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion *bool + + // Specifies if the EC2 IMDS service client is enabled. + // + // AWS_EC2_METADATA_DISABLED=true + EC2IMDSClientEnableState imds.ClientEnableState + + // Specifies if EC2 IMDSv1 fallback is disabled. + // + // AWS_EC2_METADATA_V1_DISABLED=true + EC2IMDSv1Disabled *bool + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 + EC2IMDSEndpointMode imds.EndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254 + EC2IMDSEndpoint string + + // Specifies if the S3 service should disable multi-region access points + // support. + // + // AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true + S3DisableMultiRegionAccessPoints *bool + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // AWS_USE_DUALSTACK_ENDPOINT=true + UseDualStackEndpoint aws.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // AWS_USE_FIPS_ENDPOINT=true + UseFIPSEndpoint aws.FIPSEndpointState + + // Specifies the SDK Defaults Mode used by services. + // + // AWS_DEFAULTS_MODE=standard + DefaultsMode aws.DefaultsMode + + // Specifies the maximum number attempts an API client will call an + // operation that fails with a retryable error. + // + // AWS_MAX_ATTEMPTS=3 + RetryMaxAttempts int + + // Specifies the retry model the API client will be created with. + // + // aws_retry_mode=standard + RetryMode aws.RetryMode + + // aws sdk app ID that can be added to user agent header string + AppID string + + // Flag used to disable configured endpoints. + IgnoreConfiguredEndpoints *bool + + // Value to contain configured endpoints to be propagated to + // corresponding endpoint resolution field. 
+ BaseEndpoint string + + // determine if request compression is allowed, default to false + // retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION + DisableRequestCompression *bool + + // inclusive threshold request body size to trigger compression, + // default to 10240 and must be within 0 and 10485760 bytes inclusive + // retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES + RequestMinCompressSizeBytes *int64 + + // Whether S3Express auth is disabled. + // + // This will NOT prevent requests from being made to S3Express buckets, it + // will only bypass the modified endpoint routing and signing behaviors + // associated with the feature. + S3DisableExpressAuth *bool + + // Indicates whether account ID will be required/ignored in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + + // Indicates whether request checksum should be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + + // Indicates whether response checksum should be validated + ResponseChecksumValidation aws.ResponseChecksumValidation +} + +// loadEnvConfig reads configuration values from the OS's environment variables. +// Returning the a Config typed EnvConfig to satisfy the ConfigLoader func type. +func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) { + return NewEnvConfig() +} + +// NewEnvConfig retrieves the SDK's environment configuration. +// See `EnvConfig` for the values that will be retrieved. +func NewEnvConfig() (EnvConfig, error) { + var cfg EnvConfig + + creds := aws.Credentials{ + Source: CredentialsSourceName, + } + setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) + setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) + if creds.HasKeys() { + creds.AccountID = os.Getenv(awsAccountIDEnv) + creds.SessionToken = os.Getenv(awsSessionTokenEnv) + cfg.Credentials = creds + } + + cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsFullURIEnv) + cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativeURIEnv) + cfg.ContainerAuthorizationToken = os.Getenv(awsContainerAuthorizationTokenEnv) + + setStringFromEnvVal(&cfg.Region, regionEnvKeys) + setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys) + + cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnv) + cfg.SharedConfigFile = os.Getenv(awsConfigFileEnv) + + cfg.CustomCABundle = os.Getenv(awsCABundleEnv) + + cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFileEnv) + + cfg.RoleARN = os.Getenv(awsRoleARNEnv) + cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnv) + + cfg.AppID = os.Getenv(awsSdkUaAppIDEnv) + + if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompressionEnv}); err != nil { + return cfg, err + } + if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytesEnv}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil { + return cfg, err + } + + if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnv}); err != nil { + return cfg, err + } + + if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnv}); err != nil { + return cfg, err + } + + setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabledEnv}) + if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnv}); err != nil { + return cfg, err + } + cfg.EC2IMDSEndpoint = 
os.Getenv(awsEc2MetadataServiceEndpointEnv) + if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnv}); err != nil { + return cfg, err + } + + if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointsEnv}); err != nil { + return cfg, err + } + + if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpointEnv}); err != nil { + return cfg, err + } + + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpointEnv}); err != nil { + return cfg, err + } + + if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultsModeEnv}); err != nil { + return cfg, err + } + + if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsMaxAttemptsEnv}); err != nil { + return cfg, err + } + if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryModeEnv}); err != nil { + return cfg, err + } + + setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURLEnv}) + + if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpointURLEnv}); err != nil { + return cfg, err + } + + if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil { + return cfg, err + } + + if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil { + return cfg, err + } + + if err := setRequestChecksumCalculationFromEnvVal(&cfg.RequestChecksumCalculation, []string{awsRequestChecksumCalculation}); err != nil { + return cfg, err + } + if err := setResponseChecksumValidationFromEnvVal(&cfg.ResponseChecksumValidation, []string{awsResponseChecksumValidation}); err != nil { + return cfg, err + } + + return cfg, nil +} + +func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { + if len(c.DefaultsMode) == 0 { + return "", false, nil + } + return c.DefaultsMode, true, nil +} + +func (c EnvConfig) getAppID(context.Context) (string, bool, error) { + return c.AppID, len(c.AppID) > 0, nil +} + +func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) { + if c.DisableRequestCompression == nil { + return false, false, nil + } + return *c.DisableRequestCompression, true, nil +} + +func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) { + if c.RequestMinCompressSizeBytes == nil { + return 0, false, nil + } + return *c.RequestMinCompressSizeBytes, true, nil +} + +func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + +func (c EnvConfig) getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) { + return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil +} + +func (c EnvConfig) getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) { + return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil +} + +// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, +// and not 0. +func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { + if c.RetryMaxAttempts == 0 { + return 0, false, nil + } + return c.RetryMaxAttempts, true, nil +} + +// GetRetryMode returns the RetryMode of AWS_RETRY_MODE if was specified, and a +// valid value. 
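The environment getters in this file feed values such as AWS_REGION, AWS_MAX_ATTEMPTS, and AWS_RETRY_MODE into the config loader. A minimal sketch of how a caller typically picks these up; the exported values below are illustrative only, not defaults:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Illustrative values; normally these come from the process environment.
	os.Setenv("AWS_REGION", "eu-west-1")
	os.Setenv("AWS_MAX_ATTEMPTS", "5")
	os.Setenv("AWS_RETRY_MODE", "standard")

	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Region, RetryMaxAttempts and RetryMode are resolved from the env vars above.
	fmt.Println(cfg.Region, cfg.RetryMaxAttempts, cfg.RetryMode)
}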
+func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { + if len(c.RetryMode) == 0 { + return "", false, nil + } + return c.RetryMode, true, nil +} + +func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + switch { + case strings.EqualFold(value, "true"): + *state = imds.ClientDisabled + case strings.EqualFold(value, "false"): + *state = imds.ClientEnabled + default: + continue + } + break + } +} + +func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error { + for _, k := range keys { + if value := os.Getenv(k); len(value) > 0 { + if ok := mode.SetFromString(value); !ok { + return fmt.Errorf("invalid %s value: %s", k, value) + } + break + } + } + return nil +} + +func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) { + for _, k := range keys { + if value := os.Getenv(k); len(value) > 0 { + *mode, err = aws.ParseRetryMode(value) + if err != nil { + return fmt.Errorf("invalid %s value, %w", k, err) + } + break + } + } + return nil +} + +func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + if err := mode.SetFromString(value); err != nil { + return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) + } + } + return nil +} + +func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch value { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value) + } + break + } + return nil +} + +func setRequestChecksumCalculationFromEnvVal(m *aws.RequestChecksumCalculation, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch strings.ToLower(value) { + case checksumWhenSupported: + *m = aws.RequestChecksumCalculationWhenSupported + case checksumWhenRequired: + *m = aws.RequestChecksumCalculationWhenRequired + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value) + } + } + return nil +} + +func setResponseChecksumValidationFromEnvVal(m *aws.ResponseChecksumValidation, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch strings.ToLower(value) { + case checksumWhenSupported: + *m = aws.ResponseChecksumValidationWhenSupported + case checksumWhenRequired: + *m = aws.ResponseChecksumValidationWhenRequired + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value) + } + + } + return nil +} + +// GetRegion returns the AWS Region if set in the environment. Returns an empty +// string if not set. +func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { + if len(c.Region) == 0 { + return "", false, nil + } + return c.Region, true, nil +} + +// GetSharedConfigProfile returns the shared config profile if set in the +// environment. Returns an empty string if not set. 
+func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) { + if len(c.SharedConfigProfile) == 0 { + return "", false, nil + } + + return c.SharedConfigProfile, true, nil +} + +// getSharedConfigFiles returns a slice of filenames set in the environment. +// +// Will return the filenames in the order of: +// * Shared Config +func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) { + var files []string + if v := c.SharedConfigFile; len(v) > 0 { + files = append(files, v) + } + + if len(files) == 0 { + return nil, false, nil + } + return files, true, nil +} + +// getSharedCredentialsFiles returns a slice of filenames set in the environment. +// +// Will return the filenames in the order of: +// * Shared Credentials +func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) { + var files []string + if v := c.SharedCredentialsFile; len(v) > 0 { + files = append(files, v) + } + if len(files) == 0 { + return nil, false, nil + } + return files, true, nil +} + +// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was +func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { + if len(c.CustomCABundle) == 0 { + return nil, false, nil + } + + b, err := os.ReadFile(c.CustomCABundle) + if err != nil { + return nil, false, err + } + return bytes.NewReader(b), true, nil +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { + if c.IgnoreConfiguredEndpoints == nil { + return false, false, nil + } + + return *c.IgnoreConfiguredEndpoints, true, nil +} + +func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) { + return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil +} + +// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use +// with configured endpoints. +func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { + if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURLEnv, normalizeEnv(sdkID))); endpt != "" { + return endpt, true, nil + } + return "", false, nil +} + +func normalizeEnv(sdkID string) string { + upper := strings.ToUpper(sdkID) + return strings.ReplaceAll(upper, " ", "_") +} + +// GetS3UseARNRegion returns whether to allow ARNs to direct the region +// the S3 client's requests are sent to. +func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { + if c.S3UseARNRegion == nil { + return false, false, nil + } + + return *c.S3UseARNRegion, true, nil +} + +// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point +// support for the S3 client. +func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { + if c.S3DisableMultiRegionAccessPoints == nil { + return false, false, nil + } + + return *c.S3DisableMultiRegionAccessPoints, true, nil +} + +// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be +// used for requests. +func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { + if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + return aws.DualStackEndpointStateUnset, false, nil + } + + return c.UseDualStackEndpoint, true, nil +} + +// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be +// used for requests. 
+func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { + if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { + return aws.FIPSEndpointStateUnset, false, nil + } + + return c.UseFIPSEndpoint, true, nil +} + +func setStringFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func setIntFromEnvVal(dst *int, keys []string) error { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("invalid value %s=%s, %w", k, v, err) + } + *dst = int(i) + break + } + } + + return nil +} + +func setBoolPtrFromEnvVal(dst **bool, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + if *dst == nil { + *dst = new(bool) + } + + switch { + case strings.EqualFold(value, "false"): + **dst = false + case strings.EqualFold(value, "true"): + **dst = true + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + k, value) + } + break + } + + return nil +} + +func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value) + } else if v < 0 || v > max { + return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v) + } + if *dst == nil { + *dst = new(int64) + } + + **dst = v + break + } + + return nil +} + +func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, endpointDiscoveryDisabled): + *dst = aws.EndpointDiscoveryDisabled + case strings.EqualFold(value, endpointDiscoveryEnabled): + *dst = aws.EndpointDiscoveryEnabled + case strings.EqualFold(value, endpointDiscoveryAuto): + *dst = aws.EndpointDiscoveryAuto + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false or auto", + k, value) + } + } + return nil +} + +func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = aws.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = aws.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = aws.FIPSEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = aws.FIPSEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +// GetEnableEndpointDiscovery returns resolved value for EnableEndpointDiscovery env variable setting. 
+func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) { + if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + + return c.EnableEndpointDiscovery, true, nil +} + +// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. +func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { + if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { + return imds.ClientDefaultEnableState, false, nil + } + + return c.EC2IMDSClientEnableState, true, nil +} + +// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. +func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { + if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { + return imds.EndpointModeStateUnset, false, nil + } + + return c.EC2IMDSEndpointMode, true, nil +} + +// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. +func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { + if len(c.EC2IMDSEndpoint) == 0 { + return "", false, nil + } + + return c.EC2IMDSEndpoint, true, nil +} + +// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option +// resolver interface. +func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { + if c.EC2IMDSv1Disabled == nil { + return false, false + } + + return *c.EC2IMDSv1Disabled, true +} + +// GetS3DisableExpressAuth returns the configured value for +// [EnvConfig.S3DisableExpressAuth]. +func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) { + if c.S3DisableExpressAuth == nil { + return false, false + } + + return *c.S3DisableExpressAuth, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go new file mode 100644 index 00000000..654a7a77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go @@ -0,0 +1,4 @@ +package config + +//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go +//go:generate gofmt -s -w ./ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go new file mode 100644 index 00000000..8be8c01e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+ +package config + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.29.14" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go new file mode 100644 index 00000000..0810ecf1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -0,0 +1,1209 @@ +package config + +import ( + "context" + "io" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" + "github.com/aws/aws-sdk-go-v2/credentials/processcreds" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithybearer "github.com/aws/smithy-go/auth/bearer" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// LoadOptionsFunc is a type alias for LoadOptions functional option +type LoadOptionsFunc func(*LoadOptions) error + +// LoadOptions are discrete set of options that are valid for loading the +// configuration +type LoadOptions struct { + + // Region is the region to send requests to. + Region string + + // Credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // Token provider for authentication operations with bearer authentication. + BearerAuthTokenProvider smithybearer.TokenProvider + + // HTTPClient the SDK's API clients will use to invoke HTTP requests. + HTTPClient HTTPClient + + // EndpointResolver that can be used to provide or override an endpoint for + // the given service and region. + // + // See the `aws.EndpointResolver` documentation on usage. + // + // Deprecated: See EndpointResolverWithOptions + EndpointResolver aws.EndpointResolver + + // EndpointResolverWithOptions that can be used to provide or override an + // endpoint for the given service and region. + // + // See the `aws.EndpointResolverWithOptions` documentation on usage. + EndpointResolverWithOptions aws.EndpointResolverWithOptions + + // RetryMaxAttempts specifies the maximum number attempts an API client + // will call an operation that fails with a retryable error. + // + // This value will only be used if Retryer option is nil. + RetryMaxAttempts int + + // RetryMode specifies the retry model the API client will be created with. + // + // This value will only be used if Retryer option is nil. + RetryMode aws.RetryMode + + // Retryer is a function that provides a Retryer implementation. A Retryer + // guides how HTTP requests should be retried in case of recoverable + // failures. + // + // If not nil, RetryMaxAttempts, and RetryMode will be ignored. + Retryer func() aws.Retryer + + // APIOptions provides the set of middleware mutations modify how the API + // client requests will be handled. This is useful for adding additional + // tracing data to a request, or changing behavior of the SDK's client. + APIOptions []func(*middleware.Stack) error + + // Logger writer interface to write logging messages to. + Logger logging.Logger + + // ClientLogMode is used to configure the events that will be sent to the + // configured logger. This can be used to configure the logging of signing, + // retries, request, and responses of the SDK clients. + // + // See the ClientLogMode type documentation for the complete set of logging + // modes and available configuration. 
+ ClientLogMode *aws.ClientLogMode + + // SharedConfigProfile is the profile to be used when loading the SharedConfig + SharedConfigProfile string + + // SharedConfigFiles is the slice of custom shared config files to use when + // loading the SharedConfig. A non-default profile used within config file + // must have name defined with prefix 'profile '. eg [profile xyz] + // indicates a profile with name 'xyz'. To read more on the format of the + // config file, please refer the documentation at + // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config + // + // If duplicate profiles are provided within the same, or across multiple + // shared config files, the next parsed profile will override only the + // properties that conflict with the previously defined profile. Note that + // if duplicate profiles are provided within the SharedCredentialsFiles and + // SharedConfigFiles, the properties defined in shared credentials file + // take precedence. + SharedConfigFiles []string + + // SharedCredentialsFile is the slice of custom shared credentials files to + // use when loading the SharedConfig. The profile name used within + // credentials file must not prefix 'profile '. eg [xyz] indicates a + // profile with name 'xyz'. Profile declared as [profile xyz] will be + // ignored. To read more on the format of the credentials file, please + // refer the documentation at + // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds + // + // If duplicate profiles are provided with a same, or across multiple + // shared credentials files, the next parsed profile will override only + // properties that conflict with the previously defined profile. Note that + // if duplicate profiles are provided within the SharedCredentialsFiles and + // SharedConfigFiles, the properties defined in shared credentials file + // take precedence. 
+ SharedCredentialsFiles []string + + // CustomCABundle is CA bundle PEM bytes reader + CustomCABundle io.Reader + + // DefaultRegion is the fall back region, used if a region was not resolved + // from other sources + DefaultRegion string + + // UseEC2IMDSRegion indicates if SDK should retrieve the region + // from the EC2 Metadata service + UseEC2IMDSRegion *UseEC2IMDSRegion + + // CredentialsCacheOptions is a function for setting the + // aws.CredentialsCacheOptions + CredentialsCacheOptions func(*aws.CredentialsCacheOptions) + + // BearerAuthTokenCacheOptions is a function for setting the smithy-go + // auth/bearer#TokenCacheOptions + BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions) + + // SSOTokenProviderOptions is a function for setting the + // credentials/ssocreds.SSOTokenProviderOptions + SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions) + + // ProcessCredentialOptions is a function for setting + // the processcreds.Options + ProcessCredentialOptions func(*processcreds.Options) + + // EC2RoleCredentialOptions is a function for setting + // the ec2rolecreds.Options + EC2RoleCredentialOptions func(*ec2rolecreds.Options) + + // EndpointCredentialOptions is a function for setting + // the endpointcreds.Options + EndpointCredentialOptions func(*endpointcreds.Options) + + // WebIdentityRoleCredentialOptions is a function for setting + // the stscreds.WebIdentityRoleOptions + WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions) + + // AssumeRoleCredentialOptions is a function for setting the + // stscreds.AssumeRoleOptions + AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions) + + // SSOProviderOptions is a function for setting + // the ssocreds.Options + SSOProviderOptions func(options *ssocreds.Options) + + // LogConfigurationWarnings when set to true, enables logging + // configuration warnings + LogConfigurationWarnings *bool + + // S3UseARNRegion specifies if the S3 service should allow ARNs to direct + // the region, the client's requests are sent to. + S3UseARNRegion *bool + + // S3DisableMultiRegionAccessPoints specifies if the S3 service should disable + // the S3 Multi-Region access points feature. + S3DisableMultiRegionAccessPoints *bool + + // EnableEndpointDiscovery specifies if endpoint discovery is enable for + // the client. + EnableEndpointDiscovery aws.EndpointDiscoveryEnableState + + // Specifies if the EC2 IMDS service client is enabled. + // + // AWS_EC2_METADATA_DISABLED=true + EC2IMDSClientEnableState imds.ClientEnableState + + // Specifies the EC2 Instance Metadata Service default endpoint selection + // mode (IPv4 or IPv6) + EC2IMDSEndpointMode imds.EndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If + // specified it overrides EC2IMDSEndpointMode. + EC2IMDSEndpoint string + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + UseDualStackEndpoint aws.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + UseFIPSEndpoint aws.FIPSEndpointState + + // Specifies the SDK configuration mode for defaults. 
+ DefaultsModeOptions DefaultsModeOptions + + // The sdk app ID retrieved from env var or shared config to be added to request user agent header + AppID string + + // Specifies whether an operation request could be compressed + DisableRequestCompression *bool + + // The inclusive min bytes of a request body that could be compressed + RequestMinCompressSizeBytes *int64 + + // Whether S3 Express auth is disabled. + S3DisableExpressAuth *bool + + // Whether account id should be built into endpoint resolution + AccountIDEndpointMode aws.AccountIDEndpointMode + + // Specify if request checksum should be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + + // Specifies if response checksum should be validated + ResponseChecksumValidation aws.ResponseChecksumValidation + + // Service endpoint override. This value is not necessarily final and is + // passed to the service's EndpointResolverV2 for further delegation. + BaseEndpoint string +} + +func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { + if len(o.DefaultsModeOptions.Mode) == 0 { + return "", false, nil + } + return o.DefaultsModeOptions.Mode, true, nil +} + +// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the +// LoadOptions and not 0. +func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { + if o.RetryMaxAttempts == 0 { + return 0, false, nil + } + return o.RetryMaxAttempts, true, nil +} + +// GetRetryMode returns the RetryMode specified in the LoadOptions. +func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { + if len(o.RetryMode) == 0 { + return "", false, nil + } + return o.RetryMode, true, nil +} + +func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) { + if o.DefaultsModeOptions.IMDSClient == nil { + return nil, false, nil + } + return o.DefaultsModeOptions.IMDSClient, true, nil +} + +// getRegion returns Region from config's LoadOptions +func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) { + if len(o.Region) == 0 { + return "", false, nil + } + + return o.Region, true, nil +} + +// getAppID returns AppID from config's LoadOptions +func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) { + return o.AppID, len(o.AppID) > 0, nil +} + +// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions +func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { + if o.DisableRequestCompression == nil { + return false, false, nil + } + return *o.DisableRequestCompression, true, nil +} + +// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions +func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { + if o.RequestMinCompressSizeBytes == nil { + return 0, false, nil + } + return *o.RequestMinCompressSizeBytes, true, nil +} + +func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil +} + +func (o LoadOptions) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { + return o.RequestChecksumCalculation, o.RequestChecksumCalculation > 0, nil +} + +func (o LoadOptions) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) { + return o.ResponseChecksumValidation, 
o.ResponseChecksumValidation > 0, nil +} + +func (o LoadOptions) getBaseEndpoint(context.Context) (string, bool, error) { + return o.BaseEndpoint, o.BaseEndpoint != "", nil +} + +// GetServiceBaseEndpoint satisfies (internal/configsources).ServiceBaseEndpointProvider. +// +// The sdkID value is unused because LoadOptions only supports setting a GLOBAL +// endpoint override. In-code, per-service endpoint overrides are performed via +// functional options in service client space. +func (o LoadOptions) GetServiceBaseEndpoint(context.Context, string) (string, bool, error) { + return o.BaseEndpoint, o.BaseEndpoint != "", nil +} + +// WithRegion is a helper function to construct functional options +// that sets Region on config's LoadOptions. Setting the region to +// an empty string, will result in the region value being ignored. +// If multiple WithRegion calls are made, the last call overrides +// the previous call values. +func WithRegion(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Region = v + return nil + } +} + +// WithAppID is a helper function to construct functional options +// that sets AppID on config's LoadOptions. +func WithAppID(ID string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.AppID = ID + return nil + } +} + +// WithDisableRequestCompression is a helper function to construct functional options +// that sets DisableRequestCompression on config's LoadOptions. +func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + if DisableRequestCompression == nil { + return nil + } + o.DisableRequestCompression = DisableRequestCompression + return nil + } +} + +// WithRequestMinCompressSizeBytes is a helper function to construct functional options +// that sets RequestMinCompressSizeBytes on config's LoadOptions. +func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc { + return func(o *LoadOptions) error { + if RequestMinCompressSizeBytes == nil { + return nil + } + o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes + return nil + } +} + +// WithAccountIDEndpointMode is a helper function to construct functional options +// that sets AccountIDEndpointMode on config's LoadOptions +func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + if m != "" { + o.AccountIDEndpointMode = m + } + return nil + } +} + +// WithRequestChecksumCalculation is a helper function to construct functional options +// that sets RequestChecksumCalculation on config's LoadOptions +func WithRequestChecksumCalculation(c aws.RequestChecksumCalculation) LoadOptionsFunc { + return func(o *LoadOptions) error { + if c > 0 { + o.RequestChecksumCalculation = c + } + return nil + } +} + +// WithResponseChecksumValidation is a helper function to construct functional options +// that sets ResponseChecksumValidation on config's LoadOptions +func WithResponseChecksumValidation(v aws.ResponseChecksumValidation) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ResponseChecksumValidation = v + return nil + } +} + +// getDefaultRegion returns DefaultRegion from config's LoadOptions +func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { + if len(o.DefaultRegion) == 0 { + return "", false, nil + } + + return o.DefaultRegion, true, nil +} + +// WithDefaultRegion is a helper function to construct functional options +// that sets a DefaultRegion on config's LoadOptions. 
Setting the default +// region to an empty string, will result in the default region value +// being ignored. If multiple WithDefaultRegion calls are made, the last +// call overrides the previous call values. Note that both WithRegion and +// WithEC2IMDSRegion call takes precedence over WithDefaultRegion call +// when resolving region. +func WithDefaultRegion(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.DefaultRegion = v + return nil + } +} + +// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions +func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) { + if len(o.SharedConfigProfile) == 0 { + return "", false, nil + } + + return o.SharedConfigProfile, true, nil +} + +// WithSharedConfigProfile is a helper function to construct functional options +// that sets SharedConfigProfile on config's LoadOptions. Setting the shared +// config profile to an empty string, will result in the shared config profile +// value being ignored. +// If multiple WithSharedConfigProfile calls are made, the last call overrides +// the previous call values. +func WithSharedConfigProfile(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedConfigProfile = v + return nil + } +} + +// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions +func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) { + if o.SharedConfigFiles == nil { + return nil, false, nil + } + + return o.SharedConfigFiles, true, nil +} + +// WithSharedConfigFiles is a helper function to construct functional options +// that sets slice of SharedConfigFiles on config's LoadOptions. +// Setting the shared config files to an nil string slice, will result in the +// shared config files value being ignored. +// If multiple WithSharedConfigFiles calls are made, the last call overrides +// the previous call values. +func WithSharedConfigFiles(v []string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedConfigFiles = v + return nil + } +} + +// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions +func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) { + if o.SharedCredentialsFiles == nil { + return nil, false, nil + } + + return o.SharedCredentialsFiles, true, nil +} + +// WithSharedCredentialsFiles is a helper function to construct functional options +// that sets slice of SharedCredentialsFiles on config's LoadOptions. +// Setting the shared credentials files to an nil string slice, will result in the +// shared credentials files value being ignored. +// If multiple WithSharedCredentialsFiles calls are made, the last call overrides +// the previous call values. +func WithSharedCredentialsFiles(v []string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedCredentialsFiles = v + return nil + } +} + +// getCustomCABundle returns CustomCABundle from LoadOptions +func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) { + if o.CustomCABundle == nil { + return nil, false, nil + } + + return o.CustomCABundle, true, nil +} + +// WithCustomCABundle is a helper function to construct functional options +// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle +// to nil will result in custom CA Bundle value being ignored. +// If multiple WithCustomCABundle calls are made, the last call overrides the +// previous call values. 
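For context, a brief sketch of supplying a private CA bundle through this option at load time; the PEM path is hypothetical:

package main

import (
	"bytes"
	"context"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	pem, err := os.ReadFile("/etc/ssl/private/internal-ca.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"),
		config.WithCustomCABundle(bytes.NewReader(pem)),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // hand cfg to service clients as usual
}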
+func WithCustomCABundle(v io.Reader) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.CustomCABundle = v + return nil + } +} + +// UseEC2IMDSRegion provides a regionProvider that retrieves the region +// from the EC2 Metadata service. +type UseEC2IMDSRegion struct { + // If unset will default to generic EC2 IMDS client. + Client *imds.Client +} + +// getRegion attempts to retrieve the region from EC2 Metadata service. +func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) { + if ctx == nil { + ctx = context.Background() + } + + client := p.Client + if client == nil { + client = imds.New(imds.Options{}) + } + + result, err := client.GetRegion(ctx, nil) + if err != nil { + return "", false, err + } + if len(result.Region) != 0 { + return result.Region, true, nil + } + return "", false, nil +} + +// getEC2IMDSRegion returns the value of EC2 IMDS region. +func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) { + if o.UseEC2IMDSRegion == nil { + return "", false, nil + } + + return o.UseEC2IMDSRegion.getRegion(ctx) +} + +// WithEC2IMDSRegion is a helper function to construct functional options +// that enables resolving EC2IMDS region. The function takes +// in a UseEC2IMDSRegion functional option, and can be used to set the +// EC2IMDS client which will be used to resolve EC2IMDSRegion. +// If no functional option is provided, an EC2IMDS client is built and used +// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last +// call overrides the previous call values. Note that the WithRegion calls takes +// precedence over WithEC2IMDSRegion when resolving region. +func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.UseEC2IMDSRegion = &UseEC2IMDSRegion{} + + for _, fn := range fnOpts { + fn(o.UseEC2IMDSRegion) + } + return nil + } +} + +// getCredentialsProvider returns the credentials value +func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) { + if o.Credentials == nil { + return nil, false, nil + } + + return o.Credentials, true, nil +} + +// WithCredentialsProvider is a helper function to construct functional options +// that sets Credential provider value on config's LoadOptions. If credentials +// provider is set to nil, the credentials provider value will be ignored. +// If multiple WithCredentialsProvider calls are made, the last call overrides +// the previous call values. +func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Credentials = v + return nil + } +} + +// getCredentialsCacheOptionsProvider returns the wrapped function to set aws.CredentialsCacheOptions +func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) { + if o.CredentialsCacheOptions == nil { + return nil, false, nil + } + + return o.CredentialsCacheOptions, true, nil +} + +// WithCredentialsCacheOptions is a helper function to construct functional +// options that sets a function to modify the aws.CredentialsCacheOptions the +// aws.CredentialsCache will be configured with, if the CredentialsCache is used +// by the configuration loader. +// +// If multiple WithCredentialsCacheOptions calls are made, the last call +// overrides the previous call values. 
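A sketch of the callback shape this option expects, here widening the refresh window on the default credentials cache; the five-minute window is an arbitrary illustration:

// Fragment; assumes context, log, time, aws and config packages are imported.
cfg, err := config.LoadDefaultConfig(context.TODO(),
	config.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {
		// Start refreshing credentials five minutes before they expire.
		o.ExpiryWindow = 5 * time.Minute
	}),
)
if err != nil {
	log.Fatal(err)
}
_ = cfg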
+func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.CredentialsCacheOptions = v + return nil + } +} + +// getBearerAuthTokenProvider returns the credentials value +func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) { + if o.BearerAuthTokenProvider == nil { + return nil, false, nil + } + + return o.BearerAuthTokenProvider, true, nil +} + +// WithBearerAuthTokenProvider is a helper function to construct functional options +// that sets Credential provider value on config's LoadOptions. If credentials +// provider is set to nil, the credentials provider value will be ignored. +// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides +// the previous call values. +func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.BearerAuthTokenProvider = v + return nil + } +} + +// getBearerAuthTokenCacheOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions +func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) { + if o.BearerAuthTokenCacheOptions == nil { + return nil, false, nil + } + + return o.BearerAuthTokenCacheOptions, true, nil +} + +// WithBearerAuthTokenCacheOptions is a helper function to construct functional options +// that sets a function to modify the TokenCacheOptions the smithy-go +// auth/bearer#TokenCache will be configured with, if the TokenCache is used by +// the configuration loader. +// +// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides +// the previous call values. +func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.BearerAuthTokenCacheOptions = v + return nil + } +} + +// getSSOTokenProviderOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions +func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) { + if o.SSOTokenProviderOptions == nil { + return nil, false, nil + } + + return o.SSOTokenProviderOptions, true, nil +} + +// WithSSOTokenProviderOptions is a helper function to construct functional +// options that sets a function to modify the SSOtokenProviderOptions the SDK's +// credentials/ssocreds#SSOProvider will be configured with, if the +// SSOTokenProvider is used by the configuration loader. +// +// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides +// the previous call values. +func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SSOTokenProviderOptions = v + return nil + } +} + +// getProcessCredentialOptions returns the wrapped function to set processcreds.Options +func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) { + if o.ProcessCredentialOptions == nil { + return nil, false, nil + } + + return o.ProcessCredentialOptions, true, nil +} + +// WithProcessCredentialOptions is a helper function to construct functional options +// that sets a function to use processcreds.Options on config's LoadOptions. +// If process credential options is set to nil, the process credential value will +// be ignored. 
If multiple WithProcessCredentialOptions calls are made, the last call +// overrides the previous call values. +func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ProcessCredentialOptions = v + return nil + } +} + +// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options +func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) { + if o.EC2RoleCredentialOptions == nil { + return nil, false, nil + } + + return o.EC2RoleCredentialOptions, true, nil +} + +// WithEC2RoleCredentialOptions is a helper function to construct functional options +// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If +// EC2 role credential options is set to nil, the EC2 role credential options value +// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2RoleCredentialOptions = v + return nil + } +} + +// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options +func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) { + if o.EndpointCredentialOptions == nil { + return nil, false, nil + } + + return o.EndpointCredentialOptions, true, nil +} + +// WithEndpointCredentialOptions is a helper function to construct functional options +// that sets a function to use endpointcreds.Options on config's LoadOptions. If +// endpoint credential options is set to nil, the endpoint credential options +// value will be ignored. If multiple WithEndpointCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointCredentialOptions = v + return nil + } +} + +// getWebIdentityRoleCredentialOptions returns the wrapped function +func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) { + if o.WebIdentityRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.WebIdentityRoleCredentialOptions, true, nil +} + +// WithWebIdentityRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.WebIdentityRoleOptions +// on config's LoadOptions. If web identity role credentials options is set to nil, +// the web identity role credentials value will be ignored. If multiple +// WithWebIdentityRoleCredentialOptions calls are made, the last call +// overrides the previous call values. 
+func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.WebIdentityRoleCredentialOptions = v + return nil + } +} + +// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions +func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) { + if o.AssumeRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.AssumeRoleCredentialOptions, true, nil +} + +// WithAssumeRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.AssumeRoleOptions +// on config's LoadOptions. If assume role credentials options is set to nil, +// the assume role credentials value will be ignored. If multiple +// WithAssumeRoleCredentialOptions calls are made, the last call overrides +// the previous call values. +func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.AssumeRoleCredentialOptions = v + return nil + } +} + +func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) { + if o.HTTPClient == nil { + return nil, false, nil + } + + return o.HTTPClient, true, nil +} + +// WithHTTPClient is a helper function to construct functional options +// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil, +// the HTTPClient value will be ignored. +// If multiple WithHTTPClient calls are made, the last call overrides +// the previous call values. +func WithHTTPClient(v HTTPClient) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.HTTPClient = v + return nil + } +} + +func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) { + if o.APIOptions == nil { + return nil, false, nil + } + + return o.APIOptions, true, nil +} + +// WithAPIOptions is a helper function to construct functional options +// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the +// APIOptions value is ignored. If multiple WithAPIOptions calls are +// made, the last call overrides the previous call values. +func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc { + return func(o *LoadOptions) error { + if v == nil { + return nil + } + + o.APIOptions = append(o.APIOptions, v...) + return nil + } +} + +func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) { + if o.RetryMaxAttempts == 0 { + return 0, false, nil + } + + return o.RetryMaxAttempts, true, nil +} + +// WithRetryMaxAttempts is a helper function to construct functional options that sets +// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is +// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides +// the previous call values. +// +// Will be ignored of LoadOptions.Retryer or WithRetryer are used. +func WithRetryMaxAttempts(v int) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.RetryMaxAttempts = v + return nil + } +} + +func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { + if o.RetryMode == "" { + return "", false, nil + } + + return o.RetryMode, true, nil +} + +// WithRetryMode is a helper function to construct functional options that sets +// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is +// ignored. 
If multiple WithRetryMode calls are made, the last call overrides +// the previous call values. +// +// Will be ignored of LoadOptions.Retryer or WithRetryer are used. +func WithRetryMode(v aws.RetryMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.RetryMode = v + return nil + } +} + +func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) { + if o.Retryer == nil { + return nil, false, nil + } + + return o.Retryer, true, nil +} + +// WithRetryer is a helper function to construct functional options +// that sets Retryer on LoadOptions. If Retryer is set to nil, the +// Retryer value is ignored. If multiple WithRetryer calls are +// made, the last call overrides the previous call values. +func WithRetryer(v func() aws.Retryer) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Retryer = v + return nil + } +} + +func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) { + if o.EndpointResolver == nil { + return nil, false, nil + } + + return o.EndpointResolver, true, nil +} + +// WithEndpointResolver is a helper function to construct functional options +// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil, +// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls +// are made, the last call overrides the previous call values. +// +// Deprecated: The global endpoint resolution interface is deprecated. The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Use of +// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. +func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointResolver = v + return nil + } +} + +func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) { + if o.EndpointResolverWithOptions == nil { + return nil, false, nil + } + + return o.EndpointResolverWithOptions, true, nil +} + +// WithEndpointResolverWithOptions is a helper function to construct functional options +// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil, +// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls +// are made, the last call overrides the previous call values. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [WithEndpointResolver]. +func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointResolverWithOptions = v + return nil + } +} + +func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) { + if o.Logger == nil { + return nil, false, nil + } + + return o.Logger, true, nil +} + +// WithLogger is a helper function to construct functional options +// that sets Logger on LoadOptions. If Logger is set to nil, the +// Logger value will be ignored. If multiple WithLogger calls are made, +// the last call overrides the previous call values. 
+func WithLogger(v logging.Logger) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Logger = v + return nil + } +} + +func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) { + if o.ClientLogMode == nil { + return 0, false, nil + } + + return *o.ClientLogMode, true, nil +} + +// WithClientLogMode is a helper function to construct functional options +// that sets client log mode on LoadOptions. If client log mode is set to nil, +// the client log mode value will be ignored. If multiple WithClientLogMode calls are made, +// the last call overrides the previous call values. +func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ClientLogMode = &v + return nil + } +} + +func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) { + if o.LogConfigurationWarnings == nil { + return false, false, nil + } + return *o.LogConfigurationWarnings, true, nil +} + +// WithLogConfigurationWarnings is a helper function to construct +// functional options that can be used to set LogConfigurationWarnings +// on LoadOptions. +// +// If multiple WithLogConfigurationWarnings calls are made, the last call +// overrides the previous call values. +func WithLogConfigurationWarnings(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.LogConfigurationWarnings = &v + return nil + } +} + +// GetS3UseARNRegion returns whether to allow ARNs to direct the region +// the S3 client's requests are sent to. +func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) { + if o.S3UseARNRegion == nil { + return false, false, nil + } + return *o.S3UseARNRegion, true, nil +} + +// WithS3UseARNRegion is a helper function to construct functional options +// that can be used to set S3UseARNRegion on LoadOptions. +// If multiple WithS3UseARNRegion calls are made, the last call overrides +// the previous call values. +func WithS3UseARNRegion(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3UseARNRegion = &v + return nil + } +} + +// GetS3DisableMultiRegionAccessPoints returns whether to disable +// the S3 multi-region access points feature. +func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) { + if o.S3DisableMultiRegionAccessPoints == nil { + return false, false, nil + } + return *o.S3DisableMultiRegionAccessPoints, true, nil +} + +// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options +// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions. +// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides +// the previous call values. +func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3DisableMultiRegionAccessPoints = &v + return nil + } +} + +// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set. +func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { + if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + return o.EnableEndpointDiscovery, true, nil +} + +// WithEndpointDiscovery is a helper function to construct functional options +// that can be used to enable endpoint discovery on LoadOptions for supported clients. 
+// If multiple WithEndpointDiscovery calls are made, the last call overrides +// the previous call values. +func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EnableEndpointDiscovery = v + return nil + } +} + +// getSSOProviderOptions returns AssumeRoleCredentialOptions from LoadOptions +func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) { + if o.SSOProviderOptions == nil { + return nil, false, nil + } + + return o.SSOProviderOptions, true, nil +} + +// WithSSOProviderOptions is a helper function to construct +// functional options that sets a function to use ssocreds.Options +// on config's LoadOptions. If the SSO credential provider options is set to nil, +// the sso provider options value will be ignored. If multiple +// WithSSOProviderOptions calls are made, the last call overrides +// the previous call values. +func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SSOProviderOptions = v + return nil + } +} + +// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. +func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { + if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { + return imds.ClientDefaultEnableState, false, nil + } + + return o.EC2IMDSClientEnableState, true, nil +} + +// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. +func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { + if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { + return imds.EndpointModeStateUnset, false, nil + } + + return o.EC2IMDSEndpointMode, true, nil +} + +// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. +func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) { + if len(o.EC2IMDSEndpoint) == 0 { + return "", false, nil + } + + return o.EC2IMDSEndpoint, true, nil +} + +// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState. +func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2IMDSClientEnableState = v + return nil + } +} + +// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode. +func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2IMDSEndpointMode = v + return nil + } +} + +// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint. +func WithEC2IMDSEndpoint(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2IMDSEndpoint = v + return nil + } +} + +// WithUseDualStackEndpoint is a helper function to construct +// functional options that can be used to set UseDualStackEndpoint on LoadOptions. +func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.UseDualStackEndpoint = v + return nil + } +} + +// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be +// used for requests. 
+func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { + if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + return aws.DualStackEndpointStateUnset, false, nil + } + return o.UseDualStackEndpoint, true, nil +} + +// WithUseFIPSEndpoint is a helper function to construct +// functional options that can be used to set UseFIPSEndpoint on LoadOptions. +func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.UseFIPSEndpoint = v + return nil + } +} + +// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be +// used for requests. +func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { + if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { + return aws.FIPSEndpointStateUnset, false, nil + } + return o.UseFIPSEndpoint, true, nil +} + +// WithDefaultsMode sets the SDK defaults configuration mode to the value provided. +// +// Zero or more functional options can be provided to provide configuration options for performing +// environment discovery when using aws.DefaultsModeAuto. +func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc { + do := DefaultsModeOptions{ + Mode: mode, + } + for _, fn := range optFns { + fn(&do) + } + return func(options *LoadOptions) error { + options.DefaultsModeOptions = do + return nil + } +} + +// GetS3DisableExpressAuth returns the configured value for +// [EnvConfig.S3DisableExpressAuth]. +func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) { + if o.S3DisableExpressAuth == nil { + return false, false + } + + return *o.S3DisableExpressAuth, true +} + +// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth] +// to the value provided. +func WithS3DisableExpressAuth(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3DisableExpressAuth = &v + return nil + } +} + +// WithBaseEndpoint is a helper function to construct functional options that +// sets BaseEndpoint on config's LoadOptions. Empty values have no effect, and +// subsequent calls to this API override previous ones. +// +// This is an in-code setting, therefore, any value set using this hook takes +// precedence over and will override ALL environment and shared config +// directives that set endpoint URLs. Functional options on service clients +// have higher specificity, and functional options that modify the value of +// BaseEndpoint on a client will take precedence over this setting. 
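Aside: the endpoint-related options in this hunk (dual-stack, FIPS, and the base endpoint override described above) compose the same way. A sketch, assuming LoadDefaultConfig and the aws.FIPSEndpointStateEnabled / aws.DualStackEndpointStateEnabled constants; the emulator URL in the comment is illustrative only.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background(),
		// Prefer FIPS and dual-stack endpoint variants where available.
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
		config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled),
		// Or route requests at a single endpoint such as a local emulator.
		// As documented above, this in-code setting overrides endpoint URLs
		// coming from the environment or shared config.
		// config.WithBaseEndpoint("http://localhost:4566"),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}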
+func WithBaseEndpoint(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.BaseEndpoint = v + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go new file mode 100644 index 00000000..b629137c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go @@ -0,0 +1,51 @@ +package config + +import ( + "fmt" + "net" + "net/url" +) + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + if len(addrs) == 0 { + return false, fmt.Errorf("no addrs found for host, %s", host) + } + + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func validateLocalURL(v string) error { + u, err := url.Parse(v) + if err != nil { + return err + } + + host := u.Hostname() + if len(host) == 0 { + return fmt.Errorf("unable to parse host from local HTTP cred provider URL") + } else if isLoopback, err := isLoopbackHost(host); err != nil { + return fmt.Errorf("failed to resolve host %q, %v", host, err) + } else if !isLoopback { + return fmt.Errorf("invalid endpoint host, %q, only host resolving to loopback addresses are allowed", host) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go new file mode 100644 index 00000000..a8ff40d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -0,0 +1,755 @@ +package config + +import ( + "context" + "io" + "net/http" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" + "github.com/aws/aws-sdk-go-v2/credentials/processcreds" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithybearer "github.com/aws/smithy-go/auth/bearer" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// sharedConfigProfileProvider provides access to the shared config profile +// name external configuration value. +type sharedConfigProfileProvider interface { + getSharedConfigProfile(ctx context.Context) (string, bool, error) +} + +// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedConfigProfileProvider); ok { + value, found, err = p.getSharedConfigProfile(ctx) + if err != nil || found { + break + } + } + } + return +} + +// sharedConfigFilesProvider provides access to the shared config filesnames +// external configuration value. +type sharedConfigFilesProvider interface { + getSharedConfigFiles(ctx context.Context) ([]string, bool, error) +} + +// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. 
+func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedConfigFilesProvider); ok { + value, found, err = p.getSharedConfigFiles(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// sharedCredentialsFilesProvider provides access to the shared credentials filesnames +// external configuration value. +type sharedCredentialsFilesProvider interface { + getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) +} + +// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedCredentialsFilesProvider); ok { + value, found, err = p.getSharedCredentialsFiles(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// customCABundleProvider provides access to the custom CA bundle PEM bytes. +type customCABundleProvider interface { + getCustomCABundle(ctx context.Context) (io.Reader, bool, error) +} + +// getCustomCABundle searches the configs for a customCABundleProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(customCABundleProvider); ok { + value, found, err = p.getCustomCABundle(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// regionProvider provides access to the region external configuration value. +type regionProvider interface { + getRegion(ctx context.Context) (string, bool, error) +} + +// getRegion searches the configs for a regionProvider and returns the value +// if found. Returns an error if a provider fails before a value is found. +func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(regionProvider); ok { + value, found, err = p.getRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// IgnoreConfiguredEndpointsProvider is needed to search for all providers +// that provide a flag to disable configured endpoints. +type IgnoreConfiguredEndpointsProvider interface { + GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. 
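Aside: every getter in provider.go follows the same shape: walk the ordered config sources, probe each with a type assertion against a tiny per-value interface, and stop at the first source that reports found (or errors). A standalone sketch of that precedence pattern; the names here are illustrative and not part of the SDK.

package main

import "fmt"

// regionSource mirrors the small per-value interfaces used in this file.
type regionSource interface {
	getRegion() (value string, found bool)
}

type envSource struct{ region string }

func (e envSource) getRegion() (string, bool) { return e.region, e.region != "" }

type fileSource struct{ region string }

func (f fileSource) getRegion() (string, bool) { return f.region, f.region != "" }

// firstRegion probes each source in order; the first hit wins, matching the
// "first found" precedence of the getters in this file.
func firstRegion(sources ...interface{}) (string, bool) {
	for _, s := range sources {
		if p, ok := s.(regionSource); ok {
			if v, found := p.getRegion(); found {
				return v, true
			}
		}
	}
	return "", false
}

func main() {
	region, ok := firstRegion(envSource{}, fileSource{region: "eu-west-1"})
	fmt.Println(region, ok) // eu-west-1 true
}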
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { + value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) + if err != nil || found { + break + } + } + } + return +} + +type baseEndpointProvider interface { + getBaseEndpoint(ctx context.Context) (string, bool, error) +} + +func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(baseEndpointProvider); ok { + value, found, err = p.getBaseEndpoint(ctx) + if err != nil || found { + break + } + } + } + return +} + +type servicesObjectProvider interface { + getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error) +} + +func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(servicesObjectProvider); ok { + value, found, err = p.getServicesObject(ctx) + if err != nil || found { + break + } + } + } + return +} + +// appIDProvider provides access to the sdk app ID value +type appIDProvider interface { + getAppID(ctx context.Context) (string, bool, error) +} + +func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(appIDProvider); ok { + value, found, err = p.getAppID(ctx) + if err != nil || found { + break + } + } + } + return +} + +// disableRequestCompressionProvider provides access to the DisableRequestCompression +type disableRequestCompressionProvider interface { + getDisableRequestCompression(context.Context) (bool, bool, error) +} + +func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(disableRequestCompressionProvider); ok { + value, found, err = p.getDisableRequestCompression(ctx) + if err != nil || found { + break + } + } + } + return +} + +// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes +type requestMinCompressSizeBytesProvider interface { + getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) +} + +func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok { + value, found, err = p.getRequestMinCompressSizeBytes(ctx) + if err != nil || found { + break + } + } + } + return +} + +// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode +type accountIDEndpointModeProvider interface { + getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) +} + +func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(accountIDEndpointModeProvider); ok { + value, found, err = p.getAccountIDEndpointMode(ctx) + if err != nil || found { + break + } + } + } + return +} + +// requestChecksumCalculationProvider provides access to the RequestChecksumCalculation +type requestChecksumCalculationProvider interface { + getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) +} + +func getRequestChecksumCalculation(ctx context.Context, configs configs) (value aws.RequestChecksumCalculation, 
found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(requestChecksumCalculationProvider); ok { + value, found, err = p.getRequestChecksumCalculation(ctx) + if err != nil || found { + break + } + } + } + return +} + +// responseChecksumValidationProvider provides access to the ResponseChecksumValidation +type responseChecksumValidationProvider interface { + getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) +} + +func getResponseChecksumValidation(ctx context.Context, configs configs) (value aws.ResponseChecksumValidation, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(responseChecksumValidationProvider); ok { + value, found, err = p.getResponseChecksumValidation(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ec2IMDSRegionProvider provides access to the ec2 imds region +// configuration value +type ec2IMDSRegionProvider interface { + getEC2IMDSRegion(ctx context.Context) (string, bool, error) +} + +// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and +// returns the value if found. Returns an error if a provider fails before +// a value is found. +func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) { + for _, cfg := range configs { + if provider, ok := cfg.(ec2IMDSRegionProvider); ok { + region, found, err = provider.getEC2IMDSRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// credentialsProviderProvider provides access to the credentials external +// configuration value. +type credentialsProviderProvider interface { + getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) +} + +// getCredentialsProvider searches the configs for a credentialsProviderProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) { + for _, cfg := range configs { + if provider, ok := cfg.(credentialsProviderProvider); ok { + p, found, err = provider.getCredentialsProvider(ctx) + if err != nil || found { + break + } + } + } + return +} + +// credentialsCacheOptionsProvider is an interface for retrieving a function for setting +// the aws.CredentialsCacheOptions. +type credentialsCacheOptionsProvider interface { + getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) +} + +// getCredentialsCacheOptionsProvider is an interface for retrieving a function for setting +// the aws.CredentialsCacheOptions. +func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) ( + f func(*aws.CredentialsCacheOptions), found bool, err error, +) { + for _, config := range configs { + if p, ok := config.(credentialsCacheOptionsProvider); ok { + f, found, err = p.getCredentialsCacheOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// bearerAuthTokenProviderProvider provides access to the bearer authentication +// token external configuration value. +type bearerAuthTokenProviderProvider interface { + getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error) +} + +// getBearerAuthTokenProvider searches the config sources for a +// bearerAuthTokenProviderProvider and returns the value if found. Returns an +// error if a provider fails before a value is found. 
+func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) { + for _, cfg := range configs { + if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok { + p, found, err = provider.getBearerAuthTokenProvider(ctx) + if err != nil || found { + break + } + } + } + return +} + +// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for +// setting the smithy-go auth/bearer#TokenCacheOptions. +type bearerAuthTokenCacheOptionsProvider interface { + getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) +} + +// getBearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for +// setting the smithy-go auth/bearer#TokenCacheOptions. +func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) ( + f func(*smithybearer.TokenCacheOptions), found bool, err error, +) { + for _, config := range configs { + if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok { + f, found, err = p.getBearerAuthTokenCacheOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ssoTokenProviderOptionsProvider is an interface for retrieving a function for +// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. +type ssoTokenProviderOptionsProvider interface { + getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) +} + +// getSSOTokenProviderOptions is an interface for retrieving a function for +// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. +func getSSOTokenProviderOptions(ctx context.Context, configs configs) ( + f func(*ssocreds.SSOTokenProviderOptions), found bool, err error, +) { + for _, config := range configs { + if p, ok := config.(ssoTokenProviderOptionsProvider); ok { + f, found, err = p.getSSOTokenProviderOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ssoTokenProviderOptionsProvider + +// processCredentialOptions is an interface for retrieving a function for setting +// the processcreds.Options. +type processCredentialOptions interface { + getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) +} + +// getProcessCredentialOptions searches the slice of configs and returns the first function found +func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) { + for _, config := range configs { + if p, ok := config.(processCredentialOptions); ok { + f, found, err = p.getProcessCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ec2RoleCredentialOptionsProvider is an interface for retrieving a function +// for setting the ec2rolecreds.Provider options. 
+type ec2RoleCredentialOptionsProvider interface { + getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) +} + +// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found +func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) { + for _, config := range configs { + if p, ok := config.(ec2RoleCredentialOptionsProvider); ok { + f, found, err = p.getEC2RoleCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources +type defaultRegionProvider interface { + getDefaultRegion(ctx context.Context) (string, bool, error) +} + +// getDefaultRegion searches the slice of configs and returns the first fallback region found +func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, config := range configs { + if p, ok := config.(defaultRegionProvider); ok { + value, found, err = p.getDefaultRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// endpointCredentialOptionsProvider is an interface for retrieving a function for setting +// the endpointcreds.ProviderOptions. +type endpointCredentialOptionsProvider interface { + getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error) +} + +// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found +func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) { + for _, config := range configs { + if p, ok := config.(endpointCredentialOptionsProvider); ok { + f, found, err = p.getEndpointCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting +// the stscreds.WebIdentityRoleProvider. +type webIdentityRoleCredentialOptionsProvider interface { + getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) +} + +// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found +func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) { + for _, config := range configs { + if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok { + f, found, err = p.getWebIdentityRoleCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting +// the stscreds.AssumeRoleOptions. 
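Aside: the assume-role option hook described above is what lets callers inject stscreds.AssumeRoleOptions, for example an MFA token prompt; without a TokenProvider, profiles that set mfa_serial fail later with AssumeRoleTokenProviderNotSetError. A sketch that assumes the WithAssumeRoleCredentialOptions helper and stscreds.StdinTokenProvider available in current releases of these packages.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
)

func main() {
	// When the shared profile configures role_arn together with mfa_serial,
	// a TokenProvider must be supplied or credential resolution will fail.
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
			o.TokenProvider = stscreds.StdinTokenProvider // assumed helper; prompts on stdin
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}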
+type assumeRoleCredentialOptionsProvider interface { + getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error) +} + +// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found +func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) { + for _, config := range configs { + if p, ok := config.(assumeRoleCredentialOptionsProvider); ok { + f, found, err = p.getAssumeRoleCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// HTTPClient is an HTTP client implementation +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// httpClientProvider is an interface for retrieving HTTPClient +type httpClientProvider interface { + getHTTPClient(ctx context.Context) (HTTPClient, bool, error) +} + +// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs +func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) { + for _, config := range configs { + if p, ok := config.(httpClientProvider); ok { + client, found, err = p.getHTTPClient(ctx) + if err != nil || found { + break + } + } + } + return +} + +// apiOptionsProvider is an interface for retrieving APIOptions +type apiOptionsProvider interface { + getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) +} + +// getAPIOptions searches the slice of configs and returns the APIOptions set on configs +func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) { + for _, config := range configs { + if p, ok := config.(apiOptionsProvider); ok { + // retrieve APIOptions from configs and set it on cfg + apiOptions, found, err = p.getAPIOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source +type endpointResolverProvider interface { + getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) +} + +// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used +// to configure the aws.Config.EndpointResolver value. +func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) { + for _, c := range configs { + if p, ok := c.(endpointResolverProvider); ok { + f, found, err = p.getEndpointResolver(ctx) + if err != nil || found { + break + } + } + } + return +} + +// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source +type endpointResolverWithOptionsProvider interface { + getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) +} + +// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used +// to configure the aws.Config.EndpointResolver value. 
+func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) { + for _, c := range configs { + if p, ok := c.(endpointResolverWithOptionsProvider); ok { + f, found, err = p.getEndpointResolverWithOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// loggerProvider is an interface for retrieving a logging.Logger from a configuration source. +type loggerProvider interface { + getLogger(ctx context.Context) (logging.Logger, bool, error) +} + +// getLogger searches the provided config sources for a logging.Logger that can be used +// to configure the aws.Config.Logger value. +func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) { + for _, c := range configs { + if p, ok := c.(loggerProvider); ok { + l, found, err = p.getLogger(ctx) + if err != nil || found { + break + } + } + } + return +} + +// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source. +type clientLogModeProvider interface { + getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) +} + +func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) { + for _, c := range configs { + if p, ok := c.(clientLogModeProvider); ok { + m, found, err = p.getClientLogMode(ctx) + if err != nil || found { + break + } + } + } + return +} + +// retryProvider is an configuration provider for custom Retryer. +type retryProvider interface { + getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) +} + +func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) { + for _, c := range configs { + if p, ok := c.(retryProvider); ok { + v, found, err = p.getRetryer(ctx) + if err != nil || found { + break + } + } + } + return +} + +// logConfigurationWarningsProvider is an configuration provider for +// retrieving a boolean indicating whether configuration issues should +// be logged when loading from config sources +type logConfigurationWarningsProvider interface { + getLogConfigurationWarnings(ctx context.Context) (bool, bool, error) +} + +func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) { + for _, c := range configs { + if p, ok := c.(logConfigurationWarningsProvider); ok { + v, found, err = p.getLogConfigurationWarnings(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ssoCredentialOptionsProvider is an interface for retrieving a function for setting +// the ssocreds.Options. 
+type ssoCredentialOptionsProvider interface { + getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error) +} + +func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) { + for _, c := range configs { + if p, ok := c.(ssoCredentialOptionsProvider); ok { + v, found, err = p.getSSOProviderOptions(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type defaultsModeIMDSClientProvider interface { + getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error) +} + +func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) { + for _, c := range configs { + if p, ok := c.(defaultsModeIMDSClientProvider); ok { + v, found, err = p.getDefaultsModeIMDSClient(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type defaultsModeProvider interface { + getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error) +} + +func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) { + for _, c := range configs { + if p, ok := c.(defaultsModeProvider); ok { + v, found, err = p.getDefaultsMode(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type retryMaxAttemptsProvider interface { + GetRetryMaxAttempts(context.Context) (int, bool, error) +} + +func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) { + for _, c := range configs { + if p, ok := c.(retryMaxAttemptsProvider); ok { + v, found, err = p.GetRetryMaxAttempts(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type retryModeProvider interface { + GetRetryMode(context.Context) (aws.RetryMode, bool, error) +} + +func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) { + for _, c := range configs { + if p, ok := c.(retryModeProvider); ok { + v, found, err = p.GetRetryMode(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go new file mode 100644 index 00000000..a68bd099 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -0,0 +1,413 @@ +package config + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/smithy-go/logging" +) + +// resolveDefaultAWSConfig will write default configuration values into the cfg +// value. It will write the default values, overwriting any previous value. +// +// This should be used as the first resolver in the slice of resolvers when +// resolving external configuration. +func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { + var sources []interface{} + for _, s := range cfgs { + sources = append(sources, s) + } + + *cfg = aws.Config{ + Logger: logging.NewStandardLogger(os.Stderr), + ConfigSources: sources, + } + return nil +} + +// resolveCustomCABundle extracts the first instance of a custom CA bundle filename +// from the external configurations. It will update the HTTP Client's builder +// to be configured with the custom CA bundle. 
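Aside: the CA-bundle resolver whose doc comment continues below installs the PEM bytes into a BuildableClient's RootCAs. From the caller's side this is typically driven by two LoadOptions helpers; a sketch where WithCustomCABundle and WithHTTPClient are assumed from current releases of the config package, and the bundle path and transport tweak are illustrative.

package main

import (
	"context"
	"log"
	"net/http"
	"os"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	pem, err := os.Open("/etc/ssl/private-ca.pem") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer pem.Close()

	// A BuildableClient so the resolver can append RootCAs via
	// WithTransportOptions, as the code below does.
	httpClient := awshttp.NewBuildableClient().WithTransportOptions(func(tr *http.Transport) {
		tr.MaxIdleConnsPerHost = 16 // example transport tweak
	})

	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithHTTPClient(httpClient),
		config.WithCustomCABundle(pem),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}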
+// +// Config provider used: +// * customCABundleProvider +func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error { + pemCerts, found, err := getCustomCABundle(ctx, cfgs) + if err != nil { + // TODO error handling, What is the best way to handle this? + // capture previous errors continue. error out if all errors + return err + } + if !found { + return nil + } + + if cfg.HTTPClient == nil { + cfg.HTTPClient = awshttp.NewBuildableClient() + } + + trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+ + "has no WithTransportOptions, %T", cfg.HTTPClient) + } + + var appendErr error + client := trOpts.WithTransportOptions(func(tr *http.Transport) { + if tr.TLSClientConfig == nil { + tr.TLSClientConfig = &tls.Config{} + } + if tr.TLSClientConfig.RootCAs == nil { + tr.TLSClientConfig.RootCAs = x509.NewCertPool() + } + + b, err := ioutil.ReadAll(pemCerts) + if err != nil { + appendErr = fmt.Errorf("failed to read custom CA bundle PEM file") + } + + if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) { + appendErr = fmt.Errorf("failed to load custom CA bundle PEM file") + } + }) + if appendErr != nil { + return appendErr + } + + cfg.HTTPClient = client + return err +} + +// resolveRegion extracts the first instance of a Region from the configs slice. +// +// Config providers used: +// * regionProvider +func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + v, found, err := getRegion(ctx, configs) + if err != nil { + // TODO error handling, What is the best way to handle this? + // capture previous errors continue. error out if all errors + return err + } + if !found { + return nil + } + + cfg.Region = v + return nil +} + +func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error { + var downcastCfgSources []interface{} + for _, cs := range configs { + downcastCfgSources = append(downcastCfgSources, interface{}(cs)) + } + + if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil { + cfg.BaseEndpoint = nil + return nil + } + + v, found, err := getBaseEndpoint(ctx, configs) + if err != nil { + return err + } + + if !found { + return nil + } + cfg.BaseEndpoint = aws.String(v) + return nil +} + +// resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var +func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { + ID, _, err := getAppID(ctx, configs) + if err != nil { + return err + } + + cfg.AppID = ID + return nil +} + +// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's +// SharedConfig or EnvConfig +func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error { + disable, _, err := getDisableRequestCompression(ctx, configs) + if err != nil { + return err + } + + cfg.DisableRequestCompression = disable + return nil +} + +// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's +// SharedConfig or EnvConfig +func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error { + minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs) + if err != nil { + return err + } + // must set a default min size 10240 if not configured + if !found { + minBytes = 10240 + } + cfg.RequestMinCompressSizeBytes = minBytes + return nil +} + +// resolveAccountIDEndpointMode extracts 
the AccountIDEndpointMode from the configs slice's +// SharedConfig or EnvConfig +func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error { + m, found, err := getAccountIDEndpointMode(ctx, configs) + if err != nil { + return err + } + + if !found { + m = aws.AccountIDEndpointModePreferred + } + + cfg.AccountIDEndpointMode = m + return nil +} + +// resolveRequestChecksumCalculation extracts the RequestChecksumCalculation from the configs slice's +// SharedConfig or EnvConfig +func resolveRequestChecksumCalculation(ctx context.Context, cfg *aws.Config, configs configs) error { + c, found, err := getRequestChecksumCalculation(ctx, configs) + if err != nil { + return err + } + + if !found { + c = aws.RequestChecksumCalculationWhenSupported + } + cfg.RequestChecksumCalculation = c + return nil +} + +// resolveResponseValidation extracts the ResponseChecksumValidation from the configs slice's +// SharedConfig or EnvConfig +func resolveResponseChecksumValidation(ctx context.Context, cfg *aws.Config, configs configs) error { + c, found, err := getResponseChecksumValidation(ctx, configs) + if err != nil { + return err + } + + if !found { + c = aws.ResponseChecksumValidationWhenSupported + } + cfg.ResponseChecksumValidation = c + return nil +} + +// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default +// region if region had not been resolved from other sources. +func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + if len(cfg.Region) > 0 { + return nil + } + + v, found, err := getDefaultRegion(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.Region = v + + return nil +} + +// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance +// if one has not been resolved from other sources. +func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error { + c, found, err := getHTTPClient(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.HTTPClient = c + return nil +} + +// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options +// if one has not been resolved from other sources. 
+func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error { + o, found, err := getAPIOptions(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.APIOptions = o + + return nil +} + +// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice +// and sets the functions result on the aws.Config.EndpointResolver +func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error { + endpointResolver, found, err := getEndpointResolver(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.EndpointResolver = endpointResolver + + return nil +} + +// resolveEndpointResolver extracts the first instance of a EndpointResolverFunc from the config slice +// and sets the functions result on the aws.Config.EndpointResolver +func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error { + endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.EndpointResolverWithOptions = endpointResolver + + return nil +} + +func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error { + logger, found, err := getLogger(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.Logger = logger + + return nil +} + +func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error { + mode, found, err := getClientLogMode(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.ClientLogMode = mode + + return nil +} + +func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error { + retryer, found, err := getRetryer(ctx, configs) + if err != nil { + return err + } + + if found { + cfg.Retryer = retryer + return nil + } + + // Only load the retry options if a custom retryer has not be specified. 
+ if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil { + return err + } + return resolveRetryMode(ctx, cfg, configs) +} + +func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + if len(cfg.Region) > 0 { + return nil + } + + region, found, err := getEC2IMDSRegion(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.Region = region + + return nil +} + +func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error { + defaultsMode, found, err := getDefaultsMode(ctx, configs) + if err != nil { + return err + } + if !found { + defaultsMode = aws.DefaultsModeLegacy + } + + var environment aws.RuntimeEnvironment + if defaultsMode == aws.DefaultsModeAuto { + envConfig, _, _ := getAWSConfigSources(configs) + + client, found, err := getDefaultsModeIMDSClient(ctx, configs) + if err != nil { + return err + } + if !found { + client = imds.NewFromConfig(*cfg) + } + + environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client) + if err != nil { + return err + } + } + + cfg.DefaultsMode = defaultsMode + cfg.RuntimeEnvironment = environment + + return nil +} + +func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error { + maxAttempts, found, err := getRetryMaxAttempts(ctx, configs) + if err != nil || !found { + return err + } + cfg.RetryMaxAttempts = maxAttempts + + return nil +} + +func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error { + retryMode, found, err := getRetryMode(ctx, configs) + if err != nil || !found { + return err + } + cfg.RetryMode = retryMode + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go new file mode 100644 index 00000000..a8ebb3c0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go @@ -0,0 +1,122 @@ +package config + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/service/ssooidc" + smithybearer "github.com/aws/smithy-go/auth/bearer" +) + +// resolveBearerAuthToken extracts a token provider from the config sources. +// +// If an explicit bearer authentication token provider is not found the +// resolver will fallback to resolving token provider via other config sources +// such as SharedConfig. +func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error { + found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs) + if found || err != nil { + return err + } + + return resolveBearerAuthTokenProviderChain(ctx, cfg, configs) +} + +// resolveBearerAuthTokenProvider extracts the first instance of +// BearerAuthTokenProvider from the config sources. +// +// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure +// the Token is only refreshed when needed. This also protects the +// TokenProvider so it can be used concurrently. 
+// +// Config providers used: +// * bearerAuthTokenProviderProvider +func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { + tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs) + if !found || err != nil { + return false, err + } + + cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( + ctx, configs, tokenProvider) + if err != nil { + return false, err + } + + return true, nil +} + +func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { + _, sharedConfig, _ := getAWSConfigSources(configs) + + var provider smithybearer.TokenProvider + + if sharedConfig.SSOSession != nil { + provider, err = resolveBearerAuthSSOTokenProvider( + ctx, cfg, sharedConfig.SSOSession, configs) + } + + if err == nil && provider != nil { + cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( + ctx, configs, provider) + } + + return err +} + +func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) { + ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) + if err != nil { + return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) + } + + var optFns []func(*ssocreds.SSOTokenProviderOptions) + if found { + optFns = append(optFns, ssoTokenProviderOptionsFn) + } + + cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name) + if err != nil { + return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err) + } + + client := ssooidc.NewFromConfig(*cfg) + provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...) + + return provider, nil +} + +// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go +// bearer/auth#TokenCache with the provided options if the provider is not +// already a TokenCache. +func wrapWithBearerAuthTokenCache( + ctx context.Context, + cfgs configs, + provider smithybearer.TokenProvider, + optFns ...func(*smithybearer.TokenCacheOptions), +) (smithybearer.TokenProvider, error) { + _, ok := provider.(*smithybearer.TokenCache) + if ok { + return provider, nil + } + + tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs) + if err != nil { + return nil, err + } + + opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns)) + opts = append(opts, func(o *smithybearer.TokenCacheOptions) { + o.RefreshBeforeExpires = 5 * time.Minute + o.RetrieveBearerTokenTimeout = 30 * time.Second + }) + opts = append(opts, optFns...) 
+ if optionsFound { + opts = append(opts, tokenCacheConfigOptions) + } + + return smithybearer.NewTokenCache(provider, opts...), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go new file mode 100644 index 00000000..b00259df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -0,0 +1,627 @@ +package config + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "net/url" + "os" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" + "github.com/aws/aws-sdk-go-v2/credentials/processcreds" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/sso" + "github.com/aws/aws-sdk-go-v2/service/ssooidc" + "github.com/aws/aws-sdk-go-v2/service/sts" +) + +const ( + // valid credential source values + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" + httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" +) + +// direct representation of the IPv4 address for the ECS container +// "169.254.170.2" +var ecsContainerIPv4 net.IP = []byte{ + 169, 254, 170, 2, +} + +// direct representation of the IPv4 address for the EKS container +// "169.254.170.23" +var eksContainerIPv4 net.IP = []byte{ + 169, 254, 170, 23, +} + +// direct representation of the IPv6 address for the EKS container +// "fd00:ec2::23" +var eksContainerIPv6 net.IP = []byte{ + 0xFD, 0, 0xE, 0xC2, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0x23, +} + +var ( + ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing +) + +// resolveCredentials extracts a credential provider from slice of config +// sources. +// +// If an explicit credential provider is not found the resolver will fallback +// to resolving credentials by extracting a credential provider from EnvConfig +// and SharedConfig. +func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { + found, err := resolveCredentialProvider(ctx, cfg, configs) + if found || err != nil { + return err + } + + return resolveCredentialChain(ctx, cfg, configs) +} + +// resolveCredentialProvider extracts the first instance of Credentials from the +// config slices. +// +// The resolved CredentialProvider will be wrapped in a cache to ensure the +// credentials are only refreshed when needed. This also protects the +// credential provider to be used concurrently. +// +// Config providers used: +// * credentialsProviderProvider +func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { + credProvider, found, err := getCredentialsProvider(ctx, configs) + if !found || err != nil { + return false, err + } + + cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider) + if err != nil { + return false, err + } + + return true, nil +} + +// resolveCredentialChain resolves a credential provider chain using EnvConfig +// and SharedConfig if present in the slice of provided configs. +// +// The resolved CredentialProvider will be wrapped in a cache to ensure the +// credentials are only refreshed when needed. 
This also protects the +// credential provider to be used concurrently. +func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { + envConfig, sharedConfig, other := getAWSConfigSources(configs) + + // When checking if a profile was specified programmatically we should only consider the "other" + // configuration sources that have been provided. This ensures we correctly honor the expected credential + // hierarchy. + _, sharedProfileSet, err := getSharedConfigProfile(ctx, other) + if err != nil { + return err + } + + switch { + case sharedProfileSet: + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) + case envConfig.Credentials.HasKeys(): + ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVars) + cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)} + case len(envConfig.WebIdentityTokenFilePath) > 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVarsSTSWebIDToken) + err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs) + default: + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) + } + if err != nil { + return err + } + + // Wrap the resolved provider in a cache so the SDK will cache credentials. + cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials) + if err != nil { + return err + } + + return nil +} + +func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (ctx2 context.Context, err error) { + switch { + case sharedConfig.Source != nil: + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSourceProfile) + // Assume IAM role with credentials source from a different profile. + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs) + + case sharedConfig.Credentials.HasKeys(): + // Static Credentials from Shared Config/Credentials file. + ctx = addCredentialSource(ctx, aws.CredentialSourceProfile) + cfg.Credentials = credentials.StaticCredentialsProvider{ + Value: sharedConfig.Credentials, + Source: getCredentialSources(ctx), + } + + case len(sharedConfig.CredentialSource) != 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileNamedProvider) + ctx, err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs) + + case len(sharedConfig.WebIdentityTokenFile) != 0: + // Credentials from Assume Web Identity token require an IAM Role, and + // that roll will be assumed. May be wrapped with another assume role + // via SourceProfile. 
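Aside: the web-identity branch noted above is also straightforward to construct by hand, which can make the resolution order easier to see. A sketch using the same constructors the code below relies on (NewWebIdentityRoleProvider, IdentityTokenFile, sts.NewFromConfig); LoadDefaultConfig, aws.NewCredentialsCache, the role ARN, and the token path are assumptions or placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	base, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Roughly what assumeWebIdentity wires up from the
	// AWS_WEB_IDENTITY_TOKEN_FILE / web_identity_token_file settings.
	// Role ARN and token path are illustrative values.
	provider := stscreds.NewWebIdentityRoleProvider(
		sts.NewFromConfig(base),
		"arn:aws:iam::123456789012:role/example",
		stscreds.IdentityTokenFile("/var/run/secrets/token"),
	)

	cfg := base.Copy()
	cfg.Credentials = aws.NewCredentialsCache(provider)
	_ = cfg
}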
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSTSWebIDToken) + return ctx, assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs) + + case sharedConfig.hasSSOConfiguration(): + if sharedConfig.hasLegacySSOConfiguration() { + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSOLegacy) + ctx = addCredentialSource(ctx, aws.CredentialSourceSSOLegacy) + } else { + ctx = addCredentialSource(ctx, aws.CredentialSourceSSO) + } + if sharedConfig.SSOSession != nil { + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSO) + } + err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs) + + case len(sharedConfig.CredentialProcess) != 0: + // Get credentials from CredentialProcess + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileProcess) + ctx = addCredentialSource(ctx, aws.CredentialSourceProcess) + err = processCredentials(ctx, cfg, sharedConfig, configs) + + case len(envConfig.ContainerCredentialsRelativePath) != 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) + err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) + + case len(envConfig.ContainerCredentialsEndpoint) != 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) + err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) + + default: + ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS) + err = resolveEC2RoleCredentials(ctx, cfg, configs) + } + if err != nil { + return ctx, err + } + + if len(sharedConfig.RoleARN) > 0 { + return ctx, credsFromAssumeRole(ctx, cfg, sharedConfig, configs) + } + + return ctx, nil +} + +func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { + if err := sharedConfig.validateSSOConfiguration(); err != nil { + return err + } + + var options []func(*ssocreds.Options) + v, found, err := getSSOProviderOptions(ctx, configs) + if err != nil { + return err + } + if found { + options = append(options, v) + } + + cfgCopy := cfg.Copy() + + options = append(options, func(o *ssocreds.Options) { + o.CredentialSources = getCredentialSources(ctx) + }) + + if sharedConfig.SSOSession != nil { + ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) + if err != nil { + return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) + } + var optFns []func(*ssocreds.SSOTokenProviderOptions) + if found { + optFns = append(optFns, ssoTokenProviderOptionsFn) + } + cfgCopy.Region = sharedConfig.SSOSession.SSORegion + cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name) + if err != nil { + return err + } + oidcClient := ssooidc.NewFromConfig(cfgCopy) + tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...) + options = append(options, func(o *ssocreds.Options) { + o.SSOTokenProvider = tokenProvider + o.CachedTokenFilepath = cachedPath + }) + } else { + cfgCopy.Region = sharedConfig.SSORegion + } + + cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...) 
+ + return nil +} + +func ecsContainerURI(path string) string { + return fmt.Sprintf("%s%s", ecsContainerEndpoint, path) +} + +func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { + var opts []func(*processcreds.Options) + + options, found, err := getProcessCredentialOptions(ctx, configs) + if err != nil { + return err + } + if found { + opts = append(opts, options) + } + + opts = append(opts, func(o *processcreds.Options) { + o.CredentialSources = getCredentialSources(ctx) + }) + + cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...) + + return nil +} + +// isAllowedHost allows host to be loopback or known ECS/EKS container IPs +// +// host can either be an IP address OR an unresolved hostname - resolution will +// be automatically performed in the latter case +func isAllowedHost(host string) (bool, error) { + if ip := net.ParseIP(host); ip != nil { + return isIPAllowed(ip), nil + } + + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + + for _, addr := range addrs { + if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { + return false, nil + } + } + + return true, nil +} + +func isIPAllowed(ip net.IP) bool { + return ip.IsLoopback() || + ip.Equal(ecsContainerIPv4) || + ip.Equal(eksContainerIPv4) || + ip.Equal(eksContainerIPv6) +} + +func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error { + var resolveErr error + + parsed, err := url.Parse(endpointURL) + if err != nil { + resolveErr = fmt.Errorf("invalid URL, %w", err) + } else { + host := parsed.Hostname() + if len(host) == 0 { + resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL") + } else if parsed.Scheme == "http" { + if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { + resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr) + } else if !isAllowedHost { + resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host) + } + } + } + + if resolveErr != nil { + return resolveErr + } + + return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs) +} + +func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error { + optFns := []func(*endpointcreds.Options){ + func(options *endpointcreds.Options) { + if len(authToken) != 0 { + options.AuthorizationToken = authToken + } + if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { + options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { + var contents []byte + var err error + if contents, err = ioutil.ReadFile(authFilePath); err != nil { + return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) + } + return string(contents), nil + }) + } + options.APIOptions = cfg.APIOptions + if cfg.Retryer != nil { + options.Retryer = cfg.Retryer() + } + options.CredentialSources = getCredentialSources(ctx) + }, + } + + optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + if found { + optFns = append(optFns, optFn) + } + + provider := endpointcreds.New(url, optFns...) 
+ + cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) { + options.ExpiryWindow = 5 * time.Minute + }) + if err != nil { + return err + } + + return nil +} + +func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (context.Context, error) { + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS) + return ctx, resolveEC2RoleCredentials(ctx, cfg, configs) + + case credSourceEnvironment: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) + cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)} + + case credSourceECSContainer: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) + if len(envConfig.ContainerCredentialsRelativePath) != 0 { + return ctx, resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) + } + if len(envConfig.ContainerCredentialsEndpoint) != 0 { + return ctx, resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) + } + return ctx, fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set") + + default: + return ctx, fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") + } + + return ctx, nil +} + +func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { + optFns := make([]func(*ec2rolecreds.Options), 0, 2) + + optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + if found { + optFns = append(optFns, optFn) + } + + optFns = append(optFns, func(o *ec2rolecreds.Options) { + // Only define a client from config if not already defined. + if o.Client == nil { + o.Client = imds.NewFromConfig(*cfg) + } + o.CredentialSources = getCredentialSources(ctx) + }) + + provider := ec2rolecreds.New(optFns...) + + cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider) + if err != nil { + return err + } + return nil +} + +func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) { + var ( + envConfig *EnvConfig + sharedConfig *SharedConfig + other configs + ) + + for i := range cfgs { + switch c := cfgs[i].(type) { + case EnvConfig: + if envConfig == nil { + envConfig = &c + } + case *EnvConfig: + if envConfig == nil { + envConfig = c + } + case SharedConfig: + if sharedConfig == nil { + sharedConfig = &c + } + case *SharedConfig: + if envConfig == nil { + sharedConfig = c + } + default: + other = append(other, c) + } + } + + if envConfig == nil { + envConfig = &EnvConfig{} + } + + if sharedConfig == nil { + sharedConfig = &SharedConfig{} + } + + return envConfig, sharedConfig, other +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. 
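When no other branch applies, the chain ends at `resolveEC2RoleCredentials`, which builds an IMDS-backed provider and wraps it in a credentials cache via `wrapWithCredentialsCache`. A rough hand-built equivalent for illustration (it only succeeds where IMDS is actually reachable, e.g. on an EC2 instance with an instance profile):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// Explicit IMDS client, roughly what the resolver supplies when none is set.
	provider := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
		o.Client = imds.New(imds.Options{})
	})

	// wrapWithCredentialsCache does the equivalent of this, adding any
	// caller-supplied cache options gathered from the config sources.
	cached := aws.NewCredentialsCache(provider)

	creds, err := cached.Retrieve(context.Background())
	if err != nil {
		log.Fatal(err) // expected to fail off-EC2
	}
	log.Println("instance role credentials from:", creds.Source)
}
```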
+type AssumeRoleTokenProviderNotSetError struct{} + +// Error is the error message +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error { + if len(filepath) == 0 { + return fmt.Errorf("token file path is not set") + } + + optFns := []func(*stscreds.WebIdentityRoleOptions){ + func(options *stscreds.WebIdentityRoleOptions) { + options.RoleSessionName = sessionName + }, + } + + optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + + if found { + optFns = append(optFns, optFn) + } + + opts := stscreds.WebIdentityRoleOptions{ + RoleARN: roleARN, + } + + optFns = append(optFns, func(options *stscreds.WebIdentityRoleOptions) { + options.CredentialSources = getCredentialSources(ctx) + }) + + for _, fn := range optFns { + fn(&opts) + } + + if len(opts.RoleARN) == 0 { + return fmt.Errorf("role ARN is not set") + } + + client := opts.Client + if client == nil { + client = sts.NewFromConfig(*cfg) + } + + provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...) + + cfg.Credentials = provider + + return nil +} + +func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) { + // resolve credentials early + credentialSources := getCredentialSources(ctx) + optFns := []func(*stscreds.AssumeRoleOptions){ + func(options *stscreds.AssumeRoleOptions) { + options.RoleSessionName = sharedCfg.RoleSessionName + if sharedCfg.RoleDurationSeconds != nil { + if *sharedCfg.RoleDurationSeconds/time.Minute > 15 { + options.Duration = *sharedCfg.RoleDurationSeconds + } + } + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + options.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) != 0 { + options.SerialNumber = aws.String(sharedCfg.MFASerial) + } + + // add existing credential chain + options.CredentialSources = credentialSources + }, + } + + optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + if found { + optFns = append(optFns, optFn) + } + + { + // Synthesize options early to validate configuration errors sooner to ensure a token provider + // is present if the SerialNumber was set. + var o stscreds.AssumeRoleOptions + for _, fn := range optFns { + fn(&o) + } + if o.TokenProvider == nil && o.SerialNumber != nil { + return AssumeRoleTokenProviderNotSetError{} + } + } + cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...) + + return nil +} + +// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache +// with the provided options if the provider is not already a +// aws.CredentialsCache. 
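`credsFromAssumeRole` returns `AssumeRoleTokenProviderNotSetError` when `mfa_serial` is configured on the profile but no token provider was supplied. A hedged sketch of how a caller typically satisfies that requirement, assuming a hypothetical profile `mfa-role` that sets `role_arn`, `source_profile`, and `mfa_serial`:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithSharedConfigProfile("mfa-role"), // hypothetical profile with mfa_serial set
		config.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
			// Prompt on stdin for the MFA code when STS asks for one;
			// without some TokenProvider, loading this profile fails.
			o.TokenProvider = stscreds.StdinTokenProvider
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```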
+func wrapWithCredentialsCache( + ctx context.Context, + cfgs configs, + provider aws.CredentialsProvider, + optFns ...func(options *aws.CredentialsCacheOptions), +) (aws.CredentialsProvider, error) { + _, ok := provider.(*aws.CredentialsCache) + if ok { + return provider, nil + } + + credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs) + if err != nil { + return nil, err + } + + // force allocation of a new slice if the additional options are + // needed, to prevent overwriting the passed in slice of options. + optFns = optFns[:len(optFns):len(optFns)] + if optionsFound { + optFns = append(optFns, credCacheOptions) + } + + return aws.NewCredentialsCache(provider, optFns...), nil +} + +// credentialSource stores the chain of providers that was used to create an instance of +// a credentials provider on the context +type credentialSource struct{} + +func addCredentialSource(ctx context.Context, source aws.CredentialSource) context.Context { + existing, ok := ctx.Value(credentialSource{}).([]aws.CredentialSource) + if !ok { + existing = []aws.CredentialSource{source} + } else { + existing = append(existing, source) + } + return context.WithValue(ctx, credentialSource{}, existing) +} + +func getCredentialSources(ctx context.Context) []aws.CredentialSource { + return ctx.Value(credentialSource{}).([]aws.CredentialSource) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go new file mode 100644 index 00000000..00b071fe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -0,0 +1,1680 @@ +package config + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/internal/ini" + "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" + "github.com/aws/smithy-go/logging" + smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression" +) + +const ( + // Prefix to use for filtering profiles. The profile prefix should only + // exist in the shared config file, not the credentials file. + profilePrefix = `profile ` + + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. + ssoSectionPrefix = `sso-session ` + + // Prefix for services section. It is referenced in profile via the services + // parameter to configure clients for service-specific parameters. 
+ servicesPrefix = `services ` + + // string equivalent for boolean + endpointDiscoveryDisabled = `false` + endpointDiscoveryEnabled = `true` + endpointDiscoveryAuto = `auto` + + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + + ssoRegionKey = "sso_region" + ssoStartURLKey = "sso_start_url" + + ssoAccountIDKey = "sso_account_id" + ssoRoleNameKey = "sso_role_name" + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" + + ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" + + ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + + ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" + + // Use DualStack Endpoint Resolution + useDualStackEndpoint = "use_dualstack_endpoint" + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` + + // S3 Disable Multi-Region AccessPoints + s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points` + + useFIPSEndpointKey = "use_fips_endpoint" + + defaultsModeKey = "defaults_mode" + + // Retry options + retryMaxAttemptsKey = "max_attempts" + retryModeKey = "retry_mode" + + caBundleKey = "ca_bundle" + + sdkAppID = "sdk_ua_app_id" + + ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls" + + endpointURL = "endpoint_url" + + servicesSectionKey = "services" + + disableRequestCompression = "disable_request_compression" + requestMinCompressionSizeBytes = "request_min_compression_size_bytes" + + s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" + + accountIDKey = "aws_account_id" + accountIDEndpointMode = "account_id_endpoint_mode" + + requestChecksumCalculationKey = "request_checksum_calculation" + responseChecksumValidationKey = "response_checksum_validation" + checksumWhenSupported = "when_supported" + checksumWhenRequired = "when_required" +) + +// defaultSharedConfigProfile allows for swapping the default profile for testing +var defaultSharedConfigProfile = DefaultSharedConfigProfile + +// DefaultSharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. 
+// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func DefaultSharedCredentialsFilename() string { + return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials") +} + +// DefaultSharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func DefaultSharedConfigFilename() string { + return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config") +} + +// DefaultSharedConfigFiles is a slice of the default shared config files that +// the will be used in order to load the SharedConfig. +var DefaultSharedConfigFiles = []string{ + DefaultSharedConfigFilename(), +} + +// DefaultSharedCredentialsFiles is a slice of the default shared credentials +// files that the will be used in order to load the SharedConfig. +var DefaultSharedCredentialsFiles = []string{ + DefaultSharedCredentialsFilename(), +} + +// SSOSession provides the shared configuration parameters of the sso-session +// section. +type SSOSession struct { + Name string + SSORegion string + SSOStartURL string +} + +func (s *SSOSession) setFromIniSection(section ini.Section) { + updateString(&s.Name, section, ssoSessionNameKey) + updateString(&s.SSORegion, section, ssoRegionKey) + updateString(&s.SSOStartURL, section, ssoStartURLKey) +} + +// Services contains values configured in the services section +// of the AWS configuration file. +type Services struct { + // Services section values + // {"serviceId": {"key": "value"}} + // e.g. {"s3": {"endpoint_url": "example.com"}} + ServiceValues map[string]map[string]string +} + +func (s *Services) setFromIniSection(section ini.Section) { + if s.ServiceValues == nil { + s.ServiceValues = make(map[string]map[string]string) + } + for _, service := range section.List() { + s.ServiceValues[service] = section.Map(service) + } +} + +// SharedConfig represents the configuration fields of the SDK config files. +type SharedConfig struct { + Profile string + + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Credentials aws.Credentials + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + // SSO session options + SSOSessionName string + SSOSession *SSOSession + + // Legacy SSO session options + SSORegion string + SSOStartURL string + + // SSO fields not used + SSOAccountID string + SSORoleName string + + RoleARN string + ExternalID string + MFASerial string + RoleSessionName string + RoleDurationSeconds *time.Duration + + SourceProfileName string + Source *SharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region = us-west-2 + Region string + + // EnableEndpointDiscovery can be enabled or disabled in the shared config + // by setting endpoint_discovery_enabled to true, or false respectively. 
+ // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery aws.EndpointDiscoveryEnableState + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion *bool + + // Specifies the EC2 Instance Metadata Service default endpoint selection + // mode (IPv4 or IPv6) + // + // ec2_metadata_service_endpoint_mode=IPv6 + EC2IMDSEndpointMode imds.EndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If + // specified it overrides EC2IMDSEndpointMode. + // + // ec2_metadata_service_endpoint=http://fd00:ec2::254 + EC2IMDSEndpoint string + + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // ec2_metadata_v1_disabled=true + EC2IMDSv1Disabled *bool + + // Specifies if the S3 service should disable support for Multi-Region + // access-points + // + // s3_disable_multiregion_access_points=true + S3DisableMultiRegionAccessPoints *bool + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // use_dualstack_endpoint=true + UseDualStackEndpoint aws.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // use_fips_endpoint=true + UseFIPSEndpoint aws.FIPSEndpointState + + // Specifies which defaults mode should be used by services. + // + // defaults_mode=standard + DefaultsMode aws.DefaultsMode + + // Specifies the maximum number attempts an API client will call an + // operation that fails with a retryable error. + // + // max_attempts=3 + RetryMaxAttempts int + + // Specifies the retry model the API client will be created with. + // + // retry_mode=standard + RetryMode aws.RetryMode + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. Only use + // this if you want to configure the SDK to use a custom set of CAs. + // + // Enabling this option will attempt to merge the Transport into the SDK's + // HTTP client. If the client's Transport is not a http.Transport an error + // will be returned. If the Transport's TLS config is set this option will + // cause the SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this + // setting. To use this option and custom HTTP client, the HTTP client + // needs to be provided when creating the config. Not the service client. + // + // ca_bundle=$HOME/my_custom_ca_bundle + CustomCABundle string + + // aws sdk app ID that can be added to user agent header string + AppID string + + // Flag used to disable configured endpoints. + IgnoreConfiguredEndpoints *bool + + // Value to contain configured endpoints to be propagated to + // corresponding endpoint resolution field. + BaseEndpoint string + + // Services section config. + ServicesSectionName string + Services Services + + // determine if request compression is allowed, default to false + // retrieved from config file's profile field disable_request_compression + DisableRequestCompression *bool + + // inclusive threshold request body size to trigger compression, + // default to 10240 and must be within 0 and 10485760 bytes inclusive + // retrieved from config file's profile field request_min_compression_size_bytes + RequestMinCompressSizeBytes *int64 + + // Whether S3Express auth is disabled. 
+ // + // This will NOT prevent requests from being made to S3Express buckets, it + // will only bypass the modified endpoint routing and signing behaviors + // associated with the feature. + S3DisableExpressAuth *bool + + AccountIDEndpointMode aws.AccountIDEndpointMode + + // RequestChecksumCalculation indicates if the request checksum should be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + + // ResponseChecksumValidation indicates if the response checksum should be validated + ResponseChecksumValidation aws.ResponseChecksumValidation +} + +func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { + if len(c.DefaultsMode) == 0 { + return "", false, nil + } + + return c.DefaultsMode, true, nil +} + +// GetRetryMaxAttempts returns the maximum number of attempts an API client +// created Retryer should attempt an operation call before failing. +func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) { + if c.RetryMaxAttempts == 0 { + return 0, false, nil + } + + return c.RetryMaxAttempts, true, nil +} + +// GetRetryMode returns the model the API client should create its Retryer in. +func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) { + if len(c.RetryMode) == 0 { + return "", false, nil + } + + return c.RetryMode, true, nil +} + +// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region +// the client's requests are sent to. +func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { + if c.S3UseARNRegion == nil { + return false, false, nil + } + + return *c.S3UseARNRegion, true, nil +} + +// GetEnableEndpointDiscovery returns if the enable_endpoint_discovery is set. +func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { + if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + + return c.EnableEndpointDiscovery, true, nil +} + +// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region +// access-points. +func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { + if c.S3DisableMultiRegionAccessPoints == nil { + return false, false, nil + } + + return *c.S3DisableMultiRegionAccessPoints, true, nil +} + +// GetRegion returns the region for the profile if a region is set. +func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) { + if len(c.Region) == 0 { + return "", false, nil + } + return c.Region, true, nil +} + +// GetCredentialsProvider returns the credentials for a profile if they were set. +func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) { + return c.Credentials, true, nil +} + +// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. +func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { + if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { + return imds.EndpointModeStateUnset, false, nil + } + + return c.EC2IMDSEndpointMode, true, nil +} + +// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. 
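The getters on `SharedConfig` follow a shared `(value, found, error)` convention, so the loader can probe it alongside other configuration sources and take the first one that reports a value as found. A minimal illustration of that pattern in isolation (this is not the SDK's actual resolver loop, just the shape of it):

```go
package main

import "fmt"

type regionProvider interface {
	getRegion() (string, bool, error)
}

type staticRegion string

func (s staticRegion) getRegion() (string, bool, error) {
	if s == "" {
		return "", false, nil // not set in this source
	}
	return string(s), true, nil
}

// resolveRegion stops at the first source that reports found=true.
func resolveRegion(sources ...regionProvider) (string, error) {
	for _, src := range sources {
		v, found, err := src.getRegion()
		if err != nil {
			return "", err
		}
		if found {
			return v, nil
		}
	}
	return "", fmt.Errorf("region not configured")
}

func main() {
	r, err := resolveRegion(staticRegion(""), staticRegion("eu-central-1"))
	fmt.Println(r, err)
}
```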
+func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) { + if len(c.EC2IMDSEndpoint) == 0 { + return "", false, nil + } + + return c.EC2IMDSEndpoint, true, nil +} + +// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option +// resolver interface. +func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { + if c.EC2IMDSv1Disabled == nil { + return false, false + } + + return *c.EC2IMDSv1Disabled, true +} + +// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be +// used for requests. +func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { + if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset { + return aws.DualStackEndpointStateUnset, false, nil + } + + return c.UseDualStackEndpoint, true, nil +} + +// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be +// used for requests. +func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { + if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { + return aws.FIPSEndpointStateUnset, false, nil + } + + return c.UseFIPSEndpoint, true, nil +} + +// GetS3DisableExpressAuth returns the configured value for +// [SharedConfig.S3DisableExpressAuth]. +func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) { + if c.S3DisableExpressAuth == nil { + return false, false + } + + return *c.S3DisableExpressAuth, true +} + +// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was +func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { + if len(c.CustomCABundle) == 0 { + return nil, false, nil + } + + b, err := ioutil.ReadFile(c.CustomCABundle) + if err != nil { + return nil, false, err + } + return bytes.NewReader(b), true, nil +} + +// getAppID returns the sdk app ID if set in shared config profile +func (c SharedConfig) getAppID(context.Context) (string, bool, error) { + return c.AppID, len(c.AppID) > 0, nil +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { + if c.IgnoreConfiguredEndpoints == nil { + return false, false, nil + } + + return *c.IgnoreConfiguredEndpoints, true, nil +} + +func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) { + return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil +} + +// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use +// with configured endpoints. +func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { + if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok { + if endpt, ok := service[endpointURL]; ok { + return endpt, true, nil + } + } + return "", false, nil +} + +func normalizeShared(sdkID string) string { + lower := strings.ToLower(sdkID) + return strings.ReplaceAll(lower, " ", "_") +} + +func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) { + return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil +} + +// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the +// addition of ignoring when none of the files exist or when the profile +// is not found in any of the files. 
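`GetServiceBaseEndpoint` lower-cases the SDK ID and replaces spaces with underscores before looking it up in the services section. A small sketch of that normalization with made-up endpoint values:

```go
package main

import (
	"fmt"
	"strings"
)

// normalize mirrors the key normalization used for services-section lookups.
func normalize(sdkID string) string {
	return strings.ReplaceAll(strings.ToLower(sdkID), " ", "_")
}

func main() {
	// Illustrative services-section values, e.g. pointing at a local emulator.
	services := map[string]map[string]string{
		"s3":                {"endpoint_url": "http://localhost:4566"},
		"elastic_beanstalk": {"endpoint_url": "http://localhost:4567"},
	}
	for _, id := range []string{"S3", "Elastic Beanstalk"} {
		if svc, ok := services[normalize(id)]; ok {
			fmt.Println(id, "->", svc["endpoint_url"])
		}
	}
}
```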
+func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) { + cfg, err := loadSharedConfig(ctx, configs) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistError); ok { + return SharedConfig{}, nil + } + return nil, err + } + + return cfg, nil +} + +// loadSharedConfig uses the configs passed in to load the SharedConfig from file +// The file names and profile name are sourced from the configs. +// +// If profile name is not provided DefaultSharedConfigProfile (default) will +// be used. +// +// If shared config filenames are not provided DefaultSharedConfigFiles will +// be used. +// +// Config providers used: +// * sharedConfigProfileProvider +// * sharedConfigFilesProvider +func loadSharedConfig(ctx context.Context, configs configs) (Config, error) { + var profile string + var configFiles []string + var credentialsFiles []string + var ok bool + var err error + + profile, ok, err = getSharedConfigProfile(ctx, configs) + if err != nil { + return nil, err + } + if !ok { + profile = defaultSharedConfigProfile + } + + configFiles, ok, err = getSharedConfigFiles(ctx, configs) + if err != nil { + return nil, err + } + + credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs) + if err != nil { + return nil, err + } + + // setup logger if log configuration warning is seti + var logger logging.Logger + logWarnings, found, err := getLogConfigurationWarnings(ctx, configs) + if err != nil { + return SharedConfig{}, err + } + if found && logWarnings { + logger, found, err = getLogger(ctx, configs) + if err != nil { + return SharedConfig{}, err + } + if !found { + logger = logging.NewStandardLogger(os.Stderr) + } + } + + return LoadSharedConfigProfile(ctx, profile, + func(o *LoadSharedConfigOptions) { + o.Logger = logger + o.ConfigFiles = configFiles + o.CredentialsFiles = credentialsFiles + }, + ) +} + +// LoadSharedConfigOptions struct contains optional values that can be used to load the config. +type LoadSharedConfigOptions struct { + + // CredentialsFiles are the shared credentials files + CredentialsFiles []string + + // ConfigFiles are the shared config files + ConfigFiles []string + + // Logger is the logger used to log shared config behavior + Logger logging.Logger +} + +// LoadSharedConfigProfile retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of A's. +// +// If config files are not set, SDK will default to using a file at location `.aws/config` if present. +// If credentials files are not set, SDK will default to using a file at location `.aws/credentials` if present. +// No default files are set, if files set to an empty slice. 
+// +// You can read more about shared config and credentials file location at +// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location +func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { + var option LoadSharedConfigOptions + for _, fn := range optFns { + fn(&option) + } + + if option.ConfigFiles == nil { + option.ConfigFiles = DefaultSharedConfigFiles + } + + if option.CredentialsFiles == nil { + option.CredentialsFiles = DefaultSharedCredentialsFiles + } + + // load shared configuration sections from shared configuration INI options + configSections, err := loadIniFiles(option.ConfigFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processConfigSections(ctx, &configSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + // load shared credentials sections from shared credentials INI options + credentialsSections, err := loadIniFiles(option.CredentialsFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processCredentialsSections(ctx, &credentialsSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + err = mergeSections(&configSections, credentialsSections) + if err != nil { + return SharedConfig{}, err + } + + cfg := SharedConfig{} + profiles := map[string]struct{}{} + + if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { + return SharedConfig{}, err + } + + return cfg, nil +} + +func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { + skipSections := map[string]struct{}{} + + for _, section := range sections.List() { + if _, ok := skipSections[section]; ok { + continue + } + + // drop sections from config file that do not have expected prefixes. + switch { + case strings.HasPrefix(section, profilePrefix): + // Rename sections to remove "profile " prefixing to match with + // credentials file. If default is already present, it will be + // dropped. + newName, err := renameProfileSection(section, sections, logger) + if err != nil { + return fmt.Errorf("failed to rename profile section, %w", err) + } + skipSections[newName] = struct{}{} + + case strings.HasPrefix(section, ssoSectionPrefix): + case strings.HasPrefix(section, servicesPrefix): + case strings.EqualFold(section, "default"): + default: + // drop this section, as invalid profile name + sections.DeleteSection(section) + + if logger != nil { + logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+ + "For use within a shared configuration file, "+ + "a non-default profile must have `profile ` "+ + "prefixed to the profile name.", + section, + ) + } + } + } + return nil +} + +func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) { + v, ok := sections.GetSection(section) + if !ok { + return "", fmt.Errorf("error processing profiles within the shared configuration files") + } + + // delete section with profile as prefix + sections.DeleteSection(section) + + // set the value to non-prefixed name in sections. 
+ section = strings.TrimPrefix(section, profilePrefix) + if sections.HasSection(section) { + oldSection, _ := sections.GetSection(section) + v.Logs = append(v.Logs, + fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+ + "overriding non-default profile from %s", + v.SourceFile, oldSection.SourceFile)) + sections.DeleteSection(section) + } + + // assign non-prefixed name to section + v.Name = section + sections.SetSection(section, v) + + return section, nil +} + +func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { + for _, section := range sections.List() { + // drop profiles with prefix for credential files + if strings.HasPrefix(section, profilePrefix) { + // drop this section, as invalid profile name + sections.DeleteSection(section) + + if logger != nil { + logger.Logf(logging.Debug, + "The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+ + "for the shared credentials file.\n", + section, + ) + } + } + } + return nil +} + +func loadIniFiles(filenames []string) (ini.Sections, error) { + mergedSections := ini.NewSections() + + for _, filename := range filenames { + sections, err := ini.OpenFile(filename) + var v *ini.UnableToReadFile + if ok := errors.As(err, &v); ok { + // Skip files which can't be opened and read for whatever reason. + // We treat such files as empty, and do not fall back to other locations. + continue + } else if err != nil { + return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} + } + + // mergeSections into mergedSections + err = mergeSections(&mergedSections, sections) + if err != nil { + return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} + } + } + + return mergedSections, nil +} + +// mergeSections merges source section properties into destination section properties +func mergeSections(dst *ini.Sections, src ini.Sections) error { + for _, sectionName := range src.List() { + srcSection, _ := src.GetSection(sectionName) + + if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) || + (srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) { + srcSection.Errors = append(srcSection.Errors, + fmt.Errorf("partial credentials found for profile %v", sectionName)) + } + + if !dst.HasSection(sectionName) { + dst.SetSection(sectionName, srcSection) + continue + } + + // merge with destination srcSection + dstSection, _ := dst.GetSection(sectionName) + + // errors should be overriden if any + dstSection.Errors = srcSection.Errors + + // Access key id update + if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) { + accessKey := srcSection.String(accessKeyIDKey) + secretKey := srcSection.String(secretAccessKey) + + if dstSection.Has(accessKeyIDKey) { + dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey, + dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey])) + } + + // update access key + v, err := ini.NewStringValue(accessKey) + if err != nil { + return fmt.Errorf("error merging access key, %w", err) + } + dstSection.UpdateValue(accessKeyIDKey, v) + + // update secret key + v, err = ini.NewStringValue(secretKey) + if err != nil { + return fmt.Errorf("error merging secret key, %w", err) + } + dstSection.UpdateValue(secretAccessKey, v) + + // update session token + if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil { + return err + } + + // update source 
file to reflect where the static creds came from + dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey]) + dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey]) + } + + stringKeys := []string{ + roleArnKey, + sourceProfileKey, + credentialSourceKey, + externalIDKey, + mfaSerialKey, + roleSessionNameKey, + regionKey, + enableEndpointDiscoveryKey, + credentialProcessKey, + webIdentityTokenFileKey, + s3UseARNRegionKey, + s3DisableMultiRegionAccessPointsKey, + ec2MetadataServiceEndpointModeKey, + ec2MetadataServiceEndpointKey, + ec2MetadataV1DisabledKey, + useDualStackEndpoint, + useFIPSEndpointKey, + defaultsModeKey, + retryModeKey, + caBundleKey, + roleDurationSecondsKey, + retryMaxAttemptsKey, + + ssoSessionNameKey, + ssoAccountIDKey, + ssoRegionKey, + ssoRoleNameKey, + ssoStartURLKey, + } + for i := range stringKeys { + if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil { + return err + } + } + + // set srcSection on dst srcSection + *dst = dst.SetSection(sectionName, dstSection) + } + + return nil +} + +func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error { + if srcSection.Has(key) { + srcValue := srcSection.String(key) + val, err := ini.NewStringValue(srcValue) + if err != nil { + return fmt.Errorf("error merging %s, %w", key, err) + } + + if dstSection.Has(key) { + dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key, + dstSection.SourceFile[key], srcSection.SourceFile[key])) + } + + dstSection.UpdateValue(key, val) + dstSection.UpdateSourceFile(key, srcSection.SourceFile[key]) + } + return nil +} + +func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string { + return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+ + "with a %v value found in a duplicate profile defined at file %v. \n", + sectionName, key, dstSourceFile, key, srcSourceFile) +} + +// Returns an error if all of the files fail to load. If at least one file is +// successfully loaded and contains the profile, no error will be returned. +func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string, + sections ini.Sections, logger logging.Logger) error { + c.Profile = profile + + section, ok := sections.GetSection(profile) + if !ok { + return SharedConfigProfileNotExistError{ + Profile: profile, + } + } + + // if logs are appended to the section, log them + if section.Logs != nil && logger != nil { + for _, log := range section.Logs { + logger.Logf(logging.Debug, log) + } + } + + // set config from the provided INI section + err := c.setFromIniSection(profile, section) + if err != nil { + return fmt.Errorf("error fetching config from profile, %v, %w", profile, err) + } + + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self linked instance of the + // profile only have credential provider options. + c.clearAssumeRoleOptions() + } else { + // First time a profile has been seen. Assert if the credential type + // requires a role ARN, the ARN is also set + if err := c.validateCredentialsConfig(profile); err != nil { + return err + } + } + + // if not top level profile and has credentials, return with credentials. 
+ if len(profiles) != 0 && c.Credentials.HasKeys() { + return nil + } + + profiles[profile] = struct{}{} + + // validate no colliding credentials type are present + if err := c.validateCredentialType(); err != nil { + return err + } + + // Link source profiles for assume roles + if len(c.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + c.clearCredentialOptions() + + srcCfg := &SharedConfig{} + err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger) + if err != nil { + // SourceProfileName that doesn't exist is an error in configuration. + if _, ok := err.(SharedConfigProfileNotExistError); ok { + err = SharedConfigAssumeRoleError{ + RoleARN: c.RoleARN, + Profile: c.SourceProfileName, + Err: err, + } + } + return err + } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: c.RoleARN, + Profile: c.SourceProfileName, + } + } + + c.Source = srcCfg + } + + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. + if c.hasSSOTokenProviderConfiguration() { + section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName)) + if !ok { + return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName) + } + var ssoSession SSOSession + ssoSession.setFromIniSection(section) + ssoSession.Name = c.SSOSessionName + c.SSOSession = &ssoSession + } + + if len(c.ServicesSectionName) > 0 { + if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok { + var svcs Services + svcs.setFromIniSection(section) + c.Services = svcs + } + } + + return nil +} + +// setFromIniSection loads the configuration from the profile section defined in +// the provided INI file. A SharedConfig pointer type value is used so that +// multiple config file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For example +// if a config file only includes aws_access_key_id but no aws_secret_access_key +// the aws_access_key_id will be ignored. 
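`setFromIniSections` is what ultimately backs the exported `LoadSharedConfigProfile` helper: config-file sections need the `profile ` prefix, `source_profile` links are resolved recursively, and `sso-session`/`services` sections are attached by name. A usage sketch against a throwaway config file (the path, profile names, and key values are invented for the example):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfgPath := filepath.Join(os.TempDir(), "example_aws_config")
	contents := `[profile ci]
region = eu-west-1
role_arn = arn:aws:iam::123456789012:role/ci
source_profile = default

[default]
aws_access_key_id = AKIDEXAMPLE
aws_secret_access_key = secretEXAMPLE
`
	if err := os.WriteFile(cfgPath, []byte(contents), 0o600); err != nil {
		log.Fatal(err)
	}

	sc, err := config.LoadSharedConfigProfile(context.Background(), "ci",
		func(o *config.LoadSharedConfigOptions) {
			o.ConfigFiles = []string{cfgPath}
			o.CredentialsFiles = []string{} // empty slice: skip ~/.aws/credentials entirely
		})
	if err != nil {
		log.Fatal(err)
	}
	// The "ci" profile links its credentials from the "default" source profile.
	fmt.Println(sc.Region, sc.RoleARN, sc.SourceProfileName)
}
```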
+func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error { + if len(section.Name) == 0 { + sources := make([]string, 0) + for _, v := range section.SourceFile { + sources = append(sources, v) + } + + return fmt.Errorf("parsing error : could not find profile section name after processing files: %v", sources) + } + + if len(section.Errors) != 0 { + var errStatement string + for i, e := range section.Errors { + errStatement = fmt.Sprintf("%d, %v\n", i+1, e.Error()) + } + return fmt.Errorf("Error using profile: \n %v", errStatement) + } + + // Assume Role + updateString(&c.RoleARN, section, roleArnKey) + updateString(&c.ExternalID, section, externalIDKey) + updateString(&c.MFASerial, section, mfaSerialKey) + updateString(&c.RoleSessionName, section, roleSessionNameKey) + updateString(&c.SourceProfileName, section, sourceProfileKey) + updateString(&c.CredentialSource, section, credentialSourceKey) + updateString(&c.Region, section, regionKey) + + // AWS Single Sign-On (AWS SSO) + // SSO session options + updateString(&c.SSOSessionName, section, ssoSessionNameKey) + + // Legacy SSO session options + updateString(&c.SSORegion, section, ssoRegionKey) + updateString(&c.SSOStartURL, section, ssoStartURLKey) + + // SSO fields not used + updateString(&c.SSOAccountID, section, ssoAccountIDKey) + updateString(&c.SSORoleName, section, ssoRoleNameKey) + + // we're retaining a behavioral quirk with this field that existed before + // the removal of literal parsing for #2276: + // - if the key is missing, the config field will not be set + // - if the key is set to a non-numeric, the config field will be set to 0 + if section.Has(roleDurationSecondsKey) { + if v, ok := section.Int(roleDurationSecondsKey); ok { + c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second) + } else { + c.RoleDurationSeconds = aws.Duration(time.Duration(0)) + } + } + + updateString(&c.CredentialProcess, section, credentialProcessKey) + updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey) + updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey) + updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey) + + if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err) + } + updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) + + updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint) + updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey) + + if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err) + } + + if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err) + } + if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err) + } + + updateString(&c.CustomCABundle, section, caBundleKey) + + // user agent app ID added to 
request User-Agent header + updateString(&c.AppID, section, sdkAppID) + + updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints) + + updateString(&c.BaseEndpoint, section, endpointURL) + + if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err) + } + if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) + } + + if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err) + } + + if err := updateRequestChecksumCalculation(&c.RequestChecksumCalculation, section, requestChecksumCalculationKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", requestChecksumCalculationKey, err) + } + if err := updateResponseChecksumValidation(&c.ResponseChecksumValidation, section, responseChecksumValidationKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", responseChecksumValidationKey, err) + } + + // Shared Credentials + creds := aws.Credentials{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), + AccountID: section.String(accountIDKey), + } + + if creds.HasKeys() { + c.Credentials = creds + } + + updateString(&c.ServicesSectionName, section, servicesSectionKey) + + return nil +} + +func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v, ok := sec.Int(key) + if !ok { + return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key)) + } + if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes { + return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v) + } + *bytes = new(int64) + **bytes = v + return nil +} + +func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch { + case v == "true": + *disable = new(bool) + **disable = true + case v == "false": + *disable = new(bool) + **disable = false + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v) + } + return nil +} + +func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch v { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v) + } + + return nil +} + +func updateRequestChecksumCalculation(m *aws.RequestChecksumCalculation, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch strings.ToLower(v) { + case checksumWhenSupported: + *m = 
aws.RequestChecksumCalculationWhenSupported + case checksumWhenRequired: + *m = aws.RequestChecksumCalculationWhenRequired + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v) + } + + return nil +} + +func updateResponseChecksumValidation(m *aws.ResponseChecksumValidation, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch strings.ToLower(v) { + case checksumWhenSupported: + *m = aws.ResponseChecksumValidationWhenSupported + case checksumWhenRequired: + *m = aws.ResponseChecksumValidationWhenRequired + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v) + } + + return nil +} + +func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { + if c.RequestMinCompressSizeBytes == nil { + return 0, false, nil + } + return *c.RequestMinCompressSizeBytes, true, nil +} + +func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) { + if c.DisableRequestCompression == nil { + return false, false, nil + } + return *c.DisableRequestCompression, true, nil +} + +func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + +func (c SharedConfig) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { + return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil +} + +func (c SharedConfig) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) { + return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil +} + +func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + value := section.String(key) + if ok := mode.SetFromString(value); !ok { + return fmt.Errorf("invalid value: %s", value) + } + return nil +} + +func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) { + if !section.Has(key) { + return nil + } + value := section.String(key) + if *mode, err = aws.ParseRetryMode(value); err != nil { + return err + } + return nil +} + +func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + value := section.String(key) + return endpointMode.SetFromString(value) +} + +func (c *SharedConfig) validateCredentialsConfig(profile string) error { + if err := c.validateCredentialsRequireARN(profile); err != nil { + return err + } + + return nil +} + +func (c *SharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(c.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(c.CredentialSource) != 0: + credSource = credentialSourceKey + case len(c.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(c.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (c *SharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
+ if !oneOrNone( + len(c.SourceProfileName) != 0, + len(c.CredentialSource) != 0, + len(c.CredentialProcess) != 0, + len(c.WebIdentityTokenFile) != 0, + ) { + return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token") + } + + return nil +} + +func (c *SharedConfig) validateSSOConfiguration() error { + if c.hasSSOTokenProviderConfiguration() { + err := c.validateSSOTokenProviderConfiguration() + if err != nil { + return err + } + return nil + } + + if c.hasLegacySSOConfiguration() { + err := c.validateLegacySSOConfiguration() + if err != nil { + return err + } + } + return nil +} + +func (c *SharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) + } + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURLKey) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix) + } + + return nil +} + +func (c *SharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURLKey) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil +} + +func (c *SharedConfig) hasCredentials() bool { + switch { + case len(c.SourceProfileName) != 0: + case len(c.CredentialSource) != 0: + case len(c.CredentialProcess) != 0: + case len(c.WebIdentityTokenFile) != 0: + case c.hasSSOConfiguration(): + case c.Credentials.HasKeys(): + default: + return false + } + + return true +} + +func (c *SharedConfig) hasSSOConfiguration() bool { + return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration() +} + +func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *SharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *SharedConfig) clearAssumeRoleOptions() { + c.RoleARN = "" + c.ExternalID = "" + c.MFASerial = "" + c.RoleSessionName = "" + c.SourceProfileName = "" +} + +func (c *SharedConfig) clearCredentialOptions() { + c.CredentialSource = "" + c.CredentialProcess = "" + c.WebIdentityTokenFile = "" + c.Credentials = aws.Credentials{} + c.SSOAccountID = "" + c.SSORegion = "" + c.SSORoleName = "" + c.SSOStartURL = "" +} + +// SharedConfigLoadError is an error for the shared config file failed 
to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Unwrap returns the underlying error that caused the failure. +func (e SharedConfigLoadError) Unwrap() error { + return e.Err +} + +func (e SharedConfigLoadError) Error() string { + return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err) +} + +// SharedConfigProfileNotExistError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistError struct { + Filename []string + Profile string + Err error +} + +// Unwrap returns the underlying error that caused the failure. +func (e SharedConfigProfileNotExistError) Unwrap() error { + return e.Err +} + +func (e SharedConfigProfileNotExistError) Error() string { + return fmt.Sprintf("failed to get shared config profile, %s", e.Profile) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + Profile string + RoleARN string + Err error +} + +// Unwrap returns the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) Unwrap() error { + return e.Err +} + +func (e SharedConfigAssumeRoleError) Error() string { + return fmt.Sprintf("failed to load assume role %s, of profile %s, %v", + e.RoleARN, e.Profile, e.Err) +} + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Error satisfies the error interface. +func (e CredentialRequiresARNError) Error() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateInt will only update the dst with the value in the section key, key +// is present in the section. +// +// Down casts the INI integer value from a int64 to an int, which could be +// different bit size depending on platform. +func updateInt(dst *int, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + + v, ok := section.Int(key) + if !ok { + return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key)) + } + + *dst = int(v) + return nil +} + +// updateBool will only update the dst with the value in the section key, key +// is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + + // retains pre-#2276 behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = v +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. 
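The typed errors above are part of the package's public surface, so callers can branch on them with `errors.As`, for example to fall back to defaults when a requested profile simply does not exist. A hedged sketch:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	_, err := config.LoadSharedConfigProfile(context.Background(), "no-such-profile")

	var notExist config.SharedConfigProfileNotExistError
	switch {
	case err == nil:
		fmt.Println("profile found")
	case errors.As(err, &notExist):
		// Missing profile: safe to fall back to defaults in many applications.
		fmt.Println("profile missing:", notExist.Profile)
	default:
		log.Fatal(err) // some other load failure (unreadable file, bad INI, ...)
	}
}
```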
+func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + + // retains pre-#2276 behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = new(bool) + **dst = v +} + +// updateEndpointDiscoveryType will only update the dst with the value in the section, if +// a valid key and corresponding EndpointDiscoveryType is found. +func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) { + if !section.Has(key) { + return + } + + value := section.String(key) + if len(value) == 0 { + return + } + + switch { + case strings.EqualFold(value, endpointDiscoveryDisabled): + *dst = aws.EndpointDiscoveryDisabled + case strings.EqualFold(value, endpointDiscoveryEnabled): + *dst = aws.EndpointDiscoveryEnabled + case strings.EqualFold(value, endpointDiscoveryAuto): + *dst = aws.EndpointDiscoveryAuto + } +} + +// updateEndpointDiscoveryType will only update the dst with the value in the section, if +// a valid key and corresponding EndpointDiscoveryType is found. +func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) { + if !section.Has(key) { + return + } + + // retains pre-#2276 behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { + *dst = aws.DualStackEndpointStateEnabled + } else { + *dst = aws.DualStackEndpointStateDisabled + } + + return +} + +// updateEndpointDiscoveryType will only update the dst with the value in the section, if +// a valid key and corresponding EndpointDiscoveryType is found. +func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) { + if !section.Has(key) { + return + } + + // retains pre-#2276 behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { + *dst = aws.FIPSEndpointStateEnabled + } else { + *dst = aws.FIPSEndpointStateDisabled + } + + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 989c4ea3..d4e40907 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,282 @@ +# v1.17.67 (2025-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.66 (2025-04-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.65 (2025-03-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.64 (2025-03-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.63 (2025-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.62 (2025-03-04.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.61 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.60 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.59 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.58 (2025-02-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.57 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.56 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.55 (2025-01-24) + +* **Dependency Update**: Updated to 
the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.17.54 (2025-01-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.53 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.52 (2025-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.51 (2025-01-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.50 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.49 (2025-01-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.48 (2024-12-19) + +* **Bug Fix**: Fix improper use of printf-style functions. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.47 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.46 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.45 (2024-11-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.44 (2024-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.43 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.42 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.41 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.40 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.39 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.38 (2024-10-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.37 (2024-09-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.36 (2024-09-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.35 (2024-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.34 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.33 (2024-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.32 (2024-09-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.31 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.30 (2024-08-26) + +* **Bug Fix**: Save SSO cached token expiry in UTC to ensure cross-SDK compatibility. + +# v1.17.29 (2024-08-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.28 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2024-04-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2024-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2024-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2024-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.16 (2024-01-18) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go new file mode 100644 index 00000000..6ed71b42 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go @@ -0,0 +1,58 @@ +// Package ec2rolecreds provides the credentials provider implementation for +// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS. 
+// +// # Concurrency and caching +// +// The Provider is not safe to be used concurrently, and does not provide any +// caching of credentials retrieved. You should wrap the Provider with a +// `aws.CredentialsCache` to provide concurrency safety, and caching of +// credentials. +// +// # Loading credentials with the SDK's AWS Config +// +// The EC2 Instance role credentials provider will automatically be the resolved +// credential provider in the credential chain if no other credential provider is +// resolved first. +// +// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance +// role for credentials, you specify a `credentials_source` property in the config +// profile the SDK will load. +// +// [default] +// credential_source = Ec2InstanceMetadata +// +// # Loading credentials with the Provider directly +// +// Another way to use the EC2 Instance role credentials provider is to create it +// directly and assign it as the credentials provider for an API client. +// +// The following example creates a credentials provider for a command, and wraps +// it with the CredentialsCache before assigning the provider to the Amazon S3 API +// client's Credentials option. +// +// provider := imds.New(imds.Options{}) +// +// // Create the service client value configured for credentials. +// svc := s3.New(s3.Options{ +// Credentials: aws.NewCredentialsCache(provider), +// }) +// +// If you need more control, you can set the configuration options on the +// credentials provider using the imds.Options type to configure the EC2 IMDS +// API Client and ExpiryWindow of the retrieved credentials. +// +// provider := imds.New(imds.Options{ +// // See imds.Options type's documentation for more options available. +// Client: imds.New(Options{ +// HTTPClient: customHTTPClient, +// }), +// +// // Modify how soon credentials expire prior to their original expiry time. +// ExpiryWindow: 5 * time.Minute, +// }) +// +// # EC2 IMDS API Client +// +// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on +// configuring the client, and options available. +package ec2rolecreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go new file mode 100644 index 00000000..a95e6c8b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go @@ -0,0 +1,241 @@ +package ec2rolecreds + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "math" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the +// GetMetadata operation. +type GetMetadataAPIClient interface { + GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error) +} + +// A Provider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// The New function must be used to create the with a custom EC2 IMDS client. 
+// +// p := &ec2rolecreds.New(func(o *ec2rolecreds.Options{ +// o.Client = imds.New(imds.Options{/* custom options */}) +// }) +type Provider struct { + options Options +} + +// Options is a list of user settable options for setting the behavior of the Provider. +type Options struct { + // The API client that will be used by the provider to make GetMetadata API + // calls to EC2 IMDS. + // + // If nil, the provider will default to the EC2 IMDS client. + Client GetMetadataAPIClient + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource +} + +// New returns an initialized Provider value configured to retrieve +// credentials from EC2 Instance Metadata service. +func New(optFns ...func(*Options)) *Provider { + options := Options{} + + for _, fn := range optFns { + fn(&options) + } + + if options.Client == nil { + options.Client = imds.New(imds.Options{}) + } + + return &Provider{ + options: options, + } +} + +// Retrieve retrieves credentials from the EC2 service. Error will be returned +// if the request fails, or unable to extract the desired credentials. +func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { + credsList, err := requestCredList(ctx, p.options.Client) + if err != nil { + return aws.Credentials{Source: ProviderName}, err + } + + if len(credsList) == 0 { + return aws.Credentials{Source: ProviderName}, + fmt.Errorf("unexpected empty EC2 IMDS role list") + } + credsName := credsList[0] + + roleCreds, err := requestCred(ctx, p.options.Client, credsName) + if err != nil { + return aws.Credentials{Source: ProviderName}, err + } + + creds := aws.Credentials{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + Source: ProviderName, + + CanExpire: true, + Expires: roleCreds.Expiration, + } + + // Cap role credentials Expires to 1 hour so they can be refreshed more + // often. Jitter will be applied credentials cache if being used. + if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) { + creds.Expires = anHour + } + + return creds, nil +} + +// HandleFailToRefresh will extend the credentials Expires time if it it is +// expired. If the credentials will not expire within the minimum time, they +// will be returned. +// +// If the credentials cannot expire, the original error will be returned. +func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) ( + aws.Credentials, error, +) { + if !prevCreds.CanExpire { + return aws.Credentials{}, err + } + + if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) { + return prevCreds, nil + } + + newCreds := prevCreds + randFloat64, err := sdkrand.CryptoRandFloat64() + if err != nil { + return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err) + } + + // Random distribution of [5,15) minutes. + expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute + newCreds.Expires = sdk.NowTime().Add(expireOffset) + + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. 
A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes())) + + return newCreds, nil +} + +// AdjustExpiresBy will adds the passed in duration to the passed in +// credential's Expires time, unless the time until Expires is less than 15 +// minutes. Returns the credentials, even if not updated. +func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) ( + aws.Credentials, error, +) { + if !creds.CanExpire { + return creds, nil + } + if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) { + return creds, nil + } + + creds.Expires = creds.Expires.Add(dur) + return creds, nil +} + +// ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. If +// there are no credentials, or there is an error making or receiving the +// request +func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) { + resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{ + Path: iamSecurityCredsPath, + }) + if err != nil { + return nil, fmt.Errorf("no EC2 IMDS role found, %w", err) + } + defer resp.Content.Close() + + credsList := []string{} + s := bufio.NewScanner(resp.Content) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{ + Path: path.Join(iamSecurityCredsPath, credsName), + }) + if err != nil { + return ec2RoleCredRespBody{}, + fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w", + credsName, err) + } + defer resp.Content.Close() + + var respCreds ec2RoleCredRespBody + if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w", + credsName, err) + } + + if !strings.EqualFold(respCreds.Code, "Success") { + // If an error code was returned something failed requesting the role. 
+ return ec2RoleCredRespBody{}, + fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w", + credsName, + &smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message}) + } + + return respCreds, nil +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceIMDS} + } // If no source has been set, assume this is used directly which means just call to assume role + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go new file mode 100644 index 00000000..c3f5dadc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go @@ -0,0 +1,48 @@ +package client + +import ( + "context" + "github.com/aws/smithy-go/middleware" +) + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleFinalize(ctx, in) +} + +type signRequestMiddleware struct { +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleFinalize(ctx, in) +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go new file mode 100644 index 00000000..dc291c97 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go @@ -0,0 +1,165 @@ +package client + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/smithy-go" + smithymiddleware "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ServiceID is the client identifer +const ServiceID = "endpoint-credentials" + +// HTTPClient is a client for sending HTTP requests +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Options is the endpoint client configurable options +type Options struct { + // The endpoint to retrieve credentials from + Endpoint string + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. 
+ HTTPClient HTTPClient + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. + Retryer aws.Retryer + + // Set of options to modify how the credentials operation is invoked. + APIOptions []func(*smithymiddleware.Stack) error +} + +// Copy creates a copy of the API options. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + return to +} + +// Client is an client for retrieving AWS credentials from an endpoint +type Client struct { + options Options +} + +// New constructs a new Client from the given options +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + if options.HTTPClient == nil { + options.HTTPClient = awshttp.NewBuildableClient() + } + + if options.Retryer == nil { + // Amazon-owned implementations of this endpoint are known to sometimes + // return plaintext responses (i.e. no Code) like normal, add a few + // additional status codes + options.Retryer = retry.NewStandard(func(o *retry.StandardOptions) { + o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{ + Codes: map[int]struct{}{ + http.StatusTooManyRequests: {}, + }, + }) + }) + } + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +// GetCredentialsInput is the input to send with the endpoint service to receive credentials. +type GetCredentialsInput struct { + AuthorizationToken string +} + +// GetCredentials retrieves credentials from credential endpoint +func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) { + stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After) + stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After) + stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After) + addProtocolFinalizerMiddlewares(stack, options, "GetCredentials") + retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer}) + middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID) + smithyhttp.AddErrorCloseResponseBodyMiddleware(stack) + smithyhttp.AddCloseResponseBodyMiddleware(stack) + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, err + } + } + + handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, _, err := handler.Handle(ctx, params) + if err != nil { + return nil, err + } + + return result.(*GetCredentialsOutput), err +} + +// GetCredentialsOutput is the response from the credential endpoint +type GetCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string + AccountID string +} + +// EndpointError is an error returned from the endpoint service +type EndpointError struct { + Code string `json:"code"` + Message string `json:"message"` + Fault smithy.ErrorFault `json:"-"` + statusCode int `json:"-"` +} + +// Error is the error mesage string +func (e *EndpointError) Error() string { + return fmt.Sprintf("%s: %s", e.Code, e.Message) +} + +// ErrorCode is the error code returned by the endpoint +func (e 
*EndpointError) ErrorCode() string { + return e.Code +} + +// ErrorMessage is the error message returned by the endpoint +func (e *EndpointError) ErrorMessage() string { + return e.Message +} + +// ErrorFault indicates error fault classification +func (e *EndpointError) ErrorFault() smithy.ErrorFault { + return e.Fault +} + +// HTTPStatusCode implements retry.HTTPStatusCode. +func (e *EndpointError) HTTPStatusCode() int { + return e.statusCode +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go new file mode 100644 index 00000000..748ee672 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go @@ -0,0 +1,20 @@ +package client + +import ( + "context" + "github.com/aws/smithy-go/middleware" +) + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go new file mode 100644 index 00000000..f2820d20 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go @@ -0,0 +1,164 @@ +package client + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/url" + + "github.com/aws/smithy-go" + smithymiddleware "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type buildEndpoint struct { + Endpoint string +} + +func (b *buildEndpoint) ID() string { + return "BuildEndpoint" +} + +func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) ( + out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport, %T", in.Request) + } + + if len(b.Endpoint) == 0 { + return out, metadata, fmt.Errorf("endpoint not provided") + } + + parsed, err := url.Parse(b.Endpoint) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err) + } + + request.URL = parsed + + return next.HandleBuild(ctx, in) +} + +type serializeOpGetCredential struct{} + +func (s *serializeOpGetCredential) ID() string { + return "OperationSerializer" +} + +func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) ( + out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request) + } + + params, ok := in.Parameters.(*GetCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters) + } + + const acceptHeader = "Accept" + request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json") + + if len(params.AuthorizationToken) > 0 { + const authHeader = 
"Authorization" + request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken) + } + + return next.HandleSerialize(ctx, in) +} + +type deserializeOpGetCredential struct{} + +func (d *deserializeOpGetCredential) ID() string { + return "OperationDeserializer" +} + +func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) ( + out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, deserializeError(response) + } + + var shape *GetCredentialsOutput + if err = json.NewDecoder(response.Body).Decode(&shape); err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)} + } + + out.Result = shape + return out, metadata, err +} + +func deserializeError(response *smithyhttp.Response) error { + // we could be talking to anything, json isn't guaranteed + // see https://github.com/aws/aws-sdk-go-v2/issues/2316 + if response.Header.Get("Content-Type") == "application/json" { + return deserializeJSONError(response) + } + + msg, err := io.ReadAll(response.Body) + if err != nil { + return &smithy.DeserializationError{ + Err: fmt.Errorf("read response, %w", err), + } + } + + return &EndpointError{ + // no sensible value for Code + Message: string(msg), + Fault: stof(response.StatusCode), + statusCode: response.StatusCode, + } +} + +func deserializeJSONError(response *smithyhttp.Response) error { + var errShape *EndpointError + if err := json.NewDecoder(response.Body).Decode(&errShape); err != nil { + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode error message, %w", err), + } + } + + errShape.Fault = stof(response.StatusCode) + errShape.statusCode = response.StatusCode + return errShape +} + +// maps HTTP status code to smithy ErrorFault +func stof(code int) smithy.ErrorFault { + if code >= 500 { + return smithy.FaultServer + } + return smithy.FaultClient +} + +func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %w", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %w", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go new file mode 100644 index 00000000..c8ac6d9f --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -0,0 +1,207 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client" + "github.com/aws/smithy-go/middleware" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +type getCredentialsAPIClient interface { + GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error) +} + +// Provider satisfies the aws.CredentialsProvider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + // The AWS Client to make HTTP requests to the endpoint with. The endpoint + // the request will be made to is provided by the aws.Config's + // EndpointResolver. + client getCredentialsAPIClient + + options Options +} + +// HTTPClient is a client for sending HTTP requests +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Options is structure of configurable options for Provider +type Options struct { + // Endpoint to retrieve credentials from. Required + Endpoint string + + // HTTPClient to handle sending HTTP requests to the target endpoint. + HTTPClient HTTPClient + + // Set of options to modify how the credentials operation is invoked. + APIOptions []func(*middleware.Stack) error + + // The Retryer to be used for determining whether a failed requested should be retried + Retryer aws.Retryer + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. 
+ // + // When constructed from environment, the provider will use the value of + // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token + // + // Will be overridden if AuthorizationTokenProvider is configured + AuthorizationToken string + + // Optional auth provider func to dynamically load the auth token from a file + // everytime a credential is retrieved + // + // When constructed from environment, the provider will read and use the content + // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable + // as the auth token everytime credentials are retrieved + // + // Will override AuthorizationToken if configured + AuthorizationTokenProvider AuthTokenProvider + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource +} + +// AuthTokenProvider defines an interface to dynamically load a value to be passed +// for the Authorization header of a credentials request. +type AuthTokenProvider interface { + GetToken() (string, error) +} + +// TokenProviderFunc is a func type implementing AuthTokenProvider interface +// and enables customizing token provider behavior +type TokenProviderFunc func() (string, error) + +// GetToken func retrieves auth token according to TokenProviderFunc implementation +func (p TokenProviderFunc) GetToken() (string, error) { + return p() +} + +// New returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func New(endpoint string, optFns ...func(*Options)) *Provider { + o := Options{ + Endpoint: endpoint, + } + + for _, fn := range optFns { + fn(&o) + } + + p := &Provider{ + client: client.New(client.Options{ + HTTPClient: o.HTTPClient, + Endpoint: o.Endpoint, + APIOptions: o.APIOptions, + Retryer: o.Retryer, + }), + options: o, + } + + return p +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
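// A minimal usage sketch, assuming only the endpointcreds API documented in
// this file plus aws.NewCredentialsCache and context from the standard
// library; the endpoint URL and token value below are placeholders, not
// values taken from the SDK:
//
//	provider := endpointcreds.New("http://127.0.0.1:8080/creds",
//		func(o *endpointcreds.Options) {
//			// Normally populated from AWS_CONTAINER_AUTHORIZATION_TOKEN when
//			// the provider is constructed from the environment.
//			o.AuthorizationToken = "example-token"
//		})
//
//	// Wrap in a cache so retrieved credentials are reused until they expire
//	// and the retrieval is safe for concurrent use.
//	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())
//	if err != nil {
//		// handle the retrieval error
//	}
//	_ = creds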
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { + resp, err := p.getCredentials(ctx) + if err != nil { + return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err) + } + + creds := aws.Credentials{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + Source: ProviderName, + AccountID: resp.AccountID, + } + + if resp.Expiration != nil { + creds.CanExpire = true + creds.Expires = *resp.Expiration + } + + return creds, nil +} + +func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) { + authToken, err := p.resolveAuthToken() + if err != nil { + return nil, fmt.Errorf("resolve auth token: %v", err) + } + + return p.client.GetCredentials(ctx, &client.GetCredentialsInput{ + AuthorizationToken: authToken, + }) +} + +func (p *Provider) resolveAuthToken() (string, error) { + authToken := p.options.AuthorizationToken + + var err error + if p.options.AuthorizationTokenProvider != nil { + authToken, err = p.options.AuthorizationTokenProvider.GetToken() + if err != nil { + return "", err + } + } + + if strings.ContainsAny(authToken, "\r\n") { + return "", fmt.Errorf("authorization token contains invalid newline sequence") + } + + return authToken, nil +} + +var _ aws.CredentialProviderSource = (*Provider)(nil) + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceHTTP} + } + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index fe92184d..96ab3b85 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.16" +const goModuleVersion = "1.17.67" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go new file mode 100644 index 00000000..a3137b8f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go @@ -0,0 +1,92 @@ +// Package processcreds is a credentials provider to retrieve credentials from a +// external CLI invoked process. +// +// WARNING: The following describes a method of sourcing credentials from an external +// process. This can potentially be dangerous, so proceed with caution. Other +// credential providers should be preferred if at all possible. If using this +// option, you should make sure that the config file is as locked down as possible +// using security best practices for your operating system. +// +// # Concurrency and caching +// +// The Provider is not safe to be used concurrently, and does not provide any +// caching of credentials retrieved. You should wrap the Provider with a +// `aws.CredentialsCache` to provide concurrency safety, and caching of +// credentials. +// +// # Loading credentials with the SDKs AWS Config +// +// You can use credentials from a AWS shared config `credential_process` in a +// variety of ways. 
+// +// One way is to setup your shared config file, located in the default +// location, with the `credential_process` key and the command you want to be +// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. +// +// [default] +// credential_process = /command/to/call +// +// Loading configuration using external will use the credential process to +// retrieve credentials. NOTE: If there are credentials in the profile you are +// using, the credential process will not be used. +// +// // Initialize a session to load credentials. +// cfg, _ := config.LoadDefaultConfig(context.TODO()) +// +// // Create S3 service client to use the credentials. +// svc := s3.NewFromConfig(cfg) +// +// # Loading credentials with the Provider directly +// +// Another way to use the credentials process provider is by using the +// `NewProvider` constructor to create the provider and providing a it with a +// command to be executed to retrieve credentials. +// +// The following example creates a credentials provider for a command, and wraps +// it with the CredentialsCache before assigning the provider to the Amazon S3 API +// client's Credentials option. +// +// // Create credentials using the Provider. +// provider := processcreds.NewProvider("/path/to/command") +// +// // Create the service client value configured for credentials. +// svc := s3.New(s3.Options{ +// Credentials: aws.NewCredentialsCache(provider), +// }) +// +// If you need more control, you can set any configurable options in the +// credentials using one or more option functions. +// +// provider := processcreds.NewProvider("/path/to/command", +// func(o *processcreds.Options) { +// // Override the provider's default timeout +// o.Timeout = 2 * time.Minute +// }) +// +// You can also use your own `exec.Cmd` value by satisfying a value that satisfies +// the `NewCommandBuilder` interface and use the `NewProviderCommand` constructor. +// +// // Create an exec.Cmd +// cmdBuilder := processcreds.NewCommandBuilderFunc( +// func(ctx context.Context) (*exec.Cmd, error) { +// cmd := exec.CommandContext(ctx, +// "customCLICommand", +// "-a", "argument", +// ) +// cmd.Env = []string{ +// "ENV_VAR_FOO=value", +// "ENV_VAR_BAR=other_value", +// } +// +// return cmd, nil +// }, +// ) +// +// // Create credentials using your exec.Cmd and custom timeout +// provider := processcreds.NewProviderCommand(cmdBuilder, +// func(opt *processcreds.Provider) { +// // optionally override the provider's default timeout +// opt.Timeout = 1 * time.Second +// }) +package processcreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go new file mode 100644 index 00000000..dfc6b254 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go @@ -0,0 +1,296 @@ +package processcreds + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "runtime" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. + ProviderName = `ProcessProvider` + + // DefaultTimeout default limit on time a process can run. 
+ DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProviderError is an error indicating failure initializing or executing the +// process credentials provider +type ProviderError struct { + Err error +} + +// Error returns the error message. +func (e *ProviderError) Error() string { + return fmt.Sprintf("process provider error: %v", e.Err) +} + +// Unwrap returns the underlying error the provider error wraps. +func (e *ProviderError) Unwrap() error { + return e.Err +} + +// Provider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type Provider struct { + // Provides a constructor for exec.Cmd that are invoked by the provider for + // retrieving credentials. Use this to provide custom creation of exec.Cmd + // with things like environment variables, or other configuration. + // + // The provider defaults to the DefaultNewCommand function. + commandBuilder NewCommandBuilder + + options Options +} + +// Options is the configuration options for configuring the Provider. +type Options struct { + // Timeout limits the time a process can run. + Timeout time.Duration + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource +} + +// NewCommandBuilder provides the interface for specifying how command will be +// created that the Provider will use to retrieve credentials with. +type NewCommandBuilder interface { + NewCommand(context.Context) (*exec.Cmd, error) +} + +// NewCommandBuilderFunc provides a wrapper type around a function pointer to +// satisfy the NewCommandBuilder interface. +type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error) + +// NewCommand calls the underlying function pointer the builder was initialized with. +func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) { + return fn(ctx) +} + +// DefaultNewCommandBuilder provides the default NewCommandBuilder +// implementation used by the provider. It takes a command and arguments to +// invoke. The command will also be initialized with the current process +// environment variables, stderr, and stdin pipes. +type DefaultNewCommandBuilder struct { + Args []string +} + +// NewCommand returns an initialized exec.Cmd with the builder's initialized +// Args. The command is also initialized current process environment variables, +// stderr, and stdin pipes. +func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) { + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(b.Args) == 0 { + return nil, &ProviderError{ + Err: fmt.Errorf("failed to prepare command: command must not be empty"), + } + } + + cmdArgs = append(cmdArgs, b.Args...) + cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...) + cmd.Env = os.Environ() + + cmd.Stderr = os.Stderr // display stderr on console for MFA + cmd.Stdin = os.Stdin // enable stdin for MFA + + return cmd, nil +} + +// NewProvider returns a pointer to a new Credentials object wrapping the +// Provider. +// +// The provider defaults to the DefaultNewCommandBuilder for creating command +// the Provider will use to retrieve credentials with. +func NewProvider(command string, options ...func(*Options)) *Provider { + var args []string + + // Ensure that the command arguments are not set if the provided command is + // empty. 
This will error out when the command is executed since no + // arguments are specified. + if len(command) > 0 { + args = []string{command} + } + + commanBuilder := DefaultNewCommandBuilder{ + Args: args, + } + return NewProviderCommand(commanBuilder, options...) +} + +// NewProviderCommand returns a pointer to a new Credentials object with the +// specified command, and default timeout duration. Use this to provide custom +// creation of exec.Cmd for options like environment variables, or other +// configuration. +func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider { + p := &Provider{ + commandBuilder: builder, + options: Options{ + Timeout: DefaultTimeout, + }, + } + + for _, option := range options { + option(&p.options) + } + + return p +} + +// A CredentialProcessResponse is the AWS credentials format that must be +// returned when executing an external credential_process. +type CredentialProcessResponse struct { + // As of this writing, the Version key must be set to 1. This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. + SecretAccessKey string + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time + + // The ID of the account for credentials + AccountID string `json:"AccountId"` +} + +// Retrieve executes the credential process command and returns the +// credentials, or error if the command fails. +func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { + out, err := p.executeCredentialProcess(ctx) + if err != nil { + return aws.Credentials{Source: ProviderName}, err + } + + // Serialize and validate response + resp := &CredentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return aws.Credentials{Source: ProviderName}, &ProviderError{ + Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err), + } + } + + if resp.Version != 1 { + return aws.Credentials{Source: ProviderName}, &ProviderError{ + Err: fmt.Errorf("wrong version in process output (not 1)"), + } + } + + if len(resp.AccessKeyID) == 0 { + return aws.Credentials{Source: ProviderName}, &ProviderError{ + Err: fmt.Errorf("missing AccessKeyId in process output"), + } + } + + if len(resp.SecretAccessKey) == 0 { + return aws.Credentials{Source: ProviderName}, &ProviderError{ + Err: fmt.Errorf("missing SecretAccessKey in process output"), + } + } + + creds := aws.Credentials{ + Source: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + AccountID: resp.AccountID, + } + + // Handle expiration + if resp.Expiration != nil { + creds.CanExpire = true + creds.Expires = *resp.Expiration + } + + return creds, nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. 
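// A brief illustrative sketch, assuming the processcreds API shown above and
// aws.NewCredentialsCache; the command path and timeout value are
// placeholders. The external command must print a Version-1 JSON document
// containing at least AccessKeyId and SecretAccessKey (SessionToken,
// Expiration, and AccountId are optional), matching the
// CredentialProcessResponse validation performed by Retrieve:
//
//	provider := processcreds.NewProvider("/path/to/credential-helper",
//		func(o *processcreds.Options) {
//			// Override the one-minute DefaultTimeout defined above.
//			o.Timeout = 30 * time.Second
//		})
//
//	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())
//	if err != nil {
//		// handle the credential_process failure
//	}
//	_ = creds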
+func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) { + if p.options.Timeout >= 0 { + var cancelFunc func() + ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout) + defer cancelFunc() + } + + cmd, err := p.commandBuilder.NewCommand(ctx) + if err != nil { + return nil, err + } + + // get creds json on process's stdout + output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte))) + if cmd.Stdout != nil { + cmd.Stdout = io.MultiWriter(cmd.Stdout, output) + } else { + cmd.Stdout = output + } + + execCh := make(chan error, 1) + go executeCommand(cmd, execCh) + + select { + case execError := <-execCh: + if execError == nil { + break + } + select { + case <-ctx.Done(): + return output.Bytes(), &ProviderError{ + Err: fmt.Errorf("credential process timed out: %w", execError), + } + default: + return output.Bytes(), &ProviderError{ + Err: fmt.Errorf("error in credential_process: %w", execError), + } + } + } + + out := output.Bytes() + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`)) + } + + return out, nil +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceProcess} + } + return p.options.CredentialSources +} + +func executeCommand(cmd *exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go new file mode 100644 index 00000000..ece1e65f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go @@ -0,0 +1,81 @@ +// Package ssocreds provides a credential provider for retrieving temporary AWS +// credentials using an SSO access token. +// +// IMPORTANT: The provider in this package does not initiate or perform the AWS +// SSO login flow. The SDK provider expects that you have already performed the +// SSO login flow using AWS CLI using the "aws sso login" command, or by some +// other mechanism. The provider must find a valid non-expired access token for +// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not +// found, it is expired, or the file is malformed an error will be returned. +// +// # Loading AWS SSO credentials with the AWS shared configuration file +// +// You can use configure AWS SSO credentials from the AWS shared configuration file by +// specifying the required keys in the profile and referencing an sso-session: +// +// sso_session +// sso_account_id +// sso_role_name +// +// For example, the following defines a profile "devsso" and specifies the AWS +// SSO parameters that defines the target account, role, sign-on portal, and +// the region where the user portal is located. Note: all SSO arguments must be +// provided, or an error will be returned. +// +// [profile devsso] +// sso_session = dev-session +// sso_role_name = SSOReadOnlyRole +// sso_account_id = 123456789012 +// +// [sso-session dev-session] +// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_region = us-east-1 +// sso_registration_scopes = sso:account:access +// +// Using the config module, you can load the AWS SDK shared configuration, and +// specify that this profile be used to retrieve credentials. 
For example: +// +// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso")) +// if err != nil { +// return err +// } +// +// # Programmatically loading AWS SSO credentials directly +// +// You can programmatically construct the AWS SSO Provider in your application, +// and provide the necessary information to load and retrieve temporary +// credentials using an access token from ~/.aws/sso/cache. +// +// ssoClient := sso.NewFromConfig(cfg) +// ssoOidcClient := ssooidc.NewFromConfig(cfg) +// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session") +// if err != nil { +// return err +// } +// +// var provider aws.CredentialsProvider +// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) { +// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath) +// }) +// +// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time +// provider = aws.NewCredentialsCache(provider) +// +// credentials, err := provider.Retrieve(context.TODO()) +// if err != nil { +// return err +// } +// +// It is important that you wrap the Provider with aws.CredentialsCache if you +// are programmatically constructing the provider directly. This prevents your +// application from accessing the cached access token and requesting new +// credentials each time the credentials are used. +// +// # Additional Resources +// +// Configuring the AWS CLI to use AWS Single Sign-On: +// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// +// AWS Single Sign-On User Guide: +// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go new file mode 100644 index 00000000..46ae2f92 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go @@ -0,0 +1,233 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" +) + +var osUserHomeDur = shareddefaults.UserHomeDir + +// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or +// error if unable get derive the path. Key that will be used to compute a SHA1 +// value that is hex encoded. 
+// +// Derives the filepath using the Key as: +// +// ~/.aws/sso/cache/.json +func StandardCachedTokenFilepath(key string) (string, error) { + homeDir := osUserHomeDur() + if len(homeDir) == 0 { + return "", fmt.Errorf("unable to get USER's home directory for cached token") + } + hash := sha1.New() + if _, err := hash.Write([]byte(key)); err != nil { + return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err) + } + + cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" + + return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil +} + +type tokenKnownFields struct { + AccessToken string `json:"accessToken,omitempty"` + ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` + + RefreshToken string `json:"refreshToken,omitempty"` + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` +} + +type token struct { + tokenKnownFields + UnknownFields map[string]interface{} `json:"-"` +} + +func (t token) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{} + + setTokenFieldString(fields, "accessToken", t.AccessToken) + setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) + + setTokenFieldString(fields, "refreshToken", t.RefreshToken) + setTokenFieldString(fields, "clientId", t.ClientID) + setTokenFieldString(fields, "clientSecret", t.ClientSecret) + + for k, v := range t.UnknownFields { + if _, ok := fields[k]; ok { + return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) + } + fields[k] = v + } + + return json.Marshal(fields) +} + +func setTokenFieldString(fields map[string]interface{}, key, value string) { + if value == "" { + return + } + fields[key] = value +} +func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { + if value == nil { + return + } + fields[key] = value +} + +func (t *token) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return nil + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %w", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (token, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err) + } + + var t token + if err := json.Unmarshal(fileBytes, &t); err != nil { + return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { 
+ return token{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %w", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) { + var f *os.File + f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file %w", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %w", err) + } + + return nil +} + +type rfc3339 time.Time + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err) + } + + return rfc3339(parsed), nil +} + +func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) { + var value string + + // Use JSON unmarshal to unescape the quoted value making use of JSON's + // unquoting rules. + if err = json.Unmarshal(bytes, &value); err != nil { + return err + } + + *r, err = parseRFC3339(value) + + return nil +} + +func (r *rfc3339) MarshalJSON() ([]byte, error) { + value := time.Time(*r).UTC().Format(time.RFC3339) + + // Use JSON unmarshal to unescape the quoted value making use of JSON's + // quoting rules. + return json.Marshal(value) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go new file mode 100644 index 00000000..3ed9cbb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go @@ -0,0 +1,165 @@ +package ssocreds + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/service/sso" +) + +// ProviderName is the name of the provider used to specify the source of +// credentials. +const ProviderName = "SSOProvider" + +// GetRoleCredentialsAPIClient is a API client that implements the +// GetRoleCredentials operation. +type GetRoleCredentialsAPIClient interface { + GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) ( + *sso.GetRoleCredentialsOutput, error, + ) +} + +// Options is the Provider options structure. +type Options struct { + // The Client which is configured for the AWS Region where the AWS SSO user + // portal is located. + Client GetRoleCredentialsAPIClient + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. + RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) + // user portal. + StartURL string + + // The filepath the cached token will be retrieved from. If unset Provider will + // use the startURL to determine the filepath at. 
+ // + // ~/.aws/sso/cache/.json + // + // If custom cached token filepath is used, the Provider's startUrl + // parameter will be ignored. + CachedTokenFilepath string + + // Used by the SSOCredentialProvider if a token configuration + // profile is used in the shared config + SSOTokenProvider *SSOTokenProvider + + // The chain of providers that was used to create this provider. + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource +} + +// Provider is an AWS credential provider that retrieves temporary AWS +// credentials by exchanging an SSO login token. +type Provider struct { + options Options + + cachedTokenFilepath string +} + +// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The +// provided client is expected to be configured for the AWS Region where the +// AWS SSO user portal is located. +func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider { + options := Options{ + Client: client, + AccountID: accountID, + RoleName: roleName, + StartURL: startURL, + } + + for _, fn := range optFns { + fn(&options) + } + + return &Provider{ + options: options, + cachedTokenFilepath: options.CachedTokenFilepath, + } +} + +// Retrieve retrieves temporary AWS credentials from the configured Amazon +// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present +// in ~/.aws/sso/cache. However, if a token provider configuration exists +// in the shared config, then we ought to use the token provider rather then +// direct access on the cached token. +func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { + var accessToken *string + if p.options.SSOTokenProvider != nil { + token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx) + if err != nil { + return aws.Credentials{}, err + } + accessToken = &token.Value + } else { + if p.cachedTokenFilepath == "" { + cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL) + if err != nil { + return aws.Credentials{}, &InvalidTokenError{Err: err} + } + p.cachedTokenFilepath = cachedTokenFilepath + } + + tokenFile, err := loadCachedToken(p.cachedTokenFilepath) + if err != nil { + return aws.Credentials{}, &InvalidTokenError{Err: err} + } + + if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) { + return aws.Credentials{}, &InvalidTokenError{} + } + accessToken = &tokenFile.AccessToken + } + + output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{ + AccessToken: accessToken, + AccountId: &p.options.AccountID, + RoleName: &p.options.RoleName, + }) + if err != nil { + return aws.Credentials{}, err + } + + return aws.Credentials{ + AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId), + SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey), + SessionToken: aws.ToString(output.RoleCredentials.SessionToken), + CanExpire: true, + Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), + Source: ProviderName, + AccountID: p.options.AccountID, + }, nil +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSSO} + } + return p.options.CredentialSources +} + +// InvalidTokenError is the error type that is returned if loaded token 
has +// expired or is otherwise invalid. To refresh the SSO session run AWS SSO +// login with the corresponding profile. +type InvalidTokenError struct { + Err error +} + +func (i *InvalidTokenError) Unwrap() error { + return i.Err +} + +func (i *InvalidTokenError) Error() string { + const msg = "the SSO session has expired or is invalid" + if i.Err == nil { + return msg + } + return msg + ": " + i.Err.Error() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go new file mode 100644 index 00000000..7f4fc546 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go @@ -0,0 +1,147 @@ +package ssocreds + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/service/ssooidc" + "github.com/aws/smithy-go/auth/bearer" +) + +// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API +// client for calling CreateToken operation to refresh the SSO token. +type CreateTokenAPIClient interface { + CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) ( + *ssooidc.CreateTokenOutput, error, + ) +} + +// SSOTokenProviderOptions provides the options for configuring the +// SSOTokenProvider. +type SSOTokenProviderOptions struct { + // Client that can be overridden + Client CreateTokenAPIClient + + // The set of API Client options to be applied when invoking the + // CreateToken operation. + ClientOptions []func(*ssooidc.Options) + + // The path the file containing the cached SSO token will be read from. + // Initialized the NewSSOTokenProvider's cachedTokenFilepath parameter. + CachedTokenFilepath string +} + +// SSOTokenProvider provides an utility for refreshing SSO AccessTokens for +// Bearer Authentication. The SSOTokenProvider can only be used to refresh +// already cached SSO Tokens. This utility cannot perform the initial SSO +// create token. +// +// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in +// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's +// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with +// the smithy-go TokenCache, if the external configuration loaded configured +// for an SSO session. +// +// The initial SSO create token should be preformed with the AWS CLI before the +// Go application using the SSOTokenProvider will need to retrieve the SSO +// token. If the AWS CLI has not created the token cache file, this provider +// will return an error when attempting to retrieve the cached token. +// +// This provider will attempt to refresh the cached SSO token periodically if +// needed when RetrieveBearerToken is called. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. +// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +type SSOTokenProvider struct { + options SSOTokenProviderOptions +} + +var _ bearer.TokenProvider = (*SSOTokenProvider)(nil) + +// NewSSOTokenProvider returns an initialized SSOTokenProvider that will +// periodically refresh the SSO token cached stored in the cachedTokenFilepath. +// The cachedTokenFilepath file's content will be rewritten by the token +// provider when the token is refreshed. +// +// The client must be configured for the AWS region the SSO token was created for. 
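+//
+// Editor's note: the following is a minimal, illustrative sketch of how this
+// token provider is typically wired up, based only on the constructors shown
+// in this file and in this package's doc.go. The session name "dev-session"
+// is an assumption and error handling is abbreviated.
+//
+//	oidcClient := ssooidc.NewFromConfig(cfg)
+//	tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
+//	if err != nil {
+//		return err
+//	}
+//	tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, tokenPath)
+//
+//	// RetrieveBearerToken reads the cached token file and refreshes the
+//	// token if it has expired and a refresh token is present.
+//	token, err := tokenProvider.RetrieveBearerToken(context.TODO())
+//	if err != nil {
+//		return err // run "aws sso login" first to create the cached token
+//	}
+//	_ = token.Value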
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { + options := SSOTokenProviderOptions{ + Client: client, + CachedTokenFilepath: cachedTokenFilepath, + } + for _, fn := range optFns { + fn(&options) + } + + provider := &SSOTokenProvider{ + options: options, + } + + return provider +} + +// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath +// the SSOTokenProvider was created with. If the token has expired +// RetrieveBearerToken will attempt to refresh it. If the token cannot be +// refreshed or is not present an error will be returned. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) { + cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) + if err != nil { + return bearer.Token{}, err + } + + if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) { + cachedToken, err = p.refreshToken(ctx, cachedToken) + if err != nil { + return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err) + } + } + + expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt)) + return bearer.Token{ + Value: cachedToken.AccessToken, + CanExpire: !expiresAt.IsZero(), + Expires: expiresAt, + }, nil +} + +func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) { + if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" { + return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") + } + + createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{ + ClientId: &cachedToken.ClientID, + ClientSecret: &cachedToken.ClientSecret, + RefreshToken: &cachedToken.RefreshToken, + GrantType: aws.String("refresh_token"), + }, p.options.ClientOptions...) + if err != nil { + return token{}, fmt.Errorf("unable to refresh SSO token, %w", err) + } + + expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second) + + cachedToken.AccessToken = aws.ToString(createResult.AccessToken) + cachedToken.ExpiresAt = (*rfc3339)(&expiresAt) + cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken) + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil { + return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err) + } + + return cachedToken, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go index d525cac0..a469abdb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go @@ -22,6 +22,16 @@ func (*StaticCredentialsEmptyError) Error() string { // never expire. 
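+//
+// Editor's note: a brief illustrative sketch of the source-reporting behavior
+// added in this change. The three-string constructor signature is assumed
+// from the rest of this package and is not shown in this excerpt.
+//
+//	provider := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "SESSION")
+//	sources := provider.ProviderSources()
+//	// sources == []aws.CredentialSource{aws.CredentialSourceCode}: no explicit
+//	// Source chain was set, so the credentials are reported as hard-coded.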
type StaticCredentialsProvider struct { Value aws.Credentials + // These values are for reporting purposes and are not meant to be set up directly + Source []aws.CredentialSource +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (s StaticCredentialsProvider) ProviderSources() []aws.CredentialSource { + if s.Source == nil { + return []aws.CredentialSource{aws.CredentialSourceCode} // If no source has been set, assume this is used directly which means hardcoded creds + } + return s.Source } // NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go new file mode 100644 index 00000000..1ccf71e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,338 @@ +// Package stscreds are credential Providers to retrieve STS AWS credentials. +// +// STS provides multiple ways to retrieve credentials which can be used when making +// future AWS service API operation calls. +// +// The SDK will ensure that per instance of credentials.Credentials all requests +// to refresh the credentials will be synchronized. But, the SDK is unable to +// ensure synchronous usage of the AssumeRoleProvider if the value is shared +// between multiple Credentials or service clients. +// +// # Assume Role +// +// To assume an IAM role using STS with the SDK you can create a new Credentials +// with the SDKs's stscreds package. +// +// // Initial credentials loaded from SDK's default credential chain. Such as +// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance +// // Role. These credentials will be used to to make the STS Assume Role API. +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// panic(err) +// } +// +// // Create the credentials from AssumeRoleProvider to assume the role +// // referenced by the "myRoleARN" ARN. +// stsSvc := sts.NewFromConfig(cfg) +// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn") +// +// cfg.Credentials = aws.NewCredentialsCache(creds) +// +// // Create service client value configured for credentials +// // from assumed role. +// svc := s3.NewFromConfig(cfg) +// +// # Assume Role with custom MFA Token provider +// +// To assume an IAM role with a MFA token you can either specify a custom MFA +// token provider or use the SDK's built in StdinTokenProvider that will prompt +// the user for a token code each time the credentials need to to be refreshed. +// Specifying a custom token provider allows you to control where the token +// code is retrieved from, and how it is refreshed. +// +// With a custom token provider, the provider is responsible for refreshing the +// token code when called. +// +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// panic(err) +// } +// +// staticTokenProvider := func() (string, error) { +// return someTokenCode, nil +// } +// +// // Create the credentials from AssumeRoleProvider to assume the role +// // referenced by the "myRoleARN" ARN using the MFA token code provided. 
+// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { +// o.SerialNumber = aws.String("myTokenSerialNumber") +// o.TokenProvider = staticTokenProvider +// }) +// +// cfg.Credentials = aws.NewCredentialsCache(creds) +// +// // Create service client value configured for credentials +// // from assumed role. +// svc := s3.NewFromConfig(cfg) +// +// # Assume Role with MFA Token Provider +// +// To assume an IAM role with MFA for longer running tasks where the credentials +// may need to be refreshed setting the TokenProvider field of AssumeRoleProvider +// will allow the credential provider to prompt for new MFA token code when the +// role's credentials need to be refreshed. +// +// The StdinTokenProvider function is available to prompt on stdin to retrieve +// the MFA token code from the user. You can also implement custom prompts by +// satisfying the TokenProvider function signature. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely. +// +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// panic(err) +// } +// +// // Create the credentials from AssumeRoleProvider to assume the role +// // referenced by the "myRoleARN" ARN using the MFA token code provided. +// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { +// o.SerialNumber = aws.String("myTokenSerialNumber") +// o.TokenProvider = stscreds.StdinTokenProvider +// }) +// +// cfg.Credentials = aws.NewCredentialsCache(creds) +// +// // Create service client value configured for credentials +// // from assumed role. +// svc := s3.NewFromConfig(cfg) +package stscreds + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/aws-sdk-go-v2/service/sts/types" +) + +// StdinTokenProvider will prompt on stdout and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function go read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple gorouties. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely +// +// Will wait forever until something is provided on the stdin. +func StdinTokenProvider() (string, error) { + var v string + fmt.Printf("Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation. +type AssumeRoleAPIClient interface { + AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the +// credentials will be valid for. This value is only used by AssumeRoleProvider +// for specifying the default expiry duration of an assume role. 
+// +// Other providers such as WebIdentityRoleProvider do not use this value, and +// instead rely on STS API's default parameter handing to assign a default +// value. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + options AssumeRoleOptions +} + +// AssumeRoleOptions is the configurable options for AssumeRoleProvider +type AssumeRoleOptions struct { + // Client implementation of the AssumeRole operation. Required + Client AssumeRoleAPIClient + + // IAM Role ARN to be assumed. Required + RoleARN string + + // Session name, if you wish to uniquely identify this session. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyARNs []types.PolicyDescriptorType + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. 
+ // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + SourceIdentity *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is set. + TokenProvider func() (string, error) + + // A list of session tags that you want to pass. Each session tag consists of a key + // name and an associated value. For more information about session tags, see + // Tagging STS Sessions + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the + // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. + Tags []types.Tag + + // A list of keys for session tags that you want to set as transitive. If you set a + // tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. This parameter is optional. + TransitiveTagKeys []string + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource +} + +// NewAssumeRoleProvider constructs and returns a credentials provider that +// will retrieve credentials by assuming a IAM role using STS. +func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider { + o := AssumeRoleOptions{ + Client: client, + RoleARN: roleARN, + } + + for _, fn := range optFns { + fn(&o) + } + + return &AssumeRoleProvider{ + options: o, + } +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { + // Apply defaults where parameters are not set. + if len(p.options.RoleSessionName) == 0 { + // Try to work out a role name that will hopefully end up unique. + p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano()) + } + if p.options.Duration == 0 { + // Expire as often as AWS permits. 
+ p.options.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)), + PolicyArns: p.options.PolicyARNs, + RoleArn: aws.String(p.options.RoleARN), + RoleSessionName: aws.String(p.options.RoleSessionName), + ExternalId: p.options.ExternalID, + SourceIdentity: p.options.SourceIdentity, + Tags: p.options.Tags, + TransitiveTagKeys: p.options.TransitiveTagKeys, + } + if p.options.Policy != nil { + input.Policy = p.options.Policy + } + if p.options.SerialNumber != nil { + if p.options.TokenProvider != nil { + input.SerialNumber = p.options.SerialNumber + code, err := p.options.TokenProvider() + if err != nil { + return aws.Credentials{}, err + } + input.TokenCode = aws.String(code) + } else { + return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set") + } + } + + resp, err := p.options.Client.AssumeRole(ctx, input) + if err != nil { + return aws.Credentials{Source: ProviderName}, err + } + + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + + return aws.Credentials{ + AccessKeyID: *resp.Credentials.AccessKeyId, + SecretAccessKey: *resp.Credentials.SecretAccessKey, + SessionToken: *resp.Credentials.SessionToken, + Source: ProviderName, + + CanExpire: true, + Expires: *resp.Credentials.Expiration, + AccountID: accountID, + }, nil +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *AssumeRoleProvider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRole} + } // If no source has been set, assume this is used directly which means just call to assume role + return append(p.options.CredentialSources, aws.CredentialSourceSTSAssumeRole) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go new file mode 100644 index 00000000..5f4286dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,181 @@ +package stscreds + +import ( + "context" + "fmt" + "io/ioutil" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/aws-sdk-go-v2/service/sts/types" +) + +var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode() + +const ( + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation. +type AssumeRoleWithWebIdentityAPIClient interface { + AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) +} + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + options WebIdentityRoleOptions +} + +// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider +type WebIdentityRoleOptions struct { + // Client implementation of the AssumeRoleWithWebIdentity operation. 
Required + Client AssumeRoleWithWebIdentityAPIClient + + // JWT Token Provider. Required + TokenRetriever IdentityTokenRetriever + + // IAM Role ARN to assume. Required + RoleARN string + + // Session name, if you wish to uniquely identify this session. + RoleSessionName string + + // Expiry duration of the STS credentials. STS will assign a default expiry + // duration if this value is unset. This is different from the Duration + // option of AssumeRoleProvider, which automatically assigns 15 minutes if + // Duration is unset. + // + // See the STS AssumeRoleWithWebIdentity API reference guide for more + // information on defaults. + // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + Duration time.Duration + + // An IAM policy in JSON format that you want to use as an inline session policy. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you + // want to use as managed session policies. The policies must exist in the + // same account as the role. + PolicyARNs []types.PolicyDescriptorType + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource +} + +// IdentityTokenRetriever is an interface for retrieving a JWT +type IdentityTokenRetriever interface { + GetIdentityToken() ([]byte, error) +} + +// IdentityTokenFile is for retrieving an identity token from the given file name +type IdentityTokenFile string + +// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte +func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) { + b, err := ioutil.ReadFile(string(j)) + if err != nil { + return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err) + } + + return b, nil +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.ClientAPI +func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider { + o := WebIdentityRoleOptions{ + Client: client, + RoleARN: roleARN, + TokenRetriever: tokenRetriever, + } + + for _, fn := range optFns { + fn(&o) + } + + return &WebIdentityRoleProvider{options: o} +} + +// Retrieve will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { + b, err := p.options.TokenRetriever.GetIdentityToken() + if err != nil { + return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provide source, %w", err) + } + + sessionName := p.options.RoleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10) + } + input := &sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.options.PolicyARNs, + RoleArn: &p.options.RoleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + } + if p.options.Duration != 0 { + // If set use the value, otherwise STS will assign a default expiration duration. 
+ input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second)) + } + if p.options.Policy != nil { + input.Policy = p.options.Policy + } + + resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) { + options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode) + }) + if err != nil { + return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err) + } + + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. + + value := aws.Credentials{ + AccessKeyID: aws.ToString(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey), + SessionToken: aws.ToString(resp.Credentials.SessionToken), + Source: WebIdentityProviderName, + CanExpire: true, + Expires: *resp.Credentials.Expiration, + AccountID: accountID, + } + return value, nil +} + +// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]" +func getAccountID(u *types.AssumedRoleUser) string { + if u.Arn == nil { + return "" + } + parts := strings.Split(*u.Arn, ":") + if len(parts) < 5 { + return "" + } + return parts[4] +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *WebIdentityRoleProvider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRoleWebID} + } + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md new file mode 100644 index 00000000..1f69e820 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -0,0 +1,435 @@ +# v1.16.30 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.29 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.28 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.27 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.26 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.25 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.16.24 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.23 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.22 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.21 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.20 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.19 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.18 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.17 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.14 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2024-03-21) + +* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls. + +# v1.15.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.11 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.10 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.9 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.8 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.7 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.6 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.3 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-11-02) + +* No change notes available for this release. + +# v1.14.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.10 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2023-03-14) + +* **Feature**: Add flag to disable IMDSv1 fallback + +# v1.12.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.19 (2022-10-24) + +* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-11) + +* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout. +* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. 
Fixes #1253 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-07-15) + +* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go new file mode 100644 index 00000000..3f4a10e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -0,0 +1,352 @@ +package imds + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ServiceID provides the unique name of this API client +const ServiceID = "ec2imds" + +// Client provides the API client for interacting with the Amazon EC2 Instance +// Metadata Service API. 
+type Client struct { + options Options +} + +// ClientEnableState provides an enumeration if the client is enabled, +// disabled, or default behavior. +type ClientEnableState = internalconfig.ClientEnableState + +// Enumeration values for ClientEnableState +const ( + ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior + ClientDisabled ClientEnableState = internalconfig.ClientDisabled // client disabled + ClientEnabled ClientEnableState = internalconfig.ClientEnabled // client enabled +) + +// EndpointModeState is an enum configuration variable describing the client endpoint mode. +// Not configurable directly, but used when using the NewFromConfig. +type EndpointModeState = internalconfig.EndpointModeState + +// Enumeration values for EndpointModeState +const ( + EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset + EndpointModeStateIPv4 EndpointModeState = internalconfig.EndpointModeStateIPv4 + EndpointModeStateIPv6 EndpointModeState = internalconfig.EndpointModeStateIPv6 +) + +const ( + disableClientEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Client endpoint options + endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" + + defaultIPv4Endpoint = "http://169.254.169.254" + defaultIPv6Endpoint = "http://[fd00:ec2::254]" +) + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + options.HTTPClient = resolveHTTPClient(options.HTTPClient) + + if options.Retryer == nil { + options.Retryer = retry.NewStandard() + } + options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second) + + if options.ClientEnableState == ClientDefaultEnableState { + if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") { + options.ClientEnableState = ClientDisabled + } + } + + if len(options.Endpoint) == 0 { + if v := os.Getenv(endpointEnvVar); len(v) != 0 { + options.Endpoint = v + } + } + + client := &Client{ + options: options, + } + + if client.options.tokenProvider == nil && !client.options.disableAPIToken { + client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL) + } + + return client +} + +// NewFromConfig returns an initialized Client based the AWS SDK config, and +// functional options. Provide additional functional options to further +// configure the behavior of the client, such as changing the client's endpoint +// or adding custom middleware behavior. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...), + HTTPClient: cfg.HTTPClient, + ClientLogMode: cfg.ClientLogMode, + Logger: cfg.Logger, + } + + if cfg.Retryer != nil { + opts.Retryer = cfg.Retryer() + } + + resolveClientEnableState(cfg, &opts) + resolveEndpointConfig(cfg, &opts) + resolveEndpointModeConfig(cfg, &opts) + resolveEnableFallback(cfg, &opts) + + return New(opts, optFns...) +} + +// Options provides the fields for configuring the API client's behavior. +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. 
Use functional options on operation + // call to modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The endpoint the client will use to retrieve EC2 instance metadata. + // + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EndpointMode. + // + // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT + // has a value the client will use the value of the environment variable as + // the endpoint for operation calls. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + Endpoint string + + // The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field. + // + // Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint. + // Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint. + // + // By default if EndpointMode is not set (EndpointModeStateUnset) than the default endpoint selection mode EndpointModeStateIPv4. + EndpointMode EndpointModeState + + // The HTTP client to invoke API calls with. Defaults to client's default + // HTTP implementation if nil. + HTTPClient HTTPClient + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. + Retryer aws.Retryer + + // Changes if the EC2 Instance Metadata client is enabled or not. Client + // will default to enabled if not set to ClientDisabled. When the client is + // disabled it will return an error for all operation calls. + // + // If ClientEnableState value is ClientDefaultEnableState (default value), + // and the environment variable "AWS_EC2_METADATA_DISABLED" is set to + // "true", the client will be disabled. + // + // AWS_EC2_METADATA_DISABLED=true + ClientEnableState ClientEnableState + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // Configure IMDSv1 fallback behavior. By default, the client will attempt + // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary] + // the client will return any errors encountered from attempting to fetch a token + // instead of silently using the insecure data flow of IMDSv1. + // + // See [configuring IMDS] for more information. + // + // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + EnableFallback aws.Ternary + + // By default, all IMDS client operations enforce a 5-second timeout. You + // can disable that behavior with this setting. + DisableDefaultTimeout bool + + // provides the caching of API tokens used for operation calls. If unset, + // the API token will not be retrieved for the operation. + tokenProvider *tokenProvider + + // option to disable the API token provider for testing. + disableAPIToken bool +} + +// HTTPClient provides the interface for a client making HTTP requests with the +// API. +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a copy of the API options. +func (o Options) Copy() Options { + to := o + to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...) + return to +} + +// WithAPIOptions wraps the API middleware functions, as a functional option +// for the API Client Options. 
Use this helper to add additional functional +// options to the API client, or operation calls. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), + stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + if options.ClientEnableState == ClientDisabled { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: fmt.Errorf( + "access disabled to EC2 IMDS via client option, or %q environment variable", + disableClientEnvVar), + } + } + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + return result, metadata, err +} + +const ( + // HTTP client constants + defaultDialerTimeout = 250 * time.Millisecond + defaultResponseHeaderTimeout = 500 * time.Millisecond +) + +func resolveHTTPClient(client HTTPClient) HTTPClient { + if client == nil { + client = awshttp.NewBuildableClient() + } + + if c, ok := client.(*awshttp.BuildableClient); ok { + client = c. + WithDialerOptions(func(d *net.Dialer) { + // Use a custom Dial timeout for the EC2 Metadata service to account + // for the possibility the application might not be running in an + // environment with the service present. The client should fail fast in + // this case. + d.Timeout = defaultDialerTimeout + }). + WithTransportOptions(func(tr *http.Transport) { + // Use a custom Transport timeout for the EC2 Metadata service to + // account for the possibility that the application might be running in + // a container, and EC2Metadata service drops the connection after a + // single IP Hop. The client should fail fast in this case. 
+ tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout + }) + } + + return client +} + +func resolveClientEnableState(cfg aws.Config, options *Options) error { + if options.ClientEnableState != ClientDefaultEnableState { + return nil + } + value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.ClientEnableState = value + return nil +} + +func resolveEndpointModeConfig(cfg aws.Config, options *Options) error { + if options.EndpointMode != EndpointModeStateUnset { + return nil + } + value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.EndpointMode = value + return nil +} + +func resolveEndpointConfig(cfg aws.Config, options *Options) error { + if len(options.Endpoint) != 0 { + return nil + } + value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.Endpoint = value + return nil +} + +func resolveEnableFallback(cfg aws.Config, options *Options) { + if options.EnableFallback != aws.UnknownTernary { + return + } + + disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources) + if !ok { + return + } + + if disabled { + options.EnableFallback = aws.FalseTernary + } else { + options.EnableFallback = aws.TrueTernary + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go new file mode 100644 index 00000000..af58b6bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go @@ -0,0 +1,77 @@ +package imds + +import ( + "context" + "fmt" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getDynamicDataPath = "/latest/dynamic" + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) { + if params == nil { + params = &GetDynamicDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns, + addGetDynamicDataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetDynamicDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetDynamicDataInput provides the input parameters for the GetDynamicData +// operation. +type GetDynamicDataInput struct { + // The relative dynamic data path to retrieve. Can be empty string to + // retrieve a response containing a new line separated list of dynamic data + // resources available. + // + // Must not include the dynamic data base path. + // + // May include leading slash. If Path includes trailing slash the trailing + // slash will be included in the request for the resource. + Path string +} + +// GetDynamicDataOutput provides the output parameters for the GetDynamicData +// operation. 
+type GetDynamicDataOutput struct { + Content io.ReadCloser + + ResultMetadata middleware.Metadata +} + +func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + "GetDynamicData", + buildGetDynamicDataPath, + buildGetDynamicDataOutput) +} + +func buildGetDynamicDataPath(params interface{}) (string, error) { + p, ok := params.(*GetDynamicDataInput) + if !ok { + return "", fmt.Errorf("unknown parameter type %T", params) + } + + return appendURIPath(getDynamicDataPath, p.Path), nil +} + +func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) { + return &GetDynamicDataOutput{ + Content: resp.Body, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go new file mode 100644 index 00000000..5111cc90 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go @@ -0,0 +1,103 @@ +package imds + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strings" + "time" + + "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getIAMInfoPath = getMetadataPath + "/iam/info" + +// GetIAMInfo retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *Client) GetIAMInfo( + ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options), +) ( + *GetIAMInfoOutput, error, +) { + if params == nil { + params = &GetIAMInfoInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns, + addGetIAMInfoMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetIAMInfoOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetIAMInfoInput provides the input parameters for GetIAMInfo operation. +type GetIAMInfoInput struct{} + +// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation. +type GetIAMInfoOutput struct { + IAMInfo + + ResultMetadata middleware.Metadata +} + +func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + "GetIAMInfo", + buildGetIAMInfoPath, + buildGetIAMInfoOutput, + ) +} + +func buildGetIAMInfoPath(params interface{}) (string, error) { + return getIAMInfoPath, nil +} + +func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(resp.Body, ringBuffer) + + imdsResult := &GetIAMInfoOutput{} + if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil { + return nil, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode instance identity document, %w", err), + Snapshot: ringBuffer.Bytes(), + } + } + // Any code other success is an error + if !strings.EqualFold(imdsResult.Code, "success") { + return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s", + imdsResult.Code) + } + + return imdsResult, nil +} + +// IAMInfo provides the shape for unmarshaling an IAM info from the metadata +// API. 
+type IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go new file mode 100644 index 00000000..dc8c09ed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go @@ -0,0 +1,110 @@ +package imds + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document" + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *Client) GetInstanceIdentityDocument( + ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options), +) ( + *GetInstanceIdentityDocumentOutput, error, +) { + if params == nil { + params = &GetInstanceIdentityDocumentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns, + addGetInstanceIdentityDocumentMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetInstanceIdentityDocumentOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetInstanceIdentityDocumentInput provides the input parameters for +// GetInstanceIdentityDocument operation. +type GetInstanceIdentityDocumentInput struct{} + +// GetInstanceIdentityDocumentOutput provides the output parameters for +// GetInstanceIdentityDocument operation. 
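// A short usage sketch for GetInstanceIdentityDocument, whose output type
// follows. The package and function names are illustrative, and the printed
// fields come from the InstanceIdentityDocument struct defined later in this
// file.
package imdsexample

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func printIdentity(ctx context.Context, client *imds.Client) error {
	out, err := client.GetInstanceIdentityDocument(ctx, &imds.GetInstanceIdentityDocumentInput{})
	if err != nil {
		return err
	}
	// The embedded InstanceIdentityDocument exposes Region, AccountID, and
	// InstanceID directly on the output value.
	fmt.Printf("account=%s region=%s instance=%s\n", out.AccountID, out.Region, out.InstanceID)
	return nil
}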
+type GetInstanceIdentityDocumentOutput struct { + InstanceIdentityDocument + + ResultMetadata middleware.Metadata +} + +func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + "GetInstanceIdentityDocument", + buildGetInstanceIdentityDocumentPath, + buildGetInstanceIdentityDocumentOutput, + ) +} + +func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) { + return getInstanceIdentityDocumentPath, nil +} + +func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(resp.Body, ringBuffer) + + output := &GetInstanceIdentityDocumentOutput{} + if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil { + return nil, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode instance identity document, %w", err), + Snapshot: ringBuffer.Bytes(), + } + } + + return output, nil +} + +// InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go new file mode 100644 index 00000000..869bfc9f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go @@ -0,0 +1,77 @@ +package imds + +import ( + "context" + "fmt" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getMetadataPath = "/latest/meta-data" + +// GetMetadata uses the path provided to request information from the Amazon +// EC2 Instance Metadata Service. The content will be returned as a string, or +// error if the request failed. +func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) { + if params == nil { + params = &GetMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns, + addGetMetadataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetMetadataInput provides the input parameters for the GetMetadata +// operation. +type GetMetadataInput struct { + // The relative metadata path to retrieve. Can be empty string to retrieve + // a response containing a new line separated list of metadata resources + // available. 
+ // + // Must not include the metadata base path. + // + // May include leading slash. If Path includes trailing slash the trailing slash + // will be included in the request for the resource. + Path string +} + +// GetMetadataOutput provides the output parameters for the GetMetadata +// operation. +type GetMetadataOutput struct { + Content io.ReadCloser + + ResultMetadata middleware.Metadata +} + +func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + "GetMetadata", + buildGetMetadataPath, + buildGetMetadataOutput) +} + +func buildGetMetadataPath(params interface{}) (string, error) { + p, ok := params.(*GetMetadataInput) + if !ok { + return "", fmt.Errorf("unknown parameter type %T", params) + } + + return appendURIPath(getMetadataPath, p.Path), nil +} + +func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) { + return &GetMetadataOutput{ + Content: resp.Body, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go new file mode 100644 index 00000000..8c0572bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go @@ -0,0 +1,73 @@ +package imds + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// GetRegion retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *Client) GetRegion( + ctx context.Context, params *GetRegionInput, optFns ...func(*Options), +) ( + *GetRegionOutput, error, +) { + if params == nil { + params = &GetRegionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns, + addGetRegionMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetRegionOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetRegionInput provides the input parameters for GetRegion operation. +type GetRegionInput struct{} + +// GetRegionOutput provides the output parameters for GetRegion operation. 
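// A short sketch combining GetMetadata (shown above) and GetRegion, whose
// output type follows. "instance-id" is an illustrative metadata path, and the
// caller is responsible for closing the Content reader that GetMetadata
// returns.
package imdsexample

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func readRegionAndInstanceID(ctx context.Context, client *imds.Client) (string, string, error) {
	regionOut, err := client.GetRegion(ctx, &imds.GetRegionInput{})
	if err != nil {
		return "", "", err
	}

	metaOut, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: "instance-id"})
	if err != nil {
		return "", "", err
	}
	defer metaOut.Content.Close()

	raw, err := io.ReadAll(metaOut.Content)
	if err != nil {
		return "", "", err
	}
	return regionOut.Region, string(raw), nil
}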
+type GetRegionOutput struct { + Region string + + ResultMetadata middleware.Metadata +} + +func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + "GetRegion", + buildGetInstanceIdentityDocumentPath, + buildGetRegionOutput, + ) +} + +func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { + out, err := buildGetInstanceIdentityDocumentOutput(resp) + if err != nil { + return nil, err + } + + result, ok := out.(*GetInstanceIdentityDocumentOutput) + if !ok { + return nil, fmt.Errorf("unexpected instance identity document type, %T", out) + } + + region := result.Region + if len(region) == 0 { + return "", fmt.Errorf("instance metadata did not return a region value") + } + + return &GetRegionOutput{ + Region: region, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go new file mode 100644 index 00000000..1f9ee97a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go @@ -0,0 +1,119 @@ +package imds + +import ( + "context" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getTokenPath = "/latest/api/token" +const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds" + +// getToken uses the duration to return a token for EC2 IMDS, or an error if +// the request failed. +func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) { + if params == nil { + params = &getTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns, + addGetTokenMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*getTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type getTokenInput struct { + TokenTTL time.Duration +} + +type getTokenOutput struct { + Token string + TokenTTL time.Duration + + ResultMetadata middleware.Metadata +} + +func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { + err := addRequestMiddleware(stack, + options, + "PUT", + "GetToken", + buildGetTokenPath, + buildGetTokenOutput) + if err != nil { + return err + } + + err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After) + if err != nil { + return err + } + + return nil +} + +func buildGetTokenPath(interface{}) (string, error) { + return getTokenPath, nil +} + +func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + ttlHeader := resp.Header.Get(tokenTTLHeader) + tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse API token, %w", err) + } + + var token strings.Builder + if _, err = io.Copy(&token, resp.Body); err != nil { + return nil, fmt.Errorf("unable to read API token, %w", err) + } + + return &getTokenOutput{ + Token: token.String(), + TokenTTL: time.Duration(tokenTTL) * time.Second, + }, nil +} + +type tokenTTLRequestHeader struct{} + +func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" } +func (*tokenTTLRequestHeader) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next 
middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request) + } + + input, ok := in.Parameters.(*getTokenInput) + if !ok { + return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters) + } + + req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second))) + + return next.HandleSerialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go new file mode 100644 index 00000000..89036972 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go @@ -0,0 +1,61 @@ +package imds + +import ( + "context" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getUserDataPath = "/latest/user-data" + +// GetUserData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) { + if params == nil { + params = &GetUserDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns, + addGetUserDataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetUserDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetUserDataInput provides the input parameters for the GetUserData +// operation. +type GetUserDataInput struct{} + +// GetUserDataOutput provides the output parameters for the GetUserData +// operation. 
+type GetUserDataOutput struct { + Content io.ReadCloser + + ResultMetadata middleware.Metadata +} + +func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + "GetUserData", + buildGetUserDataPath, + buildGetUserDataOutput) +} + +func buildGetUserDataPath(params interface{}) (string, error) { + return getUserDataPath, nil +} + +func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) { + return &GetUserDataOutput{ + Content: resp.Body, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go new file mode 100644 index 00000000..ad283cf8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go @@ -0,0 +1,48 @@ +package imds + +import ( + "context" + "github.com/aws/smithy-go/middleware" +) + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleFinalize(ctx, in) +} + +type signRequestMiddleware struct { +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleFinalize(ctx, in) +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go new file mode 100644 index 00000000..d5765c36 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go @@ -0,0 +1,12 @@ +// Package imds provides the API client for interacting with the Amazon EC2 +// Instance Metadata Service. +// +// All Client operation calls have a default timeout. If the operation is not +// completed before this timeout expires, the operation will be canceled. This +// timeout can be overridden through the following: +// - Set the options flag DisableDefaultTimeout +// - Provide a Context with a timeout or deadline with calling the client's operations. +// +// See the EC2 IMDS user guide for more information on using the API. 
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html +package imds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go new file mode 100644 index 00000000..d7540da3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go @@ -0,0 +1,20 @@ +package imds + +import ( + "context" + "github.com/aws/smithy-go/middleware" +) + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go new file mode 100644 index 00000000..dba9ef60 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package imds + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.16.30" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go new file mode 100644 index 00000000..ce774558 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go @@ -0,0 +1,114 @@ +package config + +import ( + "fmt" + "strings" +) + +// ClientEnableState provides an enumeration if the client is enabled, +// disabled, or default behavior. +type ClientEnableState uint + +// Enumeration values for ClientEnableState +const ( + ClientDefaultEnableState ClientEnableState = iota + ClientDisabled + ClientEnabled +) + +// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode +type EndpointModeState uint + +// Enumeration values for ClientEnableState +const ( + EndpointModeStateUnset EndpointModeState = iota + EndpointModeStateIPv4 + EndpointModeStateIPv6 +) + +// SetFromString sets the EndpointModeState based on the provided string value. Unknown values will default to EndpointModeStateUnset +func (e *EndpointModeState) SetFromString(v string) error { + v = strings.TrimSpace(v) + + switch { + case len(v) == 0: + *e = EndpointModeStateUnset + case strings.EqualFold(v, "IPv6"): + *e = EndpointModeStateIPv6 + case strings.EqualFold(v, "IPv4"): + *e = EndpointModeStateIPv4 + default: + return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") + } + return nil +} + +// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled. +type ClientEnableStateResolver interface { + GetEC2IMDSClientEnableState() (ClientEnableState, bool, error) +} + +// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration. +type EndpointModeResolver interface { + GetEC2IMDSEndpointMode() (EndpointModeState, bool, error) +} + +// EndpointResolver is a config resolver interface for retrieving the endpoint. 
+type EndpointResolver interface { + GetEC2IMDSEndpoint() (string, bool, error) +} + +type v1FallbackDisabledResolver interface { + GetEC2IMDSV1FallbackDisabled() (bool, bool) +} + +// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources. +func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) { + for _, source := range sources { + if resolver, ok := source.(ClientEnableStateResolver); ok { + value, found, err = resolver.GetEC2IMDSClientEnableState() + if err != nil || found { + return value, found, err + } + } + } + return value, found, err +} + +// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources. +func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) { + for _, source := range sources { + if resolver, ok := source.(EndpointModeResolver); ok { + value, found, err = resolver.GetEC2IMDSEndpointMode() + if err != nil || found { + return value, found, err + } + } + } + return value, found, err +} + +// ResolveEndpointConfig resolves the endpoint from a list of configuration sources. +func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) { + for _, source := range sources { + if resolver, ok := source.(EndpointResolver); ok { + value, found, err = resolver.GetEC2IMDSEndpoint() + if err != nil || found { + return value, found, err + } + } + } + return value, found, err +} + +// ResolveV1FallbackDisabled ... +func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) { + for _, source := range sources { + if resolver, ok := source.(v1FallbackDisabledResolver); ok { + if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found { + return v, true + } + } + } + return false, false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go new file mode 100644 index 00000000..90cf4aeb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go @@ -0,0 +1,313 @@ +package imds + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "net/url" + "path" + "time" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func addAPIRequestMiddleware(stack *middleware.Stack, + options Options, + operation string, + getPath func(interface{}) (string, error), + getOutput func(*smithyhttp.Response) (interface{}, error), +) (err error) { + err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput) + if err != nil { + return err + } + + // Token Serializer build and state management. 
+ if !options.disableAPIToken { + err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before) + if err != nil { + return err + } + } + + return nil +} + +func addRequestMiddleware(stack *middleware.Stack, + options Options, + method string, + operation string, + getPath func(interface{}) (string, error), + getOutput func(*smithyhttp.Response) (interface{}, error), +) (err error) { + err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) + if err != nil { + return err + } + + // Operation timeout + err = stack.Initialize.Add(&operationTimeout{ + Disabled: options.DisableDefaultTimeout, + DefaultTimeout: defaultOperationTimeout, + }, middleware.Before) + if err != nil { + return err + } + + // Operation Serializer + err = stack.Serialize.Add(&serializeRequest{ + GetPath: getPath, + Method: method, + }, middleware.After) + if err != nil { + return err + } + + // Operation endpoint resolver + err = stack.Serialize.Insert(&resolveEndpoint{ + Endpoint: options.Endpoint, + EndpointMode: options.EndpointMode, + }, "OperationSerializer", middleware.Before) + if err != nil { + return err + } + + // Operation Deserializer + err = stack.Deserialize.Add(&deserializeResponse{ + GetOutput: getOutput, + }, middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: options.ClientLogMode.IsRequest(), + LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(), + LogResponse: options.ClientLogMode.IsResponse(), + LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(), + }, middleware.After) + if err != nil { + return err + } + + err = addSetLoggerMiddleware(stack, options) + if err != nil { + return err + } + + if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil { + return fmt.Errorf("add protocol finalizers: %w", err) + } + + // Retry support + return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{ + Retryer: options.Retryer, + LogRetryAttempts: options.ClientLogMode.IsRetries(), + }) +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +type serializeRequest struct { + GetPath func(interface{}) (string, error) + Method string +} + +func (*serializeRequest) ID() string { + return "OperationSerializer" +} + +func (m *serializeRequest) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + reqPath, err := m.GetPath(in.Parameters) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request URL path, %w", err) + } + + request.Request.URL.Path = reqPath + request.Request.Method = m.Method + + return next.HandleSerialize(ctx, in) +} + +type deserializeResponse struct { + GetOutput func(*smithyhttp.Response) (interface{}, error) +} + +func (*deserializeResponse) ID() string { + return "OperationDeserializer" +} + +func (m *deserializeResponse) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, fmt.Errorf( + "unexpected transport response type, %T, want %T", out.RawResponse, resp) + } + defer resp.Body.Close() + + // read the full body so that any operation timeouts cleanup will not race + // the body being read. + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return out, metadata, fmt.Errorf("read response body failed, %w", err) + } + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + + // Anything that's not 200 |< 300 is error + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return out, metadata, &smithyhttp.ResponseError{ + Response: resp, + Err: fmt.Errorf("request to EC2 IMDS failed"), + } + } + + result, err := m.GetOutput(resp) + if err != nil { + return out, metadata, fmt.Errorf( + "unable to get deserialized result for response, %w", err, + ) + } + out.Result = result + + return out, metadata, err +} + +type resolveEndpoint struct { + Endpoint string + EndpointMode EndpointModeState +} + +func (*resolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *resolveEndpoint) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + var endpoint string + if len(m.Endpoint) > 0 { + endpoint = m.Endpoint + } else { + switch m.EndpointMode { + case EndpointModeStateIPv6: + endpoint = defaultIPv6Endpoint + case EndpointModeStateIPv4: + fallthrough + case EndpointModeStateUnset: + endpoint = defaultIPv4Endpoint + default: + return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode") + } + } + + req.URL, err = url.Parse(endpoint) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + return next.HandleSerialize(ctx, in) +} + +const ( + defaultOperationTimeout = 5 * time.Second +) + +// operationTimeout adds a timeout on the middleware stack if the Context the +// stack was called with does not have a deadline. The next middleware must +// complete before the timeout, or the context will be canceled. +// +// If DefaultTimeout is zero, no default timeout will be used if the Context +// does not have a timeout. +// +// The next middleware must also ensure that any resources that are also +// canceled by the stack's context are completely consumed before returning. +// Otherwise the timeout cleanup will race the resource being consumed +// upstream. 
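// A brief sketch of the two documented ways to control the default operation
// timeout described above: supply a Context that already carries a deadline,
// or set DisableDefaultTimeout on the client Options. The durations and names
// here are illustrative.
package imdsexample

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func getUserDataWithDeadline(client *imds.Client) (*imds.GetUserDataOutput, error) {
	// A caller-supplied deadline takes precedence over the default 5-second
	// operation timeout added by this middleware.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	return client.GetUserData(ctx, &imds.GetUserDataInput{})
}

func newClientWithoutDefaultTimeout(opts imds.Options) *imds.Client {
	// With DisableDefaultTimeout set, only the caller's Context (if any)
	// bounds the operation.
	opts.DisableDefaultTimeout = true
	return imds.New(opts)
}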
+type operationTimeout struct { + Disabled bool + DefaultTimeout time.Duration +} + +func (*operationTimeout) ID() string { return "OperationTimeout" } + +func (m *operationTimeout) HandleInitialize( + ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler, +) ( + output middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.Disabled { + return next.HandleInitialize(ctx, input) + } + + if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 { + var cancelFn func() + ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout) + defer cancelFn() + } + + return next.HandleInitialize(ctx, input) +} + +// appendURIPath joins a URI path component to the existing path with `/` +// separators between the path components. If the path being added ends with a +// trailing `/` that slash will be maintained. +func appendURIPath(base, add string) string { + reqPath := path.Join(base, add) + if len(add) != 0 && add[len(add)-1] == '/' { + reqPath += "/" + } + return reqPath +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %w", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %w", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go new file mode 100644 index 00000000..5703c6e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go @@ -0,0 +1,261 @@ +package imds + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const ( + // Headers for Token and TTL + tokenHeader = "x-aws-ec2-metadata-token" + defaultTokenTTL = 5 * time.Minute +) + +type tokenProvider struct { + client *Client + tokenTTL time.Duration + + token *apiToken + tokenMux sync.RWMutex + + disabled uint32 // Atomic updated +} + +func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider { + return &tokenProvider{ + client: client, + tokenTTL: ttl, + } +} + +// apiToken provides the API token used by all operation calls for th EC2 +// Instance metadata service. +type apiToken struct { + token string + expires time.Time +} + +var timeNow = time.Now + +// Expired returns if the token is expired. +func (t *apiToken) Expired() bool { + // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry + // time is always based on reported wall-clock time. 
+ return timeNow().Round(0).After(t.expires) +} + +func (t *tokenProvider) ID() string { return "APITokenProvider" } + +// HandleFinalize is the finalize stack middleware, that if the token provider is +// enabled, will attempt to add the cached API token to the request. If the API +// token is not cached, it will be retrieved in a separate API call, getToken. +// +// For retry attempts, handler must be added after attempt retryer. +// +// If request for getToken fails the token provider may be disabled from future +// requests, depending on the response status code. +func (t *tokenProvider) HandleFinalize( + ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if t.fallbackEnabled() && !t.enabled() { + // short-circuits to insecure data flow if token provider is disabled. + return next.HandleFinalize(ctx, input) + } + + req, ok := input.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request) + } + + tok, err := t.getToken(ctx) + if err != nil { + // If the error allows the token to downgrade to insecure flow allow that. + var bypassErr *bypassTokenRetrievalError + if errors.As(err, &bypassErr) { + return next.HandleFinalize(ctx, input) + } + + return out, metadata, fmt.Errorf("failed to get API token, %w", err) + } + + req.Header.Set(tokenHeader, tok.token) + + return next.HandleFinalize(ctx, input) +} + +// HandleDeserialize is the deserialize stack middleware for determining if the +// operation the token provider is decorating failed because of a 401 +// unauthorized status code. If the operation failed for that reason the token +// provider needs to be re-enabled so that it can start adding the API token to +// operation calls. +func (t *tokenProvider) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, input) + if err == nil { + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse) + } + + if resp.StatusCode == http.StatusUnauthorized { // unauthorized + t.enable() + err = &retryableError{Err: err, isRetryable: true} + } + + return out, metadata, err +} + +func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) { + if t.fallbackEnabled() && !t.enabled() { + return nil, &bypassTokenRetrievalError{ + Err: fmt.Errorf("cannot get API token, provider disabled"), + } + } + + t.tokenMux.RLock() + tok = t.token + t.tokenMux.RUnlock() + + if tok != nil && !tok.Expired() { + return tok, nil + } + + tok, err = t.updateToken(ctx) + if err != nil { + return nil, err + } + + return tok, nil +} + +func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) { + t.tokenMux.Lock() + defer t.tokenMux.Unlock() + + // Prevent multiple requests to update retrieving the token. 
+ if t.token != nil && !t.token.Expired() { + tok := t.token + return tok, nil + } + + result, err := t.client.getToken(ctx, &getTokenInput{ + TokenTTL: t.tokenTTL, + }) + if err != nil { + var statusErr interface{ HTTPStatusCode() int } + if errors.As(err, &statusErr) { + switch statusErr.HTTPStatusCode() { + // Disable future get token if failed because of 403, 404, or 405 + case http.StatusForbidden, + http.StatusNotFound, + http.StatusMethodNotAllowed: + + if t.fallbackEnabled() { + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err) + t.disable() + } + + // 400 errors are terminal, and need to be upstreamed + case http.StatusBadRequest: + return nil, err + } + } + + // Disable if request send failed or timed out getting response + var re *smithyhttp.RequestSendError + var ce *smithy.CanceledError + if errors.As(err, &re) || errors.As(err, &ce) { + atomic.StoreUint32(&t.disabled, 1) + } + + if !t.fallbackEnabled() { + // NOTE: getToken() is an implementation detail of some outer operation + // (e.g. GetMetadata). It has its own retries that have already been exhausted. + // Mark the underlying error as a terminal error. + err = &retryableError{Err: err, isRetryable: false} + return nil, err + } + + // Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request + // and allow the request to proceed. Future requests _may_ re-attempt fetching a + // token if not disabled. + return nil, &bypassTokenRetrievalError{Err: err} + } + + tok := &apiToken{ + token: result.Token, + expires: timeNow().Add(result.TokenTTL), + } + t.token = tok + + return tok, nil +} + +// enabled returns if the token provider is current enabled or not. +func (t *tokenProvider) enabled() bool { + return atomic.LoadUint32(&t.disabled) == 0 +} + +// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise +func (t *tokenProvider) fallbackEnabled() bool { + switch t.client.options.EnableFallback { + case aws.FalseTernary: + return false + default: + return true + } +} + +// disable disables the token provider and it will no longer attempt to inject +// the token, nor request updates. +func (t *tokenProvider) disable() { + atomic.StoreUint32(&t.disabled, 1) +} + +// enable enables the token provide to start refreshing tokens, and adding them +// to the pending request. 
+func (t *tokenProvider) enable() { + t.tokenMux.Lock() + t.token = nil + t.tokenMux.Unlock() + atomic.StoreUint32(&t.disabled, 0) +} + +type bypassTokenRetrievalError struct { + Err error +} + +func (e *bypassTokenRetrievalError) Error() string { + return fmt.Sprintf("bypass token retrieval, %v", e.Err) +} + +func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err } + +type retryableError struct { + Err error + isRetryable bool +} + +func (e *retryableError) RetryableError() bool { return e.isRetryable } + +func (e *retryableError) Error() string { return e.Err.Error() } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md index fe53e9a5..57c03009 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md @@ -1,3 +1,415 @@ +# v1.17.73 (2025-04-23) + +* **Bug Fix**: Abort multi part download if the object is modified during download + +# v1.17.72 (2025-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.71 (2025-04-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.70 (2025-03-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.69 (2025-03-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.68 (2025-03-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.67 (2025-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.66 (2025-03-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.65 (2025-03-04.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.64 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.63 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.62 (2025-02-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.61 (2025-02-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.60 (2025-02-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.59 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.58 (2025-02-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.57 (2025-01-31) + +* **Bug Fix**: Fix incorrect reference to old s3manager in comments. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.56 (2025-01-30) + +* **Bug Fix**: Fix incorrect reference to old s3manager in comments. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.55 (2025-01-29) + +* **Bug Fix**: Fix incorrect reference to old s3manager in comments. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.54 (2025-01-24) + +* **Bug Fix**: Fix incorrect reference to old s3manager in comments. +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. 
+ +# v1.17.53 (2025-01-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.52 (2025-01-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.51 (2025-01-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.50 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.49 (2025-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.48 (2025-01-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.47 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.46 (2025-01-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.45 (2025-01-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.44 (2024-12-19) + +* **Bug Fix**: Fix improper use of printf-style functions. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.43 (2024-12-03.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.42 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.41 (2024-11-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.40 (2024-11-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.39 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.38 (2024-11-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.37 (2024-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.36 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.35 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.34 (2024-10-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.33 (2024-10-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.32 (2024-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.31 (2024-10-09) + +* **Bug Fix**: Fixup some integration tests. 
+ +# v1.17.30 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.29 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.28 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.27 (2024-10-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.26 (2024-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.25 (2024-09-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.24 (2024-09-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.23 (2024-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.22 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.21 (2024-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.20 (2024-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.19 (2024-09-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-09-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.16 (2024-08-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-08-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-08-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-08-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-08-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2024-08-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2024-07-24) + +* **Documentation**: Clarify region hint and credential usage in HeadBucketRegion. + +# v1.17.8 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2024-07-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.25 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.24 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.23 (2024-06-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.22 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.21 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.20 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.19 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.18 (2024-05-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.17 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2024-04-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.14 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2024-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2024-03-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2024-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2024-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2024-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2024-02-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2024-02-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.15.15 (2024-01-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go index a6a9781e..8c701952 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go @@ -17,15 +17,13 @@ const bucketRegionHeader = "X-Amz-Bucket-Region" // GetBucketRegion will attempt to get the region for a bucket using the // client's configured region to determine which AWS partition to perform the query on. // -// The request will not be signed, and will not use your AWS credentials. 
-// // A BucketNotFound error will be returned if the bucket does not exist in the // AWS partition the client region belongs to. // // For example to get the region of a bucket which exists in "eu-central-1" // you could provide a region hint of "us-west-2". // -// cfg, err := config.LoadDefaultConfig(context.TODO()) +// cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2")) // if err != nil { // log.Println("error:", err) // return @@ -60,6 +58,17 @@ const bucketRegionHeader = "X-Amz-Bucket-Region" // if err != nil { // panic(err) // } +// +// If buckets are public, you may use anonymous credential like so. +// +// manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket, func(o *s3.Options) { +// o.Credentials = nil +// // Or +// o.Credentials = aws.AnonymousCredentials{} +// }) +// +// The request with anonymous credentials will not be signed. +// Otherwise credentials would be required for private buckets. func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket string, optFns ...func(*s3.Options)) (string, error) { var captureBucketRegion deserializeBucketRegion diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go index 06070ada..8acd9a27 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go @@ -158,7 +158,7 @@ func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloade // // pre-allocate in memory buffer, where headObject type is *s3.HeadObjectOutput // buf := make([]byte, int(headObject.ContentLength)) // // wrap with aws.WriteAtBuffer -// w := s3manager.NewWriteAtBuffer(buf) +// w := manager.NewWriteAtBuffer(buf) // // download file into the memory // numBytesDownloaded, err := downloader.Download(ctx, w, &s3.GetObjectInput{ // Bucket: aws.String(bucket), @@ -183,7 +183,10 @@ func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetOb // Copy ClientOptions clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1) clientOptions = append(clientOptions, func(o *s3.Options) { - o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey)) + o.APIOptions = append(o.APIOptions, + middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), + addFeatureUserAgent, // yes, there are two of these + ) }) clientOptions = append(clientOptions, impl.cfg.ClientOptions...) impl.cfg.ClientOptions = clientOptions @@ -217,14 +220,15 @@ type downloader struct { in *s3.GetObjectInput w io.WriterAt - wg sync.WaitGroup - m sync.Mutex - - pos int64 - totalBytes int64 - written int64 - err error + wg sync.WaitGroup + m sync.Mutex + once sync.Once + pos int64 + totalBytes int64 + written int64 + err error + etag string partBodyMaxRetries int } @@ -355,6 +359,9 @@ func (d *downloader) downloadChunk(chunk dlchunk) error { // Get the next byte range of data params.Range = aws.String(chunk.ByteRange()) + if params.VersionId == nil && d.etag != "" { + params.IfMatch = aws.String(d.etag) + } var n int64 var err error @@ -398,6 +405,9 @@ func (d *downloader) tryDownloadChunk(params *s3.GetObjectInput, w io.Writer) (i return 0, err } d.setTotalBytes(resp) // Set total if not yet set. 
+ d.once.Do(func() { + d.etag = aws.ToString(resp.ETag) + }) var src io.Reader = resp.Body if d.cfg.BufferProvider != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go index 6fe1aa68..4ac4906a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go @@ -3,4 +3,4 @@ package manager // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.15" +const goModuleVersion = "1.17.73" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go index 968f9073..399d81aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go @@ -15,7 +15,7 @@ import ( // requires payload signing. // // Note: If using with S3 PutObject to stream an object upload. The SDK's S3 -// Upload Manager(s3manager.Uploader) provides support for streaming +// Upload Manager(manager.Uploader) provides support for streaming // with the ability to retry network errors. func ReadSeekCloser(r io.Reader) *ReaderSeekerCloser { return &ReaderSeekerCloser{r} @@ -137,7 +137,7 @@ func (r *ReaderSeekerCloser) Close() error { } // A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface -// Can be used with the s3manager.Downloader to download content to a buffer +// Can be used with the manager.Downloader to download content to a buffer // in memory. Safe to use concurrently. type WriteAtBuffer struct { buf []byte diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go index d1be506e..93133aee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go @@ -3,6 +3,7 @@ package manager import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -10,7 +11,6 @@ import ( "sync" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/awsutil" internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" @@ -121,6 +121,9 @@ type UploadOutput struct { // The base64-encoded, 32-bit CRC32C checksum of the object. ChecksumCRC32C *string + // The base64-encoded, 64-bit CRC64NVME checksum of the object. + ChecksumCRC64NVME *string + // The base64-encoded, 160-bit SHA-1 digest of the object. ChecksumSHA1 *string @@ -311,6 +314,7 @@ func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ... 
clientOptions = append(clientOptions, func(o *s3.Options) { o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), + addFeatureUserAgent, // yes, there are two of these func(s *smithymiddleware.Stack) error { return s.Finalize.Insert(&setS3ExpressDefaultChecksum{}, "ResolveEndpointV2", smithymiddleware.After) }, @@ -510,6 +514,7 @@ func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, e BucketKeyEnabled: aws.ToBool(out.BucketKeyEnabled), ChecksumCRC32: out.ChecksumCRC32, ChecksumCRC32C: out.ChecksumCRC32C, + ChecksumCRC64NVME: out.ChecksumCRC64NVME, ChecksumSHA1: out.ChecksumSHA1, ChecksumSHA256: out.ChecksumSHA256, ETag: out.ETag, @@ -583,6 +588,8 @@ func (a completedParts) Less(i, j int) bool { // upload will perform a multipart upload using the firstBuf buffer containing // the first chunk of data. func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) { + u.initChecksumAlgorithm() + var params s3.CreateMultipartUploadInput awsutil.Copy(¶ms, u.in) @@ -650,6 +657,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO BucketKeyEnabled: aws.ToBool(completeOut.BucketKeyEnabled), ChecksumCRC32: completeOut.ChecksumCRC32, ChecksumCRC32C: completeOut.ChecksumCRC32C, + ChecksumCRC64NVME: completeOut.ChecksumCRC64NVME, ChecksumSHA1: completeOut.ChecksumSHA1, ChecksumSHA256: completeOut.ChecksumSHA256, ETag: completeOut.ETag, @@ -685,7 +693,7 @@ func (u *multiuploader) shouldContinue(part int32, nextChunkLen int, err error) msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit", MaxUploadParts) } - return false, fmt.Errorf(msg) + return false, errors.New(msg) } return true, err @@ -751,6 +759,27 @@ func (u *multiuploader) send(c chunk) error { return nil } +func (u *multiuploader) initChecksumAlgorithm() { + if u.in.ChecksumAlgorithm != "" { + return + } + + switch { + case u.in.ChecksumCRC32 != nil: + u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32 + case u.in.ChecksumCRC32C != nil: + u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32c + case u.in.ChecksumCRC64NVME != nil: + u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc64nvme + case u.in.ChecksumSHA1 != nil: + u.in.ChecksumAlgorithm = types.ChecksumAlgorithmSha1 + case u.in.ChecksumSHA256 != nil: + u.in.ChecksumAlgorithm = types.ChecksumAlgorithmSha256 + default: + u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32 + } +} + // geterr is a thread-safe getter for the error object func (u *multiuploader) geterr() error { u.m.Lock() @@ -853,3 +882,31 @@ func (*setS3ExpressDefaultChecksum) HandleFinalize( return next.HandleFinalize(ctx, in) } + +func addFeatureUserAgent(stack *smithymiddleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(middleware.UserAgentFeatureS3Transfer) + return nil +} + +func getOrAddRequestUserAgent(stack *smithymiddleware.Stack) (*middleware.RequestUserAgent, error) { + id := (*middleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = middleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, smithymiddleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*middleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go index 0c5a2d40..24db8e14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go @@ -5,6 +5,7 @@ import ( "fmt" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go" "github.com/aws/smithy-go/auth" @@ -39,7 +40,10 @@ func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request } hash := v4.GetPayloadHash(ctx) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + signingTime := sdk.NowTime() + skew := internalcontext.GetAttemptSkewContext(ctx) + signingTime = signingTime.Add(skew) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o *v4.SignerOptions) { o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) o.Logger = v.Logger diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go deleted file mode 100644 index 58ef438a..00000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go +++ /dev/null @@ -1,225 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. 
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.EqualFold(name, c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - if dstVal.Kind() == reflect.String { - dstVal.SetString(srcVal.String()) - } else { - dstVal.Set(srcVal) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index dc87ec41..eae3e16a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,148 @@ +# v1.3.34 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.3.28 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.27 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.26 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.25 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.24 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.23 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.22 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions + # v1.2.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 41ee0bfb..eddabe63 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.2.10" +const goModuleVersion = "1.3.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go index 15bf1047..f0c283d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go @@ -2,12 +2,14 @@ package context import ( "context" + "time" "github.com/aws/smithy-go/middleware" ) type s3BackendKey struct{} type checksumInputAlgorithmKey struct{} +type clockSkew struct{} const ( // S3BackendS3Express identifies the S3Express backend @@ -37,3 +39,14 @@ func GetChecksumInputAlgorithm(ctx context.Context) string { v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string) return v } + +// SetAttemptSkewContext sets the clock skew value on the context +func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context { + return middleware.WithStackValue(ctx, clockSkew{}, v) +} + +// GetAttemptSkewContext gets the clock skew value from the context +func GetAttemptSkewContext(ctx context.Context) time.Duration { + x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration) + return x +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go index ba603275..91414afe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go @@ -12,11 +12,12 @@ type Partition struct { // PartitionConfig provides the endpoint metadata for an AWS region or partition. 
type PartitionConfig struct { - Name string `json:"name"` - DnsSuffix string `json:"dnsSuffix"` - DualStackDnsSuffix string `json:"dualStackDnsSuffix"` - SupportsFIPS bool `json:"supportsFIPS"` - SupportsDualStack bool `json:"supportsDualStack"` + Name string `json:"name"` + DnsSuffix string `json:"dnsSuffix"` + DualStackDnsSuffix string `json:"dualStackDnsSuffix"` + SupportsFIPS bool `json:"supportsFIPS"` + SupportsDualStack bool `json:"supportsDualStack"` + ImplicitGlobalRegion string `json:"implicitGlobalRegion"` } type RegionOverrides struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 849beffd..5f077999 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -13,11 +13,12 @@ var partitions = []Partition{ ID: "aws", RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-east-1", }, Regions: map[string]RegionOverrides{ "af-south-1": { @@ -111,6 +112,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ca-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-central-1": { Name: nil, DnsSuffix: nil, @@ -229,11 +237,12 @@ var partitions = []Partition{ ID: "aws-cn", RegionRegex: "^cn\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-cn", - DnsSuffix: "amazonaws.com.cn", - DualStackDnsSuffix: "api.amazonwebservices.com.cn", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws-cn", + DnsSuffix: "amazonaws.com.cn", + DualStackDnsSuffix: "api.amazonwebservices.com.cn", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "cn-northwest-1", }, Regions: map[string]RegionOverrides{ "aws-cn-global": { @@ -263,11 +272,12 @@ var partitions = []Partition{ ID: "aws-us-gov", RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", }, Regions: map[string]RegionOverrides{ "aws-us-gov-global": { @@ -297,11 +307,12 @@ var partitions = []Partition{ ID: "aws-iso", RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso", - DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso", + DnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "c2s.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-iso-east-1", }, Regions: map[string]RegionOverrides{ "aws-iso-global": { @@ -331,11 +342,12 @@ var partitions = []Partition{ ID: "aws-iso-b", RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-b", - DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: 
"aws-iso-b", + DnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "sc2s.sgov.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-isob-east-1", }, Regions: map[string]RegionOverrides{ "aws-iso-b-global": { @@ -358,23 +370,33 @@ var partitions = []Partition{ ID: "aws-iso-e", RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-e", - DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-e", + DnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "cloud.adc-e.uk", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "eu-isoe-west-1", + }, + Regions: map[string]RegionOverrides{ + "eu-isoe-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, }, - Regions: map[string]RegionOverrides{}, }, { ID: "aws-iso-f", RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-f", - DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-f", + DnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "csp.hci.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-isof-south-1", }, Regions: map[string]RegionOverrides{}, }, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index f376f690..e19224f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -9,7 +9,7 @@ "supportsDualStack" : true, "supportsFIPS" : true }, - "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$", "regions" : { "af-south-1" : { "description" : "Africa (Cape Town)" @@ -44,6 +44,12 @@ "ap-southeast-4" : { "description" : "Asia Pacific (Melbourne)" }, + "ap-southeast-5" : { + "description" : "Asia Pacific (Malaysia)" + }, + "ap-southeast-7" : { + "description" : "Asia Pacific (Thailand)" + }, "aws-global" : { "description" : "AWS Standard global region" }, @@ -86,6 +92,9 @@ "me-south-1" : { "description" : "Middle East (Bahrain)" }, + "mx-central-1" : { + "description" : "Mexico (Central)" + }, "sa-east-1" : { "description" : "South America (Sao Paulo)" }, @@ -198,7 +207,11 @@ "supportsFIPS" : true }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { } + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + } }, { "id" : "aws-iso-f", "outputs" : { @@ -210,7 +223,17 @@ "supportsFIPS" : true }, "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", - "regions" : { } + "regions" : { + "aws-iso-f-global" : { + "description" : "AWS ISOF global region" + }, + "us-isof-east-1" : { + "description" : "US ISOF EAST" + }, + "us-isof-south-1" : { + "description" : "US ISOF SOUTH" + } + } } ], "version" : "1.1" } \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index e0265474..83e5bd28 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,150 @@ +# v2.6.34 (2025-02-27) + +* **Dependency 
Update**: Updated to the latest SDK module versions + +# v2.6.33 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.32 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.31 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.30 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.29 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v2.6.28 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.27 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.26 (2024-12-19) + +* **Bug Fix**: Fix improper use of printf-style functions. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.25 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.24 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.23 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.22 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.21 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.20 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.19 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.3 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v2.5.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index bec2c6a1..735dba7a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.5.10" +const goModuleVersion = "2.6.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md new file mode 100644 index 00000000..f729db53 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -0,0 +1,283 @@ +# v1.8.3 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 + +# v1.8.2 (2025-01-24) + +* **Bug Fix**: Refactor filepath.Walk to filepath.WalkDir + +# v1.8.1 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + +# v1.8.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. + +# v1.7.3 (2024-01-22) + +* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. + +# v1.7.2 (2023-12-08) + +* **Bug Fix**: Correct loading of [services *] sections into shared config. + +# v1.7.1 (2023-11-16) + +* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it. + +# v1.7.0 (2023-11-13) + +* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. + +# v1.6.0 (2023-11-09.2) + +* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored + +# v1.5.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2023-11-07) + +* **Bug Fix**: Fix subproperty performance regression + +# v1.5.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.45 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.43 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. + +# v1.3.42 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.41 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.40 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.39 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.38 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.37 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.36 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.35 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.34 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.28 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.27 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.26 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.25 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.24 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.23 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.22 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2022-05-17) + 
+* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2022-01-28) + +* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. + +# v1.3.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-07-01) + +* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. + +# v1.0.1 (2021-06-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-05-20) + +* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
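Before the Go sources that follow, a brief orientation may help when reviewing them: the newly vendored `internal/ini` package is only consumed indirectly through the SDK's shared-config loader, but the minimal sketch below exercises the public surface it adds (`OpenFile`, `GetSection`, `String`, `Map`, and the `UnableToReadFile` error defined in the files after this note). It is an illustration only, not project code: the config path and profile name are hypothetical, and because the package lives under `internal/` it can only be imported from within aws-sdk-go-v2 itself; sql_exporter never calls it directly.

package main

import (
	"errors"
	"fmt"

	// Internal SDK package: importable only from inside aws-sdk-go-v2; shown purely for illustration.
	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	// Hypothetical path; the SDK derives the real one from shareddefaults.SharedConfigFilename().
	sections, err := ini.OpenFile("/home/user/.aws/config")
	if err != nil {
		var unreadable *ini.UnableToReadFile
		if errors.As(err, &unreadable) {
			fmt.Println("shared config missing or unreadable:", unreadable)
			return
		}
		panic(err)
	}

	// Non-default profiles are stored under "profile <name>" section keys.
	section, ok := sections.GetSection("profile foo")
	if !ok {
		fmt.Println("profile foo not found")
		return
	}

	// Property keys are lower-cased by the tokenizer; String returns "" for absent keys.
	fmt.Println("region:", section.String("region"))

	// Indented sub-properties (for example under an "s3" key) surface as a map.
	fmt.Println("s3 settings:", section.Map("s3"))
}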
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go new file mode 100644 index 00000000..0f278d55 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go @@ -0,0 +1,22 @@ +package ini + +import "fmt" + +// UnableToReadFile is an error indicating that a ini file could not be read +type UnableToReadFile struct { + Err error +} + +// Error returns an error message and the underlying error message if present +func (e *UnableToReadFile) Error() string { + base := "unable to read file" + if e.Err == nil { + return base + } + return fmt.Sprintf("%s: %v", base, e.Err) +} + +// Unwrap returns the underlying error +func (e *UnableToReadFile) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go new file mode 100644 index 00000000..00df0e3c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ini + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.8.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go new file mode 100644 index 00000000..cefcce91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go @@ -0,0 +1,56 @@ +// Package ini implements parsing of the AWS shared config file. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +package ini + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OpenFile parses shared config from the given file path. +func OpenFile(path string) (sections Sections, err error) { + f, oerr := os.Open(path) + if oerr != nil { + return Sections{}, &UnableToReadFile{Err: oerr} + } + + defer func() { + closeErr := f.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("close error: %v, original error: %w", closeErr, err) + } + }() + + return Parse(f, path) +} + +// Parse parses shared config from the given reader. 
+func Parse(r io.Reader, path string) (Sections, error) { + contents, err := io.ReadAll(r) + if err != nil { + return Sections{}, fmt.Errorf("read all: %v", err) + } + + lines := strings.Split(string(contents), "\n") + tokens, err := tokenize(lines) + if err != nil { + return Sections{}, fmt.Errorf("tokenize: %v", err) + } + + return parse(tokens, path), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go new file mode 100644 index 00000000..2422d904 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go @@ -0,0 +1,109 @@ +package ini + +import ( + "fmt" + "strings" +) + +func parse(tokens []lineToken, path string) Sections { + parser := &parser{ + path: path, + sections: NewSections(), + } + parser.parse(tokens) + return parser.sections +} + +type parser struct { + csection, ckey string // current state + path string // source file path + sections Sections // parse result +} + +func (p *parser) parse(tokens []lineToken) { + for _, otok := range tokens { + switch tok := otok.(type) { + case *lineTokenProfile: + p.handleProfile(tok) + case *lineTokenProperty: + p.handleProperty(tok) + case *lineTokenSubProperty: + p.handleSubProperty(tok) + case *lineTokenContinuation: + p.handleContinuation(tok) + } + } +} + +func (p *parser) handleProfile(tok *lineTokenProfile) { + name := tok.Name + if tok.Type != "" { + name = fmt.Sprintf("%s %s", tok.Type, tok.Name) + } + p.ckey = "" + p.csection = name + if _, ok := p.sections.container[name]; !ok { + p.sections.container[name] = NewSection(name) + } +} + +func (p *parser) handleProperty(tok *lineTokenProperty) { + if p.csection == "" { + return // LEGACY: don't error on "global" properties + } + + p.ckey = tok.Key + if _, ok := p.sections.container[p.csection].values[tok.Key]; ok { + section := p.sections.container[p.csection] + section.Logs = append(p.sections.container[p.csection].Logs, + fmt.Sprintf( + "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n", + p.csection, tok.Key, tok.Key, p.path, + ), + ) + p.sections.container[p.csection] = section + } + + p.sections.container[p.csection].values[tok.Key] = Value{ + str: tok.Value, + } + p.sections.container[p.csection].SourceFile[tok.Key] = p.path +} + +func (p *parser) handleSubProperty(tok *lineTokenSubProperty) { + if p.csection == "" { + return // LEGACY: don't error on "global" properties + } + + if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" { + // This is an "orphaned" subproperty, either because it's at + // the beginning of a section or because the last property's + // value isn't empty. Either way we're lenient here and + // "promote" this to a normal property. 
+ p.handleProperty(&lineTokenProperty{ + Key: tok.Key, + Value: strings.TrimSpace(trimPropertyComment(tok.Value)), + }) + return + } + + if p.sections.container[p.csection].values[p.ckey].mp == nil { + p.sections.container[p.csection].values[p.ckey] = Value{ + mp: map[string]string{}, + } + } + p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value +} + +func (p *parser) handleContinuation(tok *lineTokenContinuation) { + if p.ckey == "" { + return + } + + value, _ := p.sections.container[p.csection].values[p.ckey] + if value.str != "" && value.mp == nil { + value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value) + } + + p.sections.container[p.csection].values[p.ckey] = value +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go new file mode 100644 index 00000000..dd89848e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go @@ -0,0 +1,157 @@ +package ini + +import ( + "sort" +) + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// NewSections returns empty ini Sections +func NewSections() Sections { + return Sections{ + container: make(map[string]Section, 0), + } +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// HasSection denotes if Sections consist of a section with +// provided name. +func (t Sections) HasSection(p string) bool { + _, ok := t.container[p] + return ok +} + +// SetSection sets a section value for provided section name. +func (t Sections) SetSection(p string, v Section) Sections { + t.container[p] = v + return t +} + +// DeleteSection deletes a section entry/value for provided section name./ +func (t Sections) DeleteSection(p string) { + delete(t.container, p) +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + // Name is the Section profile name + Name string + + // values are the values within parsed profile + values values + + // Errors is the list of errors + Errors []error + + // Logs is the list of logs + Logs []string + + // SourceFile is the INI Source file from where this section + // was retrieved. They key is the property, value is the + // source file the property was retrieved from. + SourceFile map[string]string +} + +// NewSection returns an initialize section for the name +func NewSection(name string) Section { + return Section{ + Name: name, + values: values{}, + SourceFile: map[string]string{}, + } +} + +// List will return a list of all +// services in values +func (t Section) List() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// UpdateSourceFile updates source file for a property to provided filepath. 
+func (t Section) UpdateSourceFile(property string, filepath string) {
+	t.SourceFile[property] = filepath
+}
+
+// UpdateValue updates value for a provided key with provided value
+func (t Section) UpdateValue(k string, v Value) error {
+	t.values[k] = v
+	return nil
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) (bool, bool) {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) (int64, bool) {
+	return t.values[k].IntValue()
+}
+
+// Map returns a map value at k
+func (t Section) Map(k string) map[string]string {
+	return t.values[k].MapValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) (float64, bool) {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
new file mode 100644
index 00000000..ed77d083
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
@@ -0,0 +1,89 @@
+package ini
+
+import (
+	"strings"
+)
+
+func trimProfileComment(s string) string {
+	r, _, _ := strings.Cut(s, "#")
+	r, _, _ = strings.Cut(r, ";")
+	return r
+}
+
+func trimPropertyComment(s string) string {
+	r, _, _ := strings.Cut(s, " #")
+	r, _, _ = strings.Cut(r, " ;")
+	r, _, _ = strings.Cut(r, "\t#")
+	r, _, _ = strings.Cut(r, "\t;")
+	return r
+}
+
+// assumes no surrounding comment
+func splitProperty(s string) (string, string, bool) {
+	equalsi := strings.Index(s, "=")
+	coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment
+	sep := "="
+	if equalsi == -1 || coloni != -1 && coloni < equalsi {
+		sep = ":"
+	}
+
+	k, v, ok := strings.Cut(s, sep)
+	if !ok {
+		return "", "", false
+	}
+	return strings.TrimSpace(k), strings.TrimSpace(v), true
+}
+
+// assumes no surrounding comment, whitespace, or profile brackets
+func splitProfile(s string) (string, string) {
+	var first int
+	for i, r := range s {
+		if isLineSpace(r) {
+			if first == 0 {
+				first = i
+			}
+		} else {
+			if first != 0 {
+				return s[:first], s[i:]
+			}
+		}
+	}
+	if first == 0 {
+		return "", s // type component is effectively blank
+	}
+	return "", ""
+}
+
+func isLineSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+func unquote(s string) string {
+	if isSingleQuoted(s) || isDoubleQuoted(s) {
+		return s[1 : len(s)-1]
+	}
+	return s
+}
+
+// applies various legacy conversions to property values:
+// - remove wrapping single/double quotes
+func legacyStrconv(s string) string {
+	s = unquote(s)
+	return s
+}
+
+func isSingleQuoted(s string) bool {
+	return hasAffixes(s, "'", "'")
+}
+
+func isDoubleQuoted(s string) bool {
+	return hasAffixes(s, `"`, `"`)
+}
+
+func isBracketed(s string) bool {
+	return hasAffixes(s, "[", "]")
+}
+
+func hasAffixes(s, left, right string) bool {
+	return strings.HasPrefix(s, left) && strings.HasSuffix(s, right)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go new file mode 100644 index 00000000..6e9a0374 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go @@ -0,0 +1,32 @@ +package ini + +type lineToken interface { + isLineToken() +} + +type lineTokenProfile struct { + Type string + Name string +} + +func (*lineTokenProfile) isLineToken() {} + +type lineTokenProperty struct { + Key string + Value string +} + +func (*lineTokenProperty) isLineToken() {} + +type lineTokenContinuation struct { + Value string +} + +func (*lineTokenContinuation) isLineToken() {} + +type lineTokenSubProperty struct { + Key string + Value string +} + +func (*lineTokenSubProperty) isLineToken() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go new file mode 100644 index 00000000..89a77368 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go @@ -0,0 +1,92 @@ +package ini + +import ( + "strings" +) + +func tokenize(lines []string) ([]lineToken, error) { + tokens := make([]lineToken, 0, len(lines)) + for _, line := range lines { + if len(strings.TrimSpace(line)) == 0 || isLineComment(line) { + continue + } + + if tok := asProfile(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asProperty(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asSubProperty(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asContinuation(line); tok != nil { + tokens = append(tokens, tok) + } // unrecognized tokens are effectively ignored + } + return tokens, nil +} + +func isLineComment(line string) bool { + trimmed := strings.TrimLeft(line, " \t") + return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";") +} + +func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment" + trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]" + if !isBracketed(trimmed) { + return nil + } + trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ") + trimmed = strings.TrimSpace(trimmed) // "type name" / "name" + typ, name := splitProfile(trimmed) + return &lineTokenProfile{ + Type: typ, + Name: name, + } +} + +func asProperty(line string) *lineTokenProperty { + if isLineSpace(rune(line[0])) { + return nil + } + + trimmed := trimPropertyComment(line) + trimmed = strings.TrimRight(trimmed, " \t") + k, v, ok := splitProperty(trimmed) + if !ok { + return nil + } + + return &lineTokenProperty{ + Key: strings.ToLower(k), // LEGACY: normalize key case + Value: legacyStrconv(v), // LEGACY: see func docs + } +} + +func asSubProperty(line string) *lineTokenSubProperty { + if !isLineSpace(rune(line[0])) { + return nil + } + + // comments on sub-properties are included in the value + trimmed := strings.TrimLeft(line, " \t") + k, v, ok := splitProperty(trimmed) + if !ok { + return nil + } + + return &lineTokenSubProperty{ // same LEGACY constraints as in normal property + Key: strings.ToLower(k), + Value: legacyStrconv(v), + } +} + +func asContinuation(line string) *lineTokenContinuation { + if !isLineSpace(rune(line[0])) { + return nil + } + + // includes comments like sub-properties + trimmed := strings.TrimLeft(line, " \t") + return &lineTokenContinuation{ + Value: trimmed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go new file mode 100644 index 00000000..e3706b3c --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go @@ -0,0 +1,93 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case StringType: + return "STRING" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + StringType + QuotedStringType +) + +// Value is a union container +type Value struct { + Type ValueType + + str string + mp map[string]string +} + +// NewStringValue returns a Value type generated using a string input. +func NewStringValue(str string) (Value, error) { + return Value{str: str}, nil +} + +func (v Value) String() string { + switch v.Type { + case StringType: + return fmt.Sprintf("string: %s", string(v.str)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.str)) + default: + return "union not set" + } +} + +// MapValue returns a map value for sub properties +func (v Value) MapValue() map[string]string { + return v.mp +} + +// IntValue returns an integer value +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.str), 0, 64) + if err != nil { + return 0, false + } + return i, true +} + +// FloatValue returns a float value +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.str), 64) + if err != nil { + return 0, false + } + return f, true +} + +// BoolValue returns a bool value +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if strings.EqualFold(v.str, "true") { + return true, true + } else if strings.EqualFold(v.str, "false") { + return false, true + } + return false, false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + return v.str +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go new file mode 100644 index 00000000..8e24a3f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go @@ -0,0 +1,42 @@ +package middleware + +import ( + "context" + "sync/atomic" + "time" + + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go/middleware" +) + +// AddTimeOffsetMiddleware sets a value representing clock skew on the request context. +// This can be read by other operations (such as signing) to correct the date value they send +// on the request +type AddTimeOffsetMiddleware struct { + Offset *atomic.Int64 +} + +// ID the identifier for AddTimeOffsetMiddleware +func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" } + +// HandleBuild sets a value for attemptSkew on the request context if one is set on the client. 
+func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + if m.Offset != nil { + offset := time.Duration(m.Offset.Load()) + ctx = internalcontext.SetAttemptSkewContext(ctx, offset) + } + return next.HandleBuild(ctx, in) +} + +// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer +// held by AddTimeOffsetMiddleware +func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 { + m.Offset.Store(v.Nanoseconds()) + } + return next.HandleDeserialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000..c96b717e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go @@ -0,0 +1,47 @@ +package shareddefaults + +import ( + "os" + "os/user" + "path/filepath" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + // Ignore errors since we only care about Windows and *nix. + home, _ := os.UserHomeDir() + + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir + } + + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md index 8aa94972..c574867d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md @@ -1,3 +1,150 @@ +# v1.3.34 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2025-01-30) + +* **Bug Fix**: Do not sign Transfer-Encoding header in Sigv4[a]. Fixes a signer mismatch issue with S3 Accelerate. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. 
+ +# v1.3.28 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.27 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.26 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.25 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.24 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.23 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.22 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.2.10 (2024-01-04)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
index 2a5888c1..b6c06c70 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
@@ -3,4 +3,4 @@
 package v4a
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.2.10"
+const goModuleVersion = "1.3.34"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
index 3487dc33..688f8347 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
@@ -4,9 +4,10 @@ package v4
 var IgnoredHeaders = Rules{
 	DenyList{
 		MapRule{
-			"Authorization":   struct{}{},
-			"User-Agent":      struct{}{},
-			"X-Amzn-Trace-Id": struct{}{},
+			"Authorization":     struct{}{},
+			"User-Agent":        struct{}{},
+			"X-Amzn-Trace-Id":   struct{}{},
+			"Transfer-Encoding": struct{}{},
 		},
 	},
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
index 516d459d..af4f6abc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"time"
 
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+
 	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 	"github.com/aws/aws-sdk-go-v2/internal/sdk"
 	"github.com/aws/smithy-go"
@@ -72,7 +74,11 @@ func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request,
 	}
 
 	hash := v4.GetPayloadHash(ctx)
-	err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, sdk.NowTime(), func(o *SignerOptions) {
+	signingTime := sdk.NowTime()
+	if skew := internalcontext.GetAttemptSkewContext(ctx); skew != 0 {
+		signingTime = signingTime.Add(skew)
+	}
+	err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, signingTime, func(o *SignerOptions) {
 		o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props)
 
 		o.Logger = v.Logger
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/CHANGELOG.md
new file mode 100644
index 00000000..aadb1d98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/CHANGELOG.md
@@ -0,0 +1,685 @@
+# v1.50.4 (2025-04-16)
+
+* No change notes available for this release.
+
+# v1.50.3 (2025-04-10)
+
+* No change notes available for this release.
+
+# v1.50.2 (2025-04-03)
+
+* No change notes available for this release.
+
+# v1.50.1 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.50.0 (2025-02-27)
+
+* **Feature**: Track credential providers via User-Agent Feature ids
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.49.11 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.49.10 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.49.9 (2025-02-04)
+
+* No change notes available for this release.
+ +# v1.49.8 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.7 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.6 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.49.5 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.49.4 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.3 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.2 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.49.1 (2024-12-17) + +* No change notes available for this release. + +# v1.49.0 (2024-12-03.2) + +* **Feature**: Add FEDERATED type to CreateDataCatalog. This creates Athena Data Catalog, AWS Lambda connector, and AWS Glue connection. Create/DeleteDataCatalog returns DataCatalog. Add Status, ConnectionType, and Error to DataCatalog and DataCatalogSummary. Add DeleteCatalogOnly to delete Athena Catalog only. + +# v1.48.5 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.48.4 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.48.3 (2024-11-07) + +* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses + +# v1.48.2 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.48.1 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.48.0 (2024-10-18) + +* **Feature**: **BREAKING CHANGE**: Remove DataCatalog from Create/DeleteDataCatalog. Remove Status, ConnectionType, and Error from DataCatalog and DataCatalogSummary. These were released inadvertently with no functionality. They were not populated or populated with a default value. Code related to these changes can be safely removed. + +# v1.47.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.47.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.47.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.46.3 (2024-10-03) + +* No change notes available for this release. + +# v1.46.2 (2024-09-27) + +* No change notes available for this release. + +# v1.46.1 (2024-09-25) + +* No change notes available for this release. + +# v1.46.0 (2024-09-23) + +* **Feature**: List/Get/Update/Delete/CreateDataCatalog now integrate with AWS Glue connections. Users can create a Glue connection through Athena or use a Glue connection to define their Athena federated parameters. + +# v1.45.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.44.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.44.7 (2024-09-04) + +* No change notes available for this release. 
+ +# v1.44.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.44.5 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.44.4 (2024-08-09) + +* No change notes available for this release. + +# v1.44.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.44.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.44.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.44.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.43.0 (2024-06-19) + +* **Feature**: Add v2 smoke tests and smithy smokeTests trait for SDK testing. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.42.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.3 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.2 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.1 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.0 (2024-05-29) + +* **Feature**: Throwing validation errors on CreateNotebook with Name containing `/`,`:`,`\` + +# v1.40.8 (2024-05-23) + +* No change notes available for this release. + +# v1.40.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.40.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.3 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.2 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.39.3 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.39.2 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.39.1 (2024-02-15) + +* **Bug Fix**: Correct failure to determine the error type in awsJson services that could occur when errors were modeled with a non-string `code` field. + +# v1.39.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.0 (2024-01-19) + +* **Feature**: Introducing new NotebookS3LocationUri parameter to Athena ImportNotebook API. 
Payload is no longer required and either Payload or NotebookS3LocationUri needs to be provided (not both) for a successful ImportNotebook API call. If both are provided, an InvalidRequestException will be thrown. + +# v1.37.4 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.3 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.37.2 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.1 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + +# v1.37.0 (2023-12-05) + +* **Feature**: Adding IdentityCenter enabled request for interactive query + +# v1.36.2 (2023-12-01) + +* **Bug Fix**: Correct wrapping of errors in authentication workflow. +* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.36.1 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.36.0 (2023-11-29) + +* **Feature**: Expose Options() accessor on service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.3 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.2 (2023-11-28) + +* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. + +# v1.35.1 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.35.0 (2023-11-17) + +* **Feature**: Adding SerivicePreProcessing time metric + +# v1.34.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.34.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.0 (2023-10-24) + +* **Feature**: **BREAKFIX**: Correct nullability and default value representation of various input fields across a large number of services. Calling code that references one or more of the affected fields will need to update usage accordingly. See [2162](https://github.com/aws/aws-sdk-go-v2/issues/2162). + +# v1.31.8 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.7 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.6 (2023-08-24) + +* No change notes available for this release. + +# v1.31.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.1 (2023-08-01) + +* No change notes available for this release. 
+ +# v1.31.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.5 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.4 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.3 (2023-07-03) + +* No change notes available for this release. + +# v1.30.2 (2023-06-15) + +* No change notes available for this release. + +# v1.30.1 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2023-06-08) + +* **Feature**: You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. + +# v1.29.0 (2023-06-02) + +* **Feature**: This release introduces the DeleteCapacityReservation API and the ability to manage capacity reservations using CloudFormation + +# v1.28.0 (2023-05-18) + +* **Feature**: Removing SparkProperties from EngineConfiguration object for StartSession API call + +# v1.27.0 (2023-05-15) + +* **Feature**: You can now define custom spark properties at start of the session for use cases like cluster encryption, table formats, and general Spark tuning. + +# v1.26.1 (2023-05-04) + +* No change notes available for this release. + +# v1.26.0 (2023-04-28) + +* **Feature**: You can now use capacity reservations on Amazon Athena to run SQL queries on fully-managed compute capacity. + +# v1.25.4 (2023-04-27) + +* No change notes available for this release. + +# v1.25.3 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.2 (2023-04-10) + +* No change notes available for this release. + +# v1.25.1 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-03-30) + +* **Feature**: Make DefaultExecutorDpuSize and CoordinatorDpuSize fields optional in StartSession + +# v1.24.0 (2023-03-27) + +* **Feature**: Enforces a minimal level of encryption for the workgroup for query and calculation results that are written to Amazon S3. When enabled, workgroup users can set encryption only to the minimum level set by the administrator or higher when they submit queries. + +# v1.23.2 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.1 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-03-08) + +* **Feature**: A new field SubstatementType is added to GetQueryExecution API, so customers have an error free way to detect the query type and interpret the result. + +# v1.22.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.22.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. 
+ +# v1.22.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.21.0 (2022-12-19) + +* **Feature**: Add missed InvalidRequestException in GetCalculationExecutionCode,StopCalculationExecution APIs. Correct required parameters (Payload and Type) in UpdateNotebook API. Change Notebook size from 15 Mb to 10 Mb. + +# v1.20.3 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.2 (2022-12-08) + +* No change notes available for this release. + +# v1.20.1 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2022-11-30) + +* **Feature**: This release includes support for using Apache Spark in Amazon Athena. + +# v1.19.1 (2022-11-08) + +* No change notes available for this release. + +# v1.19.0 (2022-11-07) + +* **Feature**: Adds support for using Query Result Reuse + +# v1.18.12 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2022-09-01) + +* No change notes available for this release. + +# v1.18.6 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.4 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2022-07-21) + +* **Feature**: This feature allows customers to retrieve runtime statistics for completed queries + +# v1.17.0 (2022-07-14) + +* **Feature**: This release updates data types that contain either QueryExecutionId, NamedQueryId or ExpectedBucketOwner. Ids must be between 1 and 128 characters and contain only non-whitespace characters. ExpectedBucketOwner must be 12-digit string. + +# v1.16.1 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-06-30) + +* **Feature**: This feature introduces the API support for Athena's parameterized query and BatchGetPreparedStatement API. + +# v1.15.4 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-04-15) + +* **Feature**: This release adds subfields, ErrorMessage, Retryable, to the AthenaError response object in the GetQueryExecution API when a query fails. 
+ +# v1.14.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service client model to latest release. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. + +# v1.9.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.8.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-08-12) + +* **Feature**: API client updated + +# v1.4.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_client.go new file mode 100644 index 00000000..24468853 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_client.go @@ -0,0 +1,959 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + cryptorand "crypto/rand" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + smithyrand "github.com/aws/smithy-go/rand" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "sync/atomic" + "time" +) + +const ServiceID = "Athena" +const ServiceAPIVersion = "2017-05-18" + +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + 
panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/athena") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/athena") +} + +// Client provides the API client to make operations call for Amazon Athena. 
+type Client struct { + options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveIdempotencyTokenProvider(&options) + + resolveEndpointResolverV2(&options) + + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + + resolveAuthSchemeResolver(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttempts(&options) + + ignoreAnonymousAuth(&options) + + wrapWithAnonymousAuth(&options) + + resolveAuthSchemes(&options) + + client := &Client{ + options: options, + } + + initializeTimeOffsetResolver(client) + + return client +} + +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. +func (c *Client) Options() Options { + return c.options.Copy() +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + finalizeOperationRetryMaxAttempts(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + ctx, err = withOperationMetrics(ctx, options.MeterProvider) + if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/athena") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", 
aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + + return result, metadata, err +} + +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, 
o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) +} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) 
+ } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "athena", goModuleVersion) + if len(options.AppID) > 0 { + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) + } + + return nil +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, 
middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func resolveIdempotencyTokenProvider(o *Options) { + if o.IdempotencyTokenProvider != nil { + return + } + o.IdempotencyTokenProvider = smithyrand.NewUUIDIdempotencyToken(cryptorand.Reader) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/athena") + }) + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, 
"RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + +// IdempotencyTokenProvider interface for providing idempotency token +type IdempotencyTokenProvider interface { + GetIdempotencyToken() (string, error) +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) + +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type disableHTTPSMiddleware struct { + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func 
addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetNamedQuery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetNamedQuery.go new file mode 100644 index 00000000..b5aeeb86 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetNamedQuery.go @@ -0,0 +1,170 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the details of a single named query or a list of up to 50 queries, +// which you provide as an array of query ID strings. Requires you to have access +// to the workgroup in which the queries were saved. Use ListNamedQueriesInputto get the list of named +// query IDs in the specified workgroup. If information could not be retrieved for +// a submitted query ID, information about the query ID submitted is listed under UnprocessedNamedQueryId +// . 
Named queries differ from executed queries. Use BatchGetQueryExecutionInputto get details about each +// unique query execution, and ListQueryExecutionsInputto get a list of query execution IDs. +func (c *Client) BatchGetNamedQuery(ctx context.Context, params *BatchGetNamedQueryInput, optFns ...func(*Options)) (*BatchGetNamedQueryOutput, error) { + if params == nil { + params = &BatchGetNamedQueryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchGetNamedQuery", params, optFns, c.addOperationBatchGetNamedQueryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchGetNamedQueryOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Contains an array of named query IDs. +type BatchGetNamedQueryInput struct { + + // An array of query IDs. + // + // This member is required. + NamedQueryIds []string + + noSmithyDocumentSerde +} + +type BatchGetNamedQueryOutput struct { + + // Information about the named query IDs submitted. + NamedQueries []types.NamedQuery + + // Information about provided query IDs. + UnprocessedNamedQueryIds []types.UnprocessedNamedQueryId + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchGetNamedQueryMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchGetNamedQuery{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchGetNamedQuery{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "BatchGetNamedQuery"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpBatchGetNamedQueryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetNamedQuery(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + 
if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchGetNamedQuery(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "BatchGetNamedQuery", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetPreparedStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetPreparedStatement.go new file mode 100644 index 00000000..d190c363 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetPreparedStatement.go @@ -0,0 +1,173 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the details of a single prepared statement or a list of up to 256 +// prepared statements for the array of prepared statement names that you provide. +// Requires you to have access to the workgroup to which the prepared statements +// belong. If a prepared statement cannot be retrieved for the name specified, the +// statement is listed in UnprocessedPreparedStatementNames . +func (c *Client) BatchGetPreparedStatement(ctx context.Context, params *BatchGetPreparedStatementInput, optFns ...func(*Options)) (*BatchGetPreparedStatementOutput, error) { + if params == nil { + params = &BatchGetPreparedStatementInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchGetPreparedStatement", params, optFns, c.addOperationBatchGetPreparedStatementMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchGetPreparedStatementOutput) + out.ResultMetadata = metadata + return out, nil +} + +type BatchGetPreparedStatementInput struct { + + // A list of prepared statement names to return. + // + // This member is required. + PreparedStatementNames []string + + // The name of the workgroup to which the prepared statements belong. + // + // This member is required. + WorkGroup *string + + noSmithyDocumentSerde +} + +type BatchGetPreparedStatementOutput struct { + + // The list of prepared statements returned. + PreparedStatements []types.PreparedStatement + + // A list of one or more prepared statements that were requested but could not be + // returned. + UnprocessedPreparedStatementNames []types.UnprocessedPreparedStatementName + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchGetPreparedStatementMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchGetPreparedStatement{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchGetPreparedStatement{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "BatchGetPreparedStatement"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpBatchGetPreparedStatementValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetPreparedStatement(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchGetPreparedStatement(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "BatchGetPreparedStatement", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetQueryExecution.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetQueryExecution.go new file mode 100644 index 00000000..f47a7edb --- /dev/null 
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_BatchGetQueryExecution.go @@ -0,0 +1,168 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the details of a single query execution or a list of up to 50 query +// executions, which you provide as an array of query execution ID strings. +// Requires you to have access to the workgroup in which the queries ran. To get a +// list of query execution IDs, use ListQueryExecutionsInput$WorkGroup. Query executions differ from named (saved) +// queries. Use BatchGetNamedQueryInputto get details about named queries. +func (c *Client) BatchGetQueryExecution(ctx context.Context, params *BatchGetQueryExecutionInput, optFns ...func(*Options)) (*BatchGetQueryExecutionOutput, error) { + if params == nil { + params = &BatchGetQueryExecutionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "BatchGetQueryExecution", params, optFns, c.addOperationBatchGetQueryExecutionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*BatchGetQueryExecutionOutput) + out.ResultMetadata = metadata + return out, nil +} + +// Contains an array of query execution IDs. +type BatchGetQueryExecutionInput struct { + + // An array of query execution IDs. + // + // This member is required. + QueryExecutionIds []string + + noSmithyDocumentSerde +} + +type BatchGetQueryExecutionOutput struct { + + // Information about a query execution. + QueryExecutions []types.QueryExecution + + // Information about the query executions that failed to run. + UnprocessedQueryExecutionIds []types.UnprocessedQueryExecutionId + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationBatchGetQueryExecutionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpBatchGetQueryExecution{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpBatchGetQueryExecution{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "BatchGetQueryExecution"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpBatchGetQueryExecutionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetQueryExecution(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opBatchGetQueryExecution(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "BatchGetQueryExecution", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CancelCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CancelCapacityReservation.go new file mode 100644 index 00000000..38701a36 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CancelCapacityReservation.go @@ -0,0 +1,159 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Cancels the capacity reservation with the specified name. Cancelled +// reservations remain in your account and will be deleted 45 days after +// cancellation. During the 45 days, you cannot re-purpose or reuse a reservation +// that has been cancelled, but you can refer to its tags and view it for +// historical reference. +func (c *Client) CancelCapacityReservation(ctx context.Context, params *CancelCapacityReservationInput, optFns ...func(*Options)) (*CancelCapacityReservationOutput, error) { + if params == nil { + params = &CancelCapacityReservationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CancelCapacityReservation", params, optFns, c.addOperationCancelCapacityReservationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CancelCapacityReservationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CancelCapacityReservationInput struct { + + // The name of the capacity reservation to cancel. + // + // This member is required. + Name *string + + noSmithyDocumentSerde +} + +type CancelCapacityReservationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCancelCapacityReservationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCancelCapacityReservation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCancelCapacityReservation{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CancelCapacityReservation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCancelCapacityReservationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelCapacityReservation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCancelCapacityReservation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CancelCapacityReservation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateCapacityReservation.go new file mode 100644 index 00000000..ceadcaa2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateCapacityReservation.go @@ -0,0 +1,165 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a capacity reservation with the specified name and number of requested +// data processing units. +func (c *Client) CreateCapacityReservation(ctx context.Context, params *CreateCapacityReservationInput, optFns ...func(*Options)) (*CreateCapacityReservationOutput, error) { + if params == nil { + params = &CreateCapacityReservationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateCapacityReservation", params, optFns, c.addOperationCreateCapacityReservationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateCapacityReservationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateCapacityReservationInput struct { + + // The name of the capacity reservation to create. + // + // This member is required. + Name *string + + // The number of requested data processing units. + // + // This member is required. + TargetDpus *int32 + + // The tags for the capacity reservation. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateCapacityReservationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateCapacityReservationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateCapacityReservation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateCapacityReservation{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateCapacityReservation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateCapacityReservationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCapacityReservation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateCapacityReservation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateCapacityReservation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateDataCatalog.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateDataCatalog.go new file mode 100644 index 00000000..0d6d4956 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateDataCatalog.go @@ -0,0 +1,259 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates (registers) a data catalog with the specified name and properties. +// Catalogs created are visible to all users of the same Amazon Web Services +// account. +// +// This API operation creates the following resources. +// +// - CFN Stack Name with a maximum length of 128 characters and prefix +// athenafederatedcatalog-CATALOG_NAME_SANITIZED with length 23 characters. +// +// - Lambda Function Name with a maximum length of 64 characters and prefix +// athenafederatedcatalog_CATALOG_NAME_SANITIZED with length 23 characters. +// +// - Glue Connection Name with a maximum length of 255 characters and a prefix +// athenafederatedcatalog_CATALOG_NAME_SANITIZED with length 23 characters. +func (c *Client) CreateDataCatalog(ctx context.Context, params *CreateDataCatalogInput, optFns ...func(*Options)) (*CreateDataCatalogOutput, error) { + if params == nil { + params = &CreateDataCatalogInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateDataCatalog", params, optFns, c.addOperationCreateDataCatalogMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateDataCatalogOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateDataCatalogInput struct { + + // The name of the data catalog to create. The catalog name must be unique for the + // Amazon Web Services account and can use a maximum of 127 alphanumeric, + // underscore, at sign, or hyphen characters. The remainder of the length + // constraint of 256 is reserved for use by Athena. + // + // For FEDERATED type the catalog name has following considerations and limits: + // + // - The catalog name allows special characters such as _ , @ , \ , - . These + // characters are replaced with a hyphen (-) when creating the CFN Stack Name and + // with an underscore (_) when creating the Lambda Function and Glue Connection + // Name. + // + // - The catalog name has a theoretical limit of 128 characters. However, since + // we use it to create other resources that allow less characters and we prepend a + // prefix to it, the actual catalog name limit for FEDERATED catalog is 64 - 23 = + // 41 characters. + // + // This member is required. + Name *string + + // The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an + // Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is + // a federated catalog for which Athena creates the connection and the Lambda + // function for you based on the parameters that you pass. + // + // This member is required. + Type types.DataCatalogType + + // A description of the data catalog to be created. + Description *string + + // Specifies the Lambda function or functions to use for creating the data + // catalog. This is a mapping whose values depend on the catalog type. + // + // - For the HIVE data catalog type, use the following syntax. The + // metadata-function parameter is required. The sdk-version parameter is optional + // and defaults to the currently supported version. 
+ // + // metadata-function=lambda_arn, sdk-version=version_number + // + // - For the LAMBDA data catalog type, use one of the following sets of required + // parameters, but not both. + // + // - If you have one Lambda function that processes metadata and another for + // reading the actual data, use the following syntax. Both parameters are required. + // + // metadata-function=lambda_arn, record-function=lambda_arn + // + // - If you have a composite Lambda function that processes both metadata and + // data, use the following syntax to specify your Lambda function. + // + // function=lambda_arn + // + // - The GLUE type takes a catalog ID parameter and is required. The catalog_id + // is the account ID of the Amazon Web Services account to which the Glue Data + // Catalog belongs. + // + // catalog-id=catalog_id + // + // - The GLUE data catalog type also applies to the default AwsDataCatalog that + // already exists in your account, of which you can have only one and cannot + // modify. + // + // - The FEDERATED data catalog type uses one of the following parameters, but + // not both. Use connection-arn for an existing Glue connection. Use + // connection-type and connection-properties to specify the configuration setting + // for a new connection. + // + // - connection-arn: + // + // - lambda-role-arn (optional): The execution role to use for the Lambda + // function. If not provided, one is created. + // + // - connection-type:MYSQL|REDSHIFT|...., connection-properties:"" + // + // For , use escaped JSON text, as in the following example. + // + // "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" + Parameters map[string]string + + // A list of comma separated tags to add to the data catalog that is created. All + // the resources that are created by the CreateDataCatalog API operation with + // FEDERATED type will have the tag federated_athena_datacatalog="true" . This + // includes the CFN Stack, Glue Connection, Athena DataCatalog, and all the + // resources created as part of the CFN Stack (Lambda Function, IAM + // policies/roles). + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateDataCatalogOutput struct { + + // Contains information about a data catalog in an Amazon Web Services account. + // + // In the Athena console, data catalogs are listed as "data sources" on the Data + // sources page under the Data source name column. + DataCatalog *types.DataCatalog + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateDataCatalogMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateDataCatalog{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateDataCatalog{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateDataCatalog"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateDataCatalogValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDataCatalog(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateDataCatalog(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateDataCatalog", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateNamedQuery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateNamedQuery.go new file mode 100644 index 00000000..984ef27d --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateNamedQuery.go @@ -0,0 +1,223 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a named query in the specified workgroup. Requires that you have access +// to the workgroup. +func (c *Client) CreateNamedQuery(ctx context.Context, params *CreateNamedQueryInput, optFns ...func(*Options)) (*CreateNamedQueryOutput, error) { + if params == nil { + params = &CreateNamedQueryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateNamedQuery", params, optFns, c.addOperationCreateNamedQueryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateNamedQueryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateNamedQueryInput struct { + + // The database to which the query belongs. + // + // This member is required. + Database *string + + // The query name. + // + // This member is required. + Name *string + + // The contents of the query with all query statements. + // + // This member is required. + QueryString *string + + // A unique case-sensitive string used to ensure the request to create the query + // is idempotent (executes only once). If another CreateNamedQuery request is + // received, the same response is returned and another query is not created. If a + // parameter has changed, for example, the QueryString , an error is returned. + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for users. + // If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, + // you must provide this token or the action will fail. + ClientRequestToken *string + + // The query description. + Description *string + + // The name of the workgroup in which the named query is being created. + WorkGroup *string + + noSmithyDocumentSerde +} + +type CreateNamedQueryOutput struct { + + // The unique ID of the query. + NamedQueryId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateNamedQueryMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateNamedQuery{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateNamedQuery{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateNamedQuery"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opCreateNamedQueryMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateNamedQueryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateNamedQuery(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpCreateNamedQuery struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateNamedQuery) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateNamedQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, 
err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateNamedQueryInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateNamedQueryInput ") + } + + if input.ClientRequestToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientRequestToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateNamedQueryMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateNamedQuery{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opCreateNamedQuery(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateNamedQuery", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateNotebook.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateNotebook.go new file mode 100644 index 00000000..67791a25 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateNotebook.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates an empty ipynb file in the specified Apache Spark enabled workgroup. +// Throws an error if a file in the workgroup with the same name already exists. +func (c *Client) CreateNotebook(ctx context.Context, params *CreateNotebookInput, optFns ...func(*Options)) (*CreateNotebookOutput, error) { + if params == nil { + params = &CreateNotebookInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateNotebook", params, optFns, c.addOperationCreateNotebookMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateNotebookOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateNotebookInput struct { + + // The name of the ipynb file to be created in the Spark workgroup, without the + // .ipynb extension. + // + // This member is required. + Name *string + + // The name of the Spark enabled workgroup in which the notebook will be created. + // + // This member is required. + WorkGroup *string + + // A unique case-sensitive string used to ensure the request to create the + // notebook is idempotent (executes only once). + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for you. + // If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, + // you must provide this token or the action will fail. + ClientRequestToken *string + + noSmithyDocumentSerde +} + +type CreateNotebookOutput struct { + + // A unique identifier for the notebook. + NotebookId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateNotebookMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateNotebook{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateNotebook{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateNotebook"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateNotebookValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateNotebook(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateNotebook(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateNotebook", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreatePreparedStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreatePreparedStatement.go new file mode 100644 index 00000000..0b1c640d --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreatePreparedStatement.go @@ -0,0 +1,168 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a prepared statement for use with SQL queries in Athena. +func (c *Client) CreatePreparedStatement(ctx context.Context, params *CreatePreparedStatementInput, optFns ...func(*Options)) (*CreatePreparedStatementOutput, error) { + if params == nil { + params = &CreatePreparedStatementInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreatePreparedStatement", params, optFns, c.addOperationCreatePreparedStatementMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreatePreparedStatementOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreatePreparedStatementInput struct { + + // The query string for the prepared statement. + // + // This member is required. + QueryStatement *string + + // The name of the prepared statement. + // + // This member is required. + StatementName *string + + // The name of the workgroup to which the prepared statement belongs. + // + // This member is required. + WorkGroup *string + + // The description of the prepared statement. + Description *string + + noSmithyDocumentSerde +} + +type CreatePreparedStatementOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreatePreparedStatementMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreatePreparedStatement{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreatePreparedStatement{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreatePreparedStatement"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + 
} + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreatePreparedStatementValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreatePreparedStatement(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreatePreparedStatement(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreatePreparedStatement", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreatePresignedNotebookUrl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreatePresignedNotebookUrl.go new file mode 100644 index 00000000..463084cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreatePresignedNotebookUrl.go @@ -0,0 +1,177 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets an authentication token and the URL at which the notebook can be accessed. +// During programmatic access, CreatePresignedNotebookUrl must be called every 10 +// minutes to refresh the authentication token. For information about granting +// programmatic access, see [Grant programmatic access]. +// +// [Grant programmatic access]: https://docs.aws.amazon.com/athena/latest/ug/setting-up.html#setting-up-grant-programmatic-access +func (c *Client) CreatePresignedNotebookUrl(ctx context.Context, params *CreatePresignedNotebookUrlInput, optFns ...func(*Options)) (*CreatePresignedNotebookUrlOutput, error) { + if params == nil { + params = &CreatePresignedNotebookUrlInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreatePresignedNotebookUrl", params, optFns, c.addOperationCreatePresignedNotebookUrlMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreatePresignedNotebookUrlOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreatePresignedNotebookUrlInput struct { + + // The session ID. + // + // This member is required. + SessionId *string + + noSmithyDocumentSerde +} + +type CreatePresignedNotebookUrlOutput struct { + + // The authentication token for the notebook. + // + // This member is required. + AuthToken *string + + // The UTC epoch time when the authentication token expires. + // + // This member is required. + AuthTokenExpirationTime *int64 + + // The URL of the notebook. The URL includes the authentication token and notebook + // file name and points directly to the opened notebook. + // + // This member is required. 
+ NotebookUrl *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreatePresignedNotebookUrlMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreatePresignedNotebookUrl{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreatePresignedNotebookUrl{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreatePresignedNotebookUrl"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreatePresignedNotebookUrlValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreatePresignedNotebookUrl(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreatePresignedNotebookUrl(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreatePresignedNotebookUrl", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateWorkGroup.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateWorkGroup.go new file mode 100644 index 00000000..1be9c452 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_CreateWorkGroup.go @@ -0,0 +1,173 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a workgroup with the specified name. A workgroup can be an Apache Spark +// enabled workgroup or an Athena SQL workgroup. +func (c *Client) CreateWorkGroup(ctx context.Context, params *CreateWorkGroupInput, optFns ...func(*Options)) (*CreateWorkGroupOutput, error) { + if params == nil { + params = &CreateWorkGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateWorkGroup", params, optFns, c.addOperationCreateWorkGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateWorkGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateWorkGroupInput struct { + + // The workgroup name. + // + // This member is required. + Name *string + + // Contains configuration information for creating an Athena SQL workgroup or + // Spark enabled Athena workgroup. Athena SQL workgroup configuration includes the + // location in Amazon S3 where query and calculation results are stored, the + // encryption configuration, if any, used for encrypting query results, whether the + // Amazon CloudWatch Metrics are enabled for the workgroup, the limit for the + // amount of bytes scanned (cutoff) per query, if it is specified, and whether + // workgroup's settings (specified with EnforceWorkGroupConfiguration ) in the + // WorkGroupConfiguration override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + Configuration *types.WorkGroupConfiguration + + // The workgroup description. + Description *string + + // A list of comma separated tags to add to the workgroup that is created. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateWorkGroupOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateWorkGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateWorkGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateWorkGroup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateWorkGroup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateWorkGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateWorkGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateWorkGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateWorkGroup", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteCapacityReservation.go new file mode 100644 index 00000000..c6782f0a --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteCapacityReservation.go @@ -0,0 +1,159 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a cancelled capacity reservation. A reservation must be cancelled +// before it can be deleted. A deleted reservation is immediately removed from your +// account and can no longer be referenced, including by its ARN. A deleted +// reservation cannot be called by GetCapacityReservation , and deleted +// reservations do not appear in the output of ListCapacityReservations . +func (c *Client) DeleteCapacityReservation(ctx context.Context, params *DeleteCapacityReservationInput, optFns ...func(*Options)) (*DeleteCapacityReservationOutput, error) { + if params == nil { + params = &DeleteCapacityReservationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteCapacityReservation", params, optFns, c.addOperationDeleteCapacityReservationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteCapacityReservationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteCapacityReservationInput struct { + + // The name of the capacity reservation to delete. + // + // This member is required. + Name *string + + noSmithyDocumentSerde +} + +type DeleteCapacityReservationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteCapacityReservationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteCapacityReservation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteCapacityReservation{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteCapacityReservation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, 
options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteCapacityReservationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCapacityReservation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteCapacityReservation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteCapacityReservation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteDataCatalog.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteDataCatalog.go new file mode 100644 index 00000000..8adb9cbc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteDataCatalog.go @@ -0,0 +1,169 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a data catalog. +func (c *Client) DeleteDataCatalog(ctx context.Context, params *DeleteDataCatalogInput, optFns ...func(*Options)) (*DeleteDataCatalogOutput, error) { + if params == nil { + params = &DeleteDataCatalogInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteDataCatalog", params, optFns, c.addOperationDeleteDataCatalogMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteDataCatalogOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteDataCatalogInput struct { + + // The name of the data catalog to delete. + // + // This member is required. + Name *string + + // Deletes the Athena Data Catalog. You can only use this with the FEDERATED + // catalogs. You usually perform this before registering the connector with Glue + // Data Catalog. After deletion, you will have to manage the Glue Connection and + // Lambda function. + DeleteCatalogOnly bool + + noSmithyDocumentSerde +} + +type DeleteDataCatalogOutput struct { + + // Contains information about a data catalog in an Amazon Web Services account. + // + // In the Athena console, data catalogs are listed as "data sources" on the Data + // sources page under the Data source name column. + DataCatalog *types.DataCatalog + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteDataCatalogMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteDataCatalog{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteDataCatalog{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteDataCatalog"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteDataCatalogValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteDataCatalog(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteDataCatalog(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteDataCatalog", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteNamedQuery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteNamedQuery.go new file mode 100644 index 00000000..d83992e4 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteNamedQuery.go @@ -0,0 +1,192 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the named query if you have access to the workgroup in which the query +// was saved. +func (c *Client) DeleteNamedQuery(ctx context.Context, params *DeleteNamedQueryInput, optFns ...func(*Options)) (*DeleteNamedQueryOutput, error) { + if params == nil { + params = &DeleteNamedQueryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteNamedQuery", params, optFns, c.addOperationDeleteNamedQueryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteNamedQueryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteNamedQueryInput struct { + + // The unique ID of the query to delete. + // + // This member is required. + NamedQueryId *string + + noSmithyDocumentSerde +} + +type DeleteNamedQueryOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteNamedQueryMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteNamedQuery{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteNamedQuery{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteNamedQuery"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opDeleteNamedQueryMiddleware(stack, options); err != nil { + return err + } + if err = addOpDeleteNamedQueryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteNamedQuery(options.Region), 
middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpDeleteNamedQuery struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpDeleteNamedQuery) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpDeleteNamedQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*DeleteNamedQueryInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *DeleteNamedQueryInput ") + } + + if input.NamedQueryId == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.NamedQueryId = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opDeleteNamedQueryMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpDeleteNamedQuery{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opDeleteNamedQuery(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteNamedQuery", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteNotebook.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteNotebook.go new file mode 100644 index 00000000..85bb0483 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteNotebook.go @@ -0,0 +1,155 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified notebook. +func (c *Client) DeleteNotebook(ctx context.Context, params *DeleteNotebookInput, optFns ...func(*Options)) (*DeleteNotebookOutput, error) { + if params == nil { + params = &DeleteNotebookInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteNotebook", params, optFns, c.addOperationDeleteNotebookMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteNotebookOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteNotebookInput struct { + + // The ID of the notebook to delete. + // + // This member is required. + NotebookId *string + + noSmithyDocumentSerde +} + +type DeleteNotebookOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteNotebookMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteNotebook{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteNotebook{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteNotebook"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteNotebookValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteNotebook(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteNotebook(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteNotebook", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeletePreparedStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeletePreparedStatement.go new file mode 100644 index 00000000..4025661b --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeletePreparedStatement.go @@ -0,0 +1,161 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the prepared statement with the specified name from the specified +// workgroup. +func (c *Client) DeletePreparedStatement(ctx context.Context, params *DeletePreparedStatementInput, optFns ...func(*Options)) (*DeletePreparedStatementOutput, error) { + if params == nil { + params = &DeletePreparedStatementInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeletePreparedStatement", params, optFns, c.addOperationDeletePreparedStatementMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeletePreparedStatementOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeletePreparedStatementInput struct { + + // The name of the prepared statement to delete. + // + // This member is required. + StatementName *string + + // The workgroup to which the statement to be deleted belongs. + // + // This member is required. + WorkGroup *string + + noSmithyDocumentSerde +} + +type DeletePreparedStatementOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeletePreparedStatementMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeletePreparedStatement{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeletePreparedStatement{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeletePreparedStatement"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeletePreparedStatementValidationMiddleware(stack); err != 
nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePreparedStatement(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeletePreparedStatement(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeletePreparedStatement", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteWorkGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteWorkGroup.go new file mode 100644 index 00000000..03f32d8d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_DeleteWorkGroup.go @@ -0,0 +1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the workgroup with the specified name. The primary workgroup cannot be +// deleted. +func (c *Client) DeleteWorkGroup(ctx context.Context, params *DeleteWorkGroupInput, optFns ...func(*Options)) (*DeleteWorkGroupOutput, error) { + if params == nil { + params = &DeleteWorkGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteWorkGroup", params, optFns, c.addOperationDeleteWorkGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteWorkGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteWorkGroupInput struct { + + // The unique name of the workgroup to delete. + // + // This member is required. + WorkGroup *string + + // The option to delete the workgroup and its contents even if the workgroup + // contains any named queries, query executions, or notebooks. + RecursiveDeleteOption *bool + + noSmithyDocumentSerde +} + +type DeleteWorkGroupOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteWorkGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteWorkGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteWorkGroup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteWorkGroup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteWorkGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteWorkGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteWorkGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteWorkGroup", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ExportNotebook.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ExportNotebook.go new file mode 100644 index 00000000..9063c08e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ExportNotebook.go @@ -0,0 +1,163 
@@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Exports the specified notebook and its metadata. +func (c *Client) ExportNotebook(ctx context.Context, params *ExportNotebookInput, optFns ...func(*Options)) (*ExportNotebookOutput, error) { + if params == nil { + params = &ExportNotebookInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ExportNotebook", params, optFns, c.addOperationExportNotebookMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ExportNotebookOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ExportNotebookInput struct { + + // The ID of the notebook to export. + // + // This member is required. + NotebookId *string + + noSmithyDocumentSerde +} + +type ExportNotebookOutput struct { + + // The notebook metadata, including notebook ID, notebook name, and workgroup name. + NotebookMetadata *types.NotebookMetadata + + // The content of the exported notebook. + Payload *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationExportNotebookMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpExportNotebook{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpExportNotebook{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ExportNotebook"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpExportNotebookValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opExportNotebook(options.Region), middleware.Before); err != nil { + 
return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opExportNotebook(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ExportNotebook", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecution.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecution.go new file mode 100644 index 00000000..c6664d70 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecution.go @@ -0,0 +1,180 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes a previously submitted calculation execution. +func (c *Client) GetCalculationExecution(ctx context.Context, params *GetCalculationExecutionInput, optFns ...func(*Options)) (*GetCalculationExecutionOutput, error) { + if params == nil { + params = &GetCalculationExecutionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCalculationExecution", params, optFns, c.addOperationGetCalculationExecutionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCalculationExecutionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCalculationExecutionInput struct { + + // The calculation execution UUID. + // + // This member is required. + CalculationExecutionId *string + + noSmithyDocumentSerde +} + +type GetCalculationExecutionOutput struct { + + // The calculation execution UUID. + CalculationExecutionId *string + + // The description of the calculation execution. + Description *string + + // Contains result information. This field is populated only if the calculation is + // completed. + Result *types.CalculationResult + + // The session ID that the calculation ran in. + SessionId *string + + // Contains information about the data processing unit (DPU) execution time and + // progress. This field is populated only when statistics are available. + Statistics *types.CalculationStatistics + + // Contains information about the status of the calculation. + Status *types.CalculationStatus + + // The Amazon S3 location in which calculation results are stored. + WorkingDirectory *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCalculationExecutionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCalculationExecution{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCalculationExecution{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetCalculationExecution"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetCalculationExecutionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCalculationExecution(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetCalculationExecution(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetCalculationExecution", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecutionCode.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecutionCode.go new file mode 100644 index 00000000..f5864706 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecutionCode.go @@ -0,0 +1,159 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the unencrypted code that was executed for the calculation. +func (c *Client) GetCalculationExecutionCode(ctx context.Context, params *GetCalculationExecutionCodeInput, optFns ...func(*Options)) (*GetCalculationExecutionCodeOutput, error) { + if params == nil { + params = &GetCalculationExecutionCodeInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCalculationExecutionCode", params, optFns, c.addOperationGetCalculationExecutionCodeMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCalculationExecutionCodeOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCalculationExecutionCodeInput struct { + + // The calculation execution UUID. + // + // This member is required. + CalculationExecutionId *string + + noSmithyDocumentSerde +} + +type GetCalculationExecutionCodeOutput struct { + + // The unencrypted code that was executed for the calculation. + CodeBlock *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCalculationExecutionCodeMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCalculationExecutionCode{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCalculationExecutionCode{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetCalculationExecutionCode"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetCalculationExecutionCodeValidationMiddleware(stack); err 
!= nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCalculationExecutionCode(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetCalculationExecutionCode(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetCalculationExecutionCode", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecutionStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecutionStatus.go new file mode 100644 index 00000000..8d76984a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCalculationExecutionStatus.go @@ -0,0 +1,163 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the status of a current calculation. +func (c *Client) GetCalculationExecutionStatus(ctx context.Context, params *GetCalculationExecutionStatusInput, optFns ...func(*Options)) (*GetCalculationExecutionStatusOutput, error) { + if params == nil { + params = &GetCalculationExecutionStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCalculationExecutionStatus", params, optFns, c.addOperationGetCalculationExecutionStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCalculationExecutionStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCalculationExecutionStatusInput struct { + + // The calculation execution UUID. + // + // This member is required. + CalculationExecutionId *string + + noSmithyDocumentSerde +} + +type GetCalculationExecutionStatusOutput struct { + + // Contains information about the DPU execution time and progress. + Statistics *types.CalculationStatistics + + // Contains information about the calculation execution status. + Status *types.CalculationStatus + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCalculationExecutionStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCalculationExecutionStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCalculationExecutionStatus{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetCalculationExecutionStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetCalculationExecutionStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCalculationExecutionStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetCalculationExecutionStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetCalculationExecutionStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCapacityAssignmentConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCapacityAssignmentConfiguration.go new 
file mode 100644 index 00000000..473476c9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCapacityAssignmentConfiguration.go @@ -0,0 +1,165 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the capacity assignment configuration for a capacity reservation, if one +// exists. +func (c *Client) GetCapacityAssignmentConfiguration(ctx context.Context, params *GetCapacityAssignmentConfigurationInput, optFns ...func(*Options)) (*GetCapacityAssignmentConfigurationOutput, error) { + if params == nil { + params = &GetCapacityAssignmentConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCapacityAssignmentConfiguration", params, optFns, c.addOperationGetCapacityAssignmentConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCapacityAssignmentConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCapacityAssignmentConfigurationInput struct { + + // The name of the capacity reservation to retrieve the capacity assignment + // configuration for. + // + // This member is required. + CapacityReservationName *string + + noSmithyDocumentSerde +} + +type GetCapacityAssignmentConfigurationOutput struct { + + // The requested capacity assignment configuration for the specified capacity + // reservation. + // + // This member is required. + CapacityAssignmentConfiguration *types.CapacityAssignmentConfiguration + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCapacityAssignmentConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCapacityAssignmentConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCapacityAssignmentConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetCapacityAssignmentConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil 
{ + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetCapacityAssignmentConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCapacityAssignmentConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetCapacityAssignmentConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetCapacityAssignmentConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCapacityReservation.go new file mode 100644 index 00000000..5106ef14 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetCapacityReservation.go @@ -0,0 +1,162 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about the capacity reservation with the specified name. +func (c *Client) GetCapacityReservation(ctx context.Context, params *GetCapacityReservationInput, optFns ...func(*Options)) (*GetCapacityReservationOutput, error) { + if params == nil { + params = &GetCapacityReservationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCapacityReservation", params, optFns, c.addOperationGetCapacityReservationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCapacityReservationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCapacityReservationInput struct { + + // The name of the capacity reservation. + // + // This member is required. + Name *string + + noSmithyDocumentSerde +} + +type GetCapacityReservationOutput struct { + + // The requested capacity reservation structure. + // + // This member is required. + CapacityReservation *types.CapacityReservation + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCapacityReservationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetCapacityReservation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetCapacityReservation{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetCapacityReservation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetCapacityReservationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCapacityReservation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetCapacityReservation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetCapacityReservation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetDataCatalog.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetDataCatalog.go new file mode 100644 index 00000000..d1e8a684 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetDataCatalog.go @@ -0,0 +1,163 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the specified data catalog. +func (c *Client) GetDataCatalog(ctx context.Context, params *GetDataCatalogInput, optFns ...func(*Options)) (*GetDataCatalogOutput, error) { + if params == nil { + params = &GetDataCatalogInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDataCatalog", params, optFns, c.addOperationGetDataCatalogMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetDataCatalogOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetDataCatalogInput struct { + + // The name of the data catalog to return. + // + // This member is required. + Name *string + + // The name of the workgroup. Required if making an IAM Identity Center request. + WorkGroup *string + + noSmithyDocumentSerde +} + +type GetDataCatalogOutput struct { + + // The data catalog returned. + DataCatalog *types.DataCatalog + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetDataCatalogMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetDataCatalog{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetDataCatalog{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetDataCatalog"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetDataCatalogValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opGetDataCatalog(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetDataCatalog(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetDataCatalog", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetDatabase.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetDatabase.go new file mode 100644 index 00000000..f32737aa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetDatabase.go @@ -0,0 +1,169 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a database object for the specified database and data catalog. +func (c *Client) GetDatabase(ctx context.Context, params *GetDatabaseInput, optFns ...func(*Options)) (*GetDatabaseOutput, error) { + if params == nil { + params = &GetDatabaseInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDatabase", params, optFns, c.addOperationGetDatabaseMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetDatabaseOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetDatabaseInput struct { + + // The name of the data catalog that contains the database to return. + // + // This member is required. + CatalogName *string + + // The name of the database to return. + // + // This member is required. + DatabaseName *string + + // The name of the workgroup for which the metadata is being fetched. Required if + // requesting an IAM Identity Center enabled Glue Data Catalog. + WorkGroup *string + + noSmithyDocumentSerde +} + +type GetDatabaseOutput struct { + + // The database returned. + Database *types.Database + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetDatabaseMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetDatabase{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetDatabase{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetDatabase"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetDatabaseValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDatabase(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetDatabase(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetDatabase", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetNamedQuery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetNamedQuery.go new file mode 100644 index 00000000..7a6b0318 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetNamedQuery.go @@ -0,0 +1,161 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about a single query. Requires that you have access to the +// workgroup in which the query was saved. +func (c *Client) GetNamedQuery(ctx context.Context, params *GetNamedQueryInput, optFns ...func(*Options)) (*GetNamedQueryOutput, error) { + if params == nil { + params = &GetNamedQueryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetNamedQuery", params, optFns, c.addOperationGetNamedQueryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetNamedQueryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetNamedQueryInput struct { + + // The unique ID of the query. Use ListNamedQueries to get query IDs. + // + // This member is required. + NamedQueryId *string + + noSmithyDocumentSerde +} + +type GetNamedQueryOutput struct { + + // Information about the query. + NamedQuery *types.NamedQuery + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetNamedQueryMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetNamedQuery{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetNamedQuery{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetNamedQuery"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetNamedQueryValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetNamedQuery(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err 
!= nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetNamedQuery(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetNamedQuery", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetNotebookMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetNotebookMetadata.go new file mode 100644 index 00000000..e5fbc1bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetNotebookMetadata.go @@ -0,0 +1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves notebook metadata for the specified notebook ID. +func (c *Client) GetNotebookMetadata(ctx context.Context, params *GetNotebookMetadataInput, optFns ...func(*Options)) (*GetNotebookMetadataOutput, error) { + if params == nil { + params = &GetNotebookMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetNotebookMetadata", params, optFns, c.addOperationGetNotebookMetadataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetNotebookMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetNotebookMetadataInput struct { + + // The ID of the notebook whose metadata is to be retrieved. + // + // This member is required. + NotebookId *string + + noSmithyDocumentSerde +} + +type GetNotebookMetadataOutput struct { + + // The metadata that is returned for the specified notebook ID. + NotebookMetadata *types.NotebookMetadata + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetNotebookMetadataMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetNotebookMetadata{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetNotebookMetadata{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetNotebookMetadata"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetNotebookMetadataValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetNotebookMetadata(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetNotebookMetadata(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetNotebookMetadata", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetPreparedStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetPreparedStatement.go new file mode 100644 index 00000000..82d27ab5 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetPreparedStatement.go @@ -0,0 +1,166 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the prepared statement with the specified name from the specified +// workgroup. +func (c *Client) GetPreparedStatement(ctx context.Context, params *GetPreparedStatementInput, optFns ...func(*Options)) (*GetPreparedStatementOutput, error) { + if params == nil { + params = &GetPreparedStatementInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetPreparedStatement", params, optFns, c.addOperationGetPreparedStatementMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetPreparedStatementOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetPreparedStatementInput struct { + + // The name of the prepared statement to retrieve. + // + // This member is required. + StatementName *string + + // The workgroup to which the statement to be retrieved belongs. + // + // This member is required. + WorkGroup *string + + noSmithyDocumentSerde +} + +type GetPreparedStatementOutput struct { + + // The name of the prepared statement that was retrieved. + PreparedStatement *types.PreparedStatement + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetPreparedStatementMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetPreparedStatement{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetPreparedStatement{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetPreparedStatement"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetPreparedStatementValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPreparedStatement(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetPreparedStatement(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetPreparedStatement", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryExecution.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryExecution.go new file mode 100644 index 00000000..f399ad0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryExecution.go @@ -0,0 +1,162 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about a single execution of a query if you have access to +// the workgroup in which the query ran. Each time a query executes, information +// about the query execution is saved with a unique ID. +func (c *Client) GetQueryExecution(ctx context.Context, params *GetQueryExecutionInput, optFns ...func(*Options)) (*GetQueryExecutionOutput, error) { + if params == nil { + params = &GetQueryExecutionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetQueryExecution", params, optFns, c.addOperationGetQueryExecutionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetQueryExecutionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetQueryExecutionInput struct { + + // The unique ID of the query execution. + // + // This member is required. + QueryExecutionId *string + + noSmithyDocumentSerde +} + +type GetQueryExecutionOutput struct { + + // Information about the query execution. + QueryExecution *types.QueryExecution + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetQueryExecutionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetQueryExecution{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetQueryExecution{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetQueryExecution"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetQueryExecutionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetQueryExecution(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetQueryExecution(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetQueryExecution", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryResults.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryResults.go new file mode 100644 index 00000000..39dbf7da --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryResults.go @@ -0,0 +1,285 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Streams the results of a single query execution specified by QueryExecutionId +// from the Athena query results location in Amazon S3. For more information, see [Working with query results, recent queries, and output files] +// in the Amazon Athena User Guide. This request does not execute the query but +// returns results. Use StartQueryExecutionto run a query. +// +// To stream query results successfully, the IAM principal with permission to call +// GetQueryResults also must have permissions to the Amazon S3 GetObject action +// for the Athena query results location. +// +// IAM principals with permission to the Amazon S3 GetObject action for the query +// results location are able to retrieve query results from Amazon S3 even if +// permission to the GetQueryResults action is denied. To restrict user or role +// access, ensure that Amazon S3 permissions to the Athena query location are +// denied. +// +// [Working with query results, recent queries, and output files]: https://docs.aws.amazon.com/athena/latest/ug/querying.html +func (c *Client) GetQueryResults(ctx context.Context, params *GetQueryResultsInput, optFns ...func(*Options)) (*GetQueryResultsOutput, error) { + if params == nil { + params = &GetQueryResultsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetQueryResults", params, optFns, c.addOperationGetQueryResultsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetQueryResultsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetQueryResultsInput struct { + + // The unique ID of the query execution. + // + // This member is required. + QueryExecutionId *string + + // The maximum number of results (rows) to return in this request. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + noSmithyDocumentSerde +} + +type GetQueryResultsOutput struct { + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // The results of the query execution. + ResultSet *types.ResultSet + + // The number of rows inserted with a CREATE TABLE AS SELECT , INSERT INTO , or + // UPDATE statement. + UpdateCount *int64 + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetQueryResultsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetQueryResults{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetQueryResults{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetQueryResults"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetQueryResultsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetQueryResults(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// GetQueryResultsPaginatorOptions is the paginator options for GetQueryResults +type GetQueryResultsPaginatorOptions struct { + // The maximum number of results (rows) to return in this request. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// GetQueryResultsPaginator is a paginator for GetQueryResults +type GetQueryResultsPaginator struct { + options GetQueryResultsPaginatorOptions + client GetQueryResultsAPIClient + params *GetQueryResultsInput + nextToken *string + firstPage bool +} + +// NewGetQueryResultsPaginator returns a new GetQueryResultsPaginator +func NewGetQueryResultsPaginator(client GetQueryResultsAPIClient, params *GetQueryResultsInput, optFns ...func(*GetQueryResultsPaginatorOptions)) *GetQueryResultsPaginator { + if params == nil { + params = &GetQueryResultsInput{} + } + + options := GetQueryResultsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &GetQueryResultsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *GetQueryResultsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next GetQueryResults page. +func (p *GetQueryResultsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*GetQueryResultsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.GetQueryResults(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// GetQueryResultsAPIClient is a client that implements the GetQueryResults +// operation. +type GetQueryResultsAPIClient interface { + GetQueryResults(context.Context, *GetQueryResultsInput, ...func(*Options)) (*GetQueryResultsOutput, error) +} + +var _ GetQueryResultsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opGetQueryResults(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetQueryResults", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryRuntimeStatistics.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryRuntimeStatistics.go new file mode 100644 index 00000000..2bd28ab3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetQueryRuntimeStatistics.go @@ -0,0 +1,167 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns query execution runtime statistics related to a single execution of a +// query if you have access to the workgroup in which the query ran. Statistics +// from the Timeline section of the response object are available as soon as QueryExecutionStatus$State is +// in a SUCCEEDED or FAILED state.
The remaining non-timeline statistics in the +// response (like stage-level input and output row count and data size) are updated +// asynchronously and may not be available immediately after a query completes. The +// non-timeline statistics are also not included when a query has row-level filters +// defined in Lake Formation. +func (c *Client) GetQueryRuntimeStatistics(ctx context.Context, params *GetQueryRuntimeStatisticsInput, optFns ...func(*Options)) (*GetQueryRuntimeStatisticsOutput, error) { + if params == nil { + params = &GetQueryRuntimeStatisticsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetQueryRuntimeStatistics", params, optFns, c.addOperationGetQueryRuntimeStatisticsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetQueryRuntimeStatisticsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetQueryRuntimeStatisticsInput struct { + + // The unique ID of the query execution. + // + // This member is required. + QueryExecutionId *string + + noSmithyDocumentSerde +} + +type GetQueryRuntimeStatisticsOutput struct { + + // Runtime statistics about the query execution. + QueryRuntimeStatistics *types.QueryRuntimeStatistics + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetQueryRuntimeStatisticsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetQueryRuntimeStatistics{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetQueryRuntimeStatistics{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetQueryRuntimeStatistics"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetQueryRuntimeStatisticsValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opGetQueryRuntimeStatistics(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetQueryRuntimeStatistics(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetQueryRuntimeStatistics", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetSession.go new file mode 100644 index 00000000..b01c3997 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetSession.go @@ -0,0 +1,186 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the full details of a previously created session, including the session +// status and configuration. +func (c *Client) GetSession(ctx context.Context, params *GetSessionInput, optFns ...func(*Options)) (*GetSessionOutput, error) { + if params == nil { + params = &GetSessionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSession", params, optFns, c.addOperationGetSessionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSessionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSessionInput struct { + + // The session ID. + // + // This member is required. + SessionId *string + + noSmithyDocumentSerde +} + +type GetSessionOutput struct { + + // The session description. + Description *string + + // Contains engine configuration information like DPU usage. + EngineConfiguration *types.EngineConfiguration + + // The engine version used by the session (for example, PySpark engine version 3 ). + // You can get a list of engine versions by calling ListEngineVersions. + EngineVersion *string + + // The notebook version. + NotebookVersion *string + + // Contains the workgroup configuration information used by the session. + SessionConfiguration *types.SessionConfiguration + + // The session ID. + SessionId *string + + // Contains the DPU execution time. + Statistics *types.SessionStatistics + + // Contains information about the status of the session. + Status *types.SessionStatus + + // The workgroup to which the session belongs. + WorkGroup *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSessionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetSession{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetSession{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetSession"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetSessionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSession(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSession(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetSession", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetSessionStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetSessionStatus.go new file mode 100644 index 00000000..4ea354ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetSessionStatus.go @@ -0,0 +1,163 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the current status of a session. +func (c *Client) GetSessionStatus(ctx context.Context, params *GetSessionStatusInput, optFns ...func(*Options)) (*GetSessionStatusOutput, error) { + if params == nil { + params = &GetSessionStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSessionStatus", params, optFns, c.addOperationGetSessionStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSessionStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSessionStatusInput struct { + + // The session ID. + // + // This member is required. + SessionId *string + + noSmithyDocumentSerde +} + +type GetSessionStatusOutput struct { + + // The session ID. + SessionId *string + + // Contains information about the status of the session. + Status *types.SessionStatus + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSessionStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetSessionStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetSessionStatus{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetSessionStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + 
if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSessionStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetSessionStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetTableMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetTableMetadata.go new file mode 100644 index 00000000..3efbafca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetTableMetadata.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns table metadata for the specified catalog, database, and table. +func (c *Client) GetTableMetadata(ctx context.Context, params *GetTableMetadataInput, optFns ...func(*Options)) (*GetTableMetadataOutput, error) { + if params == nil { + params = &GetTableMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetTableMetadata", params, optFns, c.addOperationGetTableMetadataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetTableMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetTableMetadataInput struct { + + // The name of the data catalog that contains the database and table metadata to + // return. + // + // This member is required. + CatalogName *string + + // The name of the database that contains the table metadata to return. + // + // This member is required. + DatabaseName *string + + // The name of the table for which metadata is returned. + // + // This member is required. + TableName *string + + // The name of the workgroup for which the metadata is being fetched. Required if + // requesting an IAM Identity Center enabled Glue Data Catalog. + WorkGroup *string + + noSmithyDocumentSerde +} + +type GetTableMetadataOutput struct { + + // An object that contains table metadata. + TableMetadata *types.TableMetadata + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetTableMetadataMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetTableMetadata{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetTableMetadata{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetTableMetadata"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetTableMetadataValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetTableMetadata(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetTableMetadata(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetTableMetadata", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetWorkGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetWorkGroup.go new file mode 100644 index 00000000..fdc882ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_GetWorkGroup.go @@ -0,0 
+1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about the workgroup with the specified name. +func (c *Client) GetWorkGroup(ctx context.Context, params *GetWorkGroupInput, optFns ...func(*Options)) (*GetWorkGroupOutput, error) { + if params == nil { + params = &GetWorkGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetWorkGroup", params, optFns, c.addOperationGetWorkGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetWorkGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetWorkGroupInput struct { + + // The name of the workgroup. + // + // This member is required. + WorkGroup *string + + noSmithyDocumentSerde +} + +type GetWorkGroupOutput struct { + + // Information about the workgroup. + WorkGroup *types.WorkGroup + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetWorkGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetWorkGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetWorkGroup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetWorkGroup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetWorkGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetWorkGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != 
nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetWorkGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetWorkGroup", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ImportNotebook.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ImportNotebook.go new file mode 100644 index 00000000..725fbf23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ImportNotebook.go @@ -0,0 +1,190 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Imports a single ipynb file to a Spark enabled workgroup. To import the +// notebook, the request must specify a value for either Payload or +// NoteBookS3LocationUri . If neither is specified or both are specified, an +// InvalidRequestException occurs. The maximum file size that can be imported is 10 +// megabytes. If an ipynb file with the same name already exists in the workgroup, +// throws an error. +func (c *Client) ImportNotebook(ctx context.Context, params *ImportNotebookInput, optFns ...func(*Options)) (*ImportNotebookOutput, error) { + if params == nil { + params = &ImportNotebookInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ImportNotebook", params, optFns, c.addOperationImportNotebookMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ImportNotebookOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ImportNotebookInput struct { + + // The name of the notebook to import. + // + // This member is required. + Name *string + + // The notebook content type. Currently, the only valid type is IPYNB . + // + // This member is required. + Type types.NotebookType + + // The name of the Spark enabled workgroup to import the notebook to. + // + // This member is required. + WorkGroup *string + + // A unique case-sensitive string used to ensure the request to import the + // notebook is idempotent (executes only once). + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for you. + // If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, + // you must provide this token or the action will fail. + ClientRequestToken *string + + // A URI that specifies the Amazon S3 location of a notebook file in ipynb format. + NotebookS3LocationUri *string + + // The notebook content to be imported. The payload must be in ipynb format. + Payload *string + + noSmithyDocumentSerde +} + +type ImportNotebookOutput struct { + + // The ID assigned to the imported notebook. 
+ NotebookId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationImportNotebookMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpImportNotebook{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpImportNotebook{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ImportNotebook"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpImportNotebookValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportNotebook(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opImportNotebook(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ImportNotebook", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListApplicationDPUSizes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListApplicationDPUSizes.go new file mode 100644 index 00000000..4db6238b --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListApplicationDPUSizes.go @@ -0,0 +1,260 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the supported DPU sizes for the supported application runtimes (for +// example, Athena notebook version 1 ). +func (c *Client) ListApplicationDPUSizes(ctx context.Context, params *ListApplicationDPUSizesInput, optFns ...func(*Options)) (*ListApplicationDPUSizesOutput, error) { + if params == nil { + params = &ListApplicationDPUSizesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListApplicationDPUSizes", params, optFns, c.addOperationListApplicationDPUSizesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListApplicationDPUSizesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListApplicationDPUSizesInput struct { + + // Specifies the maximum number of results to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. + NextToken *string + + noSmithyDocumentSerde +} + +type ListApplicationDPUSizesOutput struct { + + // A list of the supported DPU sizes that the application runtime supports. + ApplicationDPUSizes []types.ApplicationDPUSizes + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListApplicationDPUSizesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListApplicationDPUSizes{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListApplicationDPUSizes{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListApplicationDPUSizes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListApplicationDPUSizes(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListApplicationDPUSizesPaginatorOptions is the paginator options for +// ListApplicationDPUSizes +type ListApplicationDPUSizesPaginatorOptions struct { + // Specifies the maximum number of results to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListApplicationDPUSizesPaginator is a paginator for ListApplicationDPUSizes +type ListApplicationDPUSizesPaginator struct { + options ListApplicationDPUSizesPaginatorOptions + client ListApplicationDPUSizesAPIClient + params *ListApplicationDPUSizesInput + nextToken *string + firstPage bool +} + +// NewListApplicationDPUSizesPaginator returns a new +// ListApplicationDPUSizesPaginator +func NewListApplicationDPUSizesPaginator(client ListApplicationDPUSizesAPIClient, params *ListApplicationDPUSizesInput, optFns ...func(*ListApplicationDPUSizesPaginatorOptions)) *ListApplicationDPUSizesPaginator { + if params == nil { + params = &ListApplicationDPUSizesInput{} + } + + options := ListApplicationDPUSizesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListApplicationDPUSizesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListApplicationDPUSizesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListApplicationDPUSizes page. +func (p *ListApplicationDPUSizesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListApplicationDPUSizesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListApplicationDPUSizes(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListApplicationDPUSizesAPIClient is a client that implements the +// ListApplicationDPUSizes operation. +type ListApplicationDPUSizesAPIClient interface { + ListApplicationDPUSizes(context.Context, *ListApplicationDPUSizesInput, ...func(*Options)) (*ListApplicationDPUSizesOutput, error) +} + +var _ ListApplicationDPUSizesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListApplicationDPUSizes(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListApplicationDPUSizes", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListCalculationExecutions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListCalculationExecutions.go new file mode 100644 index 00000000..8c17aab2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListCalculationExecutions.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the calculations that have been submitted to a session in descending +// order. 
Newer calculations are listed first; older calculations are listed later. +func (c *Client) ListCalculationExecutions(ctx context.Context, params *ListCalculationExecutionsInput, optFns ...func(*Options)) (*ListCalculationExecutionsOutput, error) { + if params == nil { + params = &ListCalculationExecutionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListCalculationExecutions", params, optFns, c.addOperationListCalculationExecutionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListCalculationExecutionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListCalculationExecutionsInput struct { + + // The session ID. + // + // This member is required. + SessionId *string + + // The maximum number of calculation executions to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // A filter for a specific calculation execution state. A description of each + // state follows. + // + // CREATING - The calculation is in the process of being created. + // + // CREATED - The calculation has been created and is ready to run. + // + // QUEUED - The calculation has been queued for processing. + // + // RUNNING - The calculation is running. + // + // CANCELING - A request to cancel the calculation has been received and the + // system is working to stop it. + // + // CANCELED - The calculation is no longer running as the result of a cancel + // request. + // + // COMPLETED - The calculation has completed without error. + // + // FAILED - The calculation failed and is no longer running. + StateFilter types.CalculationExecutionState + + noSmithyDocumentSerde +} + +type ListCalculationExecutionsOutput struct { + + // A list of CalculationSummary objects. + Calculations []types.CalculationSummary + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListCalculationExecutionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListCalculationExecutions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListCalculationExecutions{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListCalculationExecutions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListCalculationExecutionsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListCalculationExecutions(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListCalculationExecutionsPaginatorOptions is the paginator options for +// ListCalculationExecutions +type ListCalculationExecutionsPaginatorOptions struct { + // The maximum number of calculation executions to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListCalculationExecutionsPaginator is a paginator for ListCalculationExecutions +type ListCalculationExecutionsPaginator struct { + options ListCalculationExecutionsPaginatorOptions + client ListCalculationExecutionsAPIClient + params *ListCalculationExecutionsInput + nextToken *string + firstPage bool +} + +// NewListCalculationExecutionsPaginator returns a new +// ListCalculationExecutionsPaginator +func NewListCalculationExecutionsPaginator(client ListCalculationExecutionsAPIClient, params *ListCalculationExecutionsInput, optFns ...func(*ListCalculationExecutionsPaginatorOptions)) *ListCalculationExecutionsPaginator { + if params == nil { + params = &ListCalculationExecutionsInput{} + } + + options := ListCalculationExecutionsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListCalculationExecutionsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListCalculationExecutionsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListCalculationExecutions page. +func (p *ListCalculationExecutionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListCalculationExecutionsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListCalculationExecutions(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListCalculationExecutionsAPIClient is a client that implements the +// ListCalculationExecutions operation. +type ListCalculationExecutionsAPIClient interface { + ListCalculationExecutions(context.Context, *ListCalculationExecutionsInput, ...func(*Options)) (*ListCalculationExecutionsOutput, error) +} + +var _ ListCalculationExecutionsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListCalculationExecutions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListCalculationExecutions", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListCapacityReservations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListCapacityReservations.go new file mode 100644 index 00000000..b17680d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListCapacityReservations.go @@ -0,0 +1,261 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the capacity reservations for the current account. +func (c *Client) ListCapacityReservations(ctx context.Context, params *ListCapacityReservationsInput, optFns ...func(*Options)) (*ListCapacityReservationsOutput, error) { + if params == nil { + params = &ListCapacityReservationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListCapacityReservations", params, optFns, c.addOperationListCapacityReservationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListCapacityReservationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListCapacityReservationsInput struct { + + // Specifies the maximum number of results to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. + NextToken *string + + noSmithyDocumentSerde +} + +type ListCapacityReservationsOutput struct { + + // The capacity reservations for the current account. + // + // This member is required. + CapacityReservations []types.CapacityReservation + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListCapacityReservationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListCapacityReservations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListCapacityReservations{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListCapacityReservations"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = 
addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListCapacityReservations(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListCapacityReservationsPaginatorOptions is the paginator options for +// ListCapacityReservations +type ListCapacityReservationsPaginatorOptions struct { + // Specifies the maximum number of results to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListCapacityReservationsPaginator is a paginator for ListCapacityReservations +type ListCapacityReservationsPaginator struct { + options ListCapacityReservationsPaginatorOptions + client ListCapacityReservationsAPIClient + params *ListCapacityReservationsInput + nextToken *string + firstPage bool +} + +// NewListCapacityReservationsPaginator returns a new +// ListCapacityReservationsPaginator +func NewListCapacityReservationsPaginator(client ListCapacityReservationsAPIClient, params *ListCapacityReservationsInput, optFns ...func(*ListCapacityReservationsPaginatorOptions)) *ListCapacityReservationsPaginator { + if params == nil { + params = &ListCapacityReservationsInput{} + } + + options := ListCapacityReservationsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListCapacityReservationsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListCapacityReservationsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListCapacityReservations page. +func (p *ListCapacityReservationsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListCapacityReservationsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListCapacityReservations(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListCapacityReservationsAPIClient is a client that implements the +// ListCapacityReservations operation. +type ListCapacityReservationsAPIClient interface { + ListCapacityReservations(context.Context, *ListCapacityReservationsInput, ...func(*Options)) (*ListCapacityReservationsOutput, error) +} + +var _ ListCapacityReservationsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListCapacityReservations(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListCapacityReservations", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListDataCatalogs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListDataCatalogs.go new file mode 100644 index 00000000..7b6bed79 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListDataCatalogs.go @@ -0,0 +1,264 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the data catalogs in the current Amazon Web Services account. +// +// In the Athena console, data catalogs are listed as "data sources" on the Data +// sources page under the Data source name column. +func (c *Client) ListDataCatalogs(ctx context.Context, params *ListDataCatalogsInput, optFns ...func(*Options)) (*ListDataCatalogsOutput, error) { + if params == nil { + params = &ListDataCatalogsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListDataCatalogs", params, optFns, c.addOperationListDataCatalogsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListDataCatalogsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListDataCatalogsInput struct { + + // Specifies the maximum number of data catalogs to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // The name of the workgroup. Required if making an IAM Identity Center request. + WorkGroup *string + + noSmithyDocumentSerde +} + +type ListDataCatalogsOutput struct { + + // A summary list of data catalogs. + DataCatalogsSummary []types.DataCatalogSummary + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDataCatalogsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListDataCatalogs{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListDataCatalogs{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDataCatalogs"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDataCatalogs(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListDataCatalogsPaginatorOptions is the paginator options for ListDataCatalogs +type ListDataCatalogsPaginatorOptions struct { + // Specifies the maximum number of data catalogs to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListDataCatalogsPaginator is a paginator for ListDataCatalogs +type ListDataCatalogsPaginator struct { + options ListDataCatalogsPaginatorOptions + client ListDataCatalogsAPIClient + params *ListDataCatalogsInput + nextToken *string + firstPage bool +} + +// NewListDataCatalogsPaginator returns a new ListDataCatalogsPaginator +func NewListDataCatalogsPaginator(client ListDataCatalogsAPIClient, params *ListDataCatalogsInput, optFns ...func(*ListDataCatalogsPaginatorOptions)) *ListDataCatalogsPaginator { + if params == nil { + params = &ListDataCatalogsInput{} + } + + options := ListDataCatalogsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListDataCatalogsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListDataCatalogsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListDataCatalogs page. +func (p *ListDataCatalogsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDataCatalogsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListDataCatalogs(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListDataCatalogsAPIClient is a client that implements the ListDataCatalogs +// operation. +type ListDataCatalogsAPIClient interface { + ListDataCatalogs(context.Context, *ListDataCatalogsInput, ...func(*Options)) (*ListDataCatalogsOutput, error) +} + +var _ ListDataCatalogsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListDataCatalogs(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListDataCatalogs", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListDatabases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListDatabases.go new file mode 100644 index 00000000..fe73adec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListDatabases.go @@ -0,0 +1,269 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the databases in the specified data catalog. 
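Editor's note (illustrative, not part of the generated vendor files): every List* operation added in this update ships the same generated paginator, consisting of an options struct with Limit and StopOnDuplicateToken, a constructor, HasMorePages, and NextPage. Below is a minimal sketch of how calling code could consume the ListDataCatalogs paginator defined above. The client construction via config.LoadDefaultConfig and athena.NewFromConfig is the SDK's standard entry point and is assumed here rather than shown in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/athena"
)

func main() {
	ctx := context.Background()

	// Shared AWS configuration (region, credentials) from the environment; standard SDK setup, assumed.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := athena.NewFromConfig(cfg)

	// Walk every page of data catalogs via the generated paginator.
	p := athena.NewListDataCatalogsPaginator(client, &athena.ListDataCatalogsInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("page contains %d data catalogs\n", len(page.DataCatalogsSummary))
	}
}

The same loop shape applies to ListDatabases, ListEngineVersions, and the other paginators in this file set; only the input and output types change.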
+func (c *Client) ListDatabases(ctx context.Context, params *ListDatabasesInput, optFns ...func(*Options)) (*ListDatabasesOutput, error) { + if params == nil { + params = &ListDatabasesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListDatabases", params, optFns, c.addOperationListDatabasesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListDatabasesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListDatabasesInput struct { + + // The name of the data catalog that contains the databases to return. + // + // This member is required. + CatalogName *string + + // Specifies the maximum number of results to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // The name of the workgroup for which the metadata is being fetched. Required if + // requesting an IAM Identity Center enabled Glue Data Catalog. + WorkGroup *string + + noSmithyDocumentSerde +} + +type ListDatabasesOutput struct { + + // A list of databases from a data catalog. + DatabaseList []types.Database + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDatabasesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListDatabases{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListDatabases{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDatabases"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + 
return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListDatabasesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDatabases(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListDatabasesPaginatorOptions is the paginator options for ListDatabases +type ListDatabasesPaginatorOptions struct { + // Specifies the maximum number of results to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListDatabasesPaginator is a paginator for ListDatabases +type ListDatabasesPaginator struct { + options ListDatabasesPaginatorOptions + client ListDatabasesAPIClient + params *ListDatabasesInput + nextToken *string + firstPage bool +} + +// NewListDatabasesPaginator returns a new ListDatabasesPaginator +func NewListDatabasesPaginator(client ListDatabasesAPIClient, params *ListDatabasesInput, optFns ...func(*ListDatabasesPaginatorOptions)) *ListDatabasesPaginator { + if params == nil { + params = &ListDatabasesInput{} + } + + options := ListDatabasesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListDatabasesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListDatabasesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListDatabases page. +func (p *ListDatabasesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDatabasesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListDatabases(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListDatabasesAPIClient is a client that implements the ListDatabases operation. 
+type ListDatabasesAPIClient interface { + ListDatabases(context.Context, *ListDatabasesInput, ...func(*Options)) (*ListDatabasesOutput, error) +} + +var _ ListDatabasesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListDatabases(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListDatabases", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListEngineVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListEngineVersions.go new file mode 100644 index 00000000..31ef68df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListEngineVersions.go @@ -0,0 +1,260 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of engine versions that are available to choose from, including +// the Auto option. +func (c *Client) ListEngineVersions(ctx context.Context, params *ListEngineVersionsInput, optFns ...func(*Options)) (*ListEngineVersionsOutput, error) { + if params == nil { + params = &ListEngineVersionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListEngineVersions", params, optFns, c.addOperationListEngineVersionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListEngineVersionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListEngineVersionsInput struct { + + // The maximum number of engine versions to return in this request. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + noSmithyDocumentSerde +} + +type ListEngineVersionsOutput struct { + + // A list of engine versions that are available to choose from. + EngineVersions []types.EngineVersion + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListEngineVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListEngineVersions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListEngineVersions{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListEngineVersions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListEngineVersions(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListEngineVersionsPaginatorOptions is the paginator options for +// ListEngineVersions +type ListEngineVersionsPaginatorOptions struct { + // The maximum number of engine versions to return in this request. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListEngineVersionsPaginator is a paginator for ListEngineVersions +type ListEngineVersionsPaginator struct { + options ListEngineVersionsPaginatorOptions + client ListEngineVersionsAPIClient + params *ListEngineVersionsInput + nextToken *string + firstPage bool +} + +// NewListEngineVersionsPaginator returns a new ListEngineVersionsPaginator +func NewListEngineVersionsPaginator(client ListEngineVersionsAPIClient, params *ListEngineVersionsInput, optFns ...func(*ListEngineVersionsPaginatorOptions)) *ListEngineVersionsPaginator { + if params == nil { + params = &ListEngineVersionsInput{} + } + + options := ListEngineVersionsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListEngineVersionsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListEngineVersionsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListEngineVersions page. +func (p *ListEngineVersionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListEngineVersionsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListEngineVersions(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListEngineVersionsAPIClient is a client that implements the ListEngineVersions +// operation. +type ListEngineVersionsAPIClient interface { + ListEngineVersions(context.Context, *ListEngineVersionsInput, ...func(*Options)) (*ListEngineVersionsOutput, error) +} + +var _ ListEngineVersionsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListEngineVersions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListEngineVersions", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListExecutors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListExecutors.go new file mode 100644 index 00000000..c7604428 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListExecutors.go @@ -0,0 +1,287 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists, in descending order, the executors that joined a session. Newer +// executors are listed first; older executors are listed later. The result can be +// optionally filtered by state. 
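Editor's note (illustrative sketch, not part of the generated file): the ListExecutors operation declared just below takes a required SessionId and an optional ExecutorStateFilter. The sketch shows a single, non-paginated call; the constant name types.ExecutorStateRegistered is assumed from the usual smithy-go enum naming and does not appear in this diff.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/athena"
	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

// listRegisteredExecutors returns how many executors of the given session are
// currently in the REGISTERED state (first page only, for brevity).
func listRegisteredExecutors(ctx context.Context, client *athena.Client, sessionID string) (int, error) {
	out, err := client.ListExecutors(ctx, &athena.ListExecutorsInput{
		SessionId:           aws.String(sessionID),         // required
		ExecutorStateFilter: types.ExecutorStateRegistered, // assumed constant for the REGISTERED state
	})
	if err != nil {
		return 0, err
	}
	return len(out.ExecutorsSummary), nil
}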
+func (c *Client) ListExecutors(ctx context.Context, params *ListExecutorsInput, optFns ...func(*Options)) (*ListExecutorsOutput, error) { + if params == nil { + params = &ListExecutorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListExecutors", params, optFns, c.addOperationListExecutorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListExecutorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListExecutorsInput struct { + + // The session ID. + // + // This member is required. + SessionId *string + + // A filter for a specific executor state. A description of each state follows. + // + // CREATING - The executor is being started, including acquiring resources. + // + // CREATED - The executor has been started. + // + // REGISTERED - The executor has been registered. + // + // TERMINATING - The executor is in the process of shutting down. + // + // TERMINATED - The executor is no longer running. + // + // FAILED - Due to a failure, the executor is no longer running. + ExecutorStateFilter types.ExecutorState + + // The maximum number of executors to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + noSmithyDocumentSerde +} + +type ListExecutorsOutput struct { + + // The session ID. + // + // This member is required. + SessionId *string + + // Contains summary information about the executor. + ExecutorsSummary []types.ExecutorsSummary + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListExecutorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListExecutors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListExecutors{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListExecutors"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListExecutorsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListExecutors(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListExecutorsPaginatorOptions is the paginator options for ListExecutors +type ListExecutorsPaginatorOptions struct { + // The maximum number of executors to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListExecutorsPaginator is a paginator for ListExecutors +type ListExecutorsPaginator struct { + options ListExecutorsPaginatorOptions + client ListExecutorsAPIClient + params *ListExecutorsInput + nextToken *string + firstPage bool +} + +// NewListExecutorsPaginator returns a new ListExecutorsPaginator +func NewListExecutorsPaginator(client ListExecutorsAPIClient, params *ListExecutorsInput, optFns ...func(*ListExecutorsPaginatorOptions)) *ListExecutorsPaginator { + if params == nil { + params = &ListExecutorsInput{} + } + + options := ListExecutorsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListExecutorsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListExecutorsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListExecutors page. +func (p *ListExecutorsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListExecutorsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListExecutors(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListExecutorsAPIClient is a client that implements the ListExecutors operation. +type ListExecutorsAPIClient interface { + ListExecutors(context.Context, *ListExecutorsInput, ...func(*Options)) (*ListExecutorsOutput, error) +} + +var _ ListExecutorsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListExecutors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListExecutors", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNamedQueries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNamedQueries.go new file mode 100644 index 00000000..c013f9fa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNamedQueries.go @@ -0,0 +1,264 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Provides a list of available query IDs only for queries saved in the specified +// workgroup. Requires that you have access to the specified workgroup. If a +// workgroup is not specified, lists the saved queries for the primary workgroup. 
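The generated List* operations in this patch all share one calling convention: build an Athena client, fill in the *Input struct, then either call the operation directly or drive the generated paginator (the ListNamedQueries paginator further below has the same shape as ListExecutorsPaginator above). A minimal sketch for ListNamedQueries, assuming the usual aws-sdk-go-v2 client setup (config.LoadDefaultConfig and athena.NewFromConfig, which live outside the files in this diff); the workgroup name and page size are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/athena"
)

func main() {
	ctx := context.Background()

	// Load region/credentials from the environment and build the client
	// whose generated operations appear in this patch.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := athena.NewFromConfig(cfg)

	// Page through all named queries in a workgroup ("primary" is a placeholder).
	p := athena.NewListNamedQueriesPaginator(client, &athena.ListNamedQueriesInput{
		WorkGroup:  aws.String("primary"),
		MaxResults: aws.Int32(50),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, id := range page.NamedQueryIds {
			fmt.Println(id)
		}
	}
}
```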
+func (c *Client) ListNamedQueries(ctx context.Context, params *ListNamedQueriesInput, optFns ...func(*Options)) (*ListNamedQueriesOutput, error) { + if params == nil { + params = &ListNamedQueriesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListNamedQueries", params, optFns, c.addOperationListNamedQueriesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListNamedQueriesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListNamedQueriesInput struct { + + // The maximum number of queries to return in this request. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // The name of the workgroup from which the named queries are being returned. If a + // workgroup is not specified, the saved queries for the primary workgroup are + // returned. + WorkGroup *string + + noSmithyDocumentSerde +} + +type ListNamedQueriesOutput struct { + + // The list of unique query IDs. + NamedQueryIds []string + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListNamedQueriesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListNamedQueries{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListNamedQueries{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListNamedQueries"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { 
+ return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListNamedQueries(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListNamedQueriesPaginatorOptions is the paginator options for ListNamedQueries +type ListNamedQueriesPaginatorOptions struct { + // The maximum number of queries to return in this request. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListNamedQueriesPaginator is a paginator for ListNamedQueries +type ListNamedQueriesPaginator struct { + options ListNamedQueriesPaginatorOptions + client ListNamedQueriesAPIClient + params *ListNamedQueriesInput + nextToken *string + firstPage bool +} + +// NewListNamedQueriesPaginator returns a new ListNamedQueriesPaginator +func NewListNamedQueriesPaginator(client ListNamedQueriesAPIClient, params *ListNamedQueriesInput, optFns ...func(*ListNamedQueriesPaginatorOptions)) *ListNamedQueriesPaginator { + if params == nil { + params = &ListNamedQueriesInput{} + } + + options := ListNamedQueriesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListNamedQueriesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListNamedQueriesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListNamedQueries page. +func (p *ListNamedQueriesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListNamedQueriesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListNamedQueries(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListNamedQueriesAPIClient is a client that implements the ListNamedQueries +// operation. 
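The narrow ListNamedQueriesAPIClient interface declared next exists so the paginator depends only on the single operation it calls. That makes the pagination logic easy to exercise without real AWS calls; a sketch with a hypothetical in-memory fake (all names here are illustrative, not part of the SDK):

```go
package athena_test

import (
	"context"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/athena"
)

// fakeNamedQueries is a hypothetical test double; it satisfies
// athena.ListNamedQueriesAPIClient by implementing the one operation.
type fakeNamedQueries struct {
	pages []*athena.ListNamedQueriesOutput
	calls int
}

func (f *fakeNamedQueries) ListNamedQueries(ctx context.Context, in *athena.ListNamedQueriesInput, optFns ...func(*athena.Options)) (*athena.ListNamedQueriesOutput, error) {
	out := f.pages[f.calls]
	f.calls++
	return out, nil
}

func TestNamedQueriesPagination(t *testing.T) {
	fake := &fakeNamedQueries{pages: []*athena.ListNamedQueriesOutput{
		{NamedQueryIds: []string{"a"}, NextToken: aws.String("t1")},
		{NamedQueryIds: []string{"b"}}, // nil NextToken ends pagination
	}}

	p := athena.NewListNamedQueriesPaginator(fake, &athena.ListNamedQueriesInput{})

	var ids []string
	for p.HasMorePages() {
		page, err := p.NextPage(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		ids = append(ids, page.NamedQueryIds...)
	}
	if len(ids) != 2 {
		t.Fatalf("expected 2 query IDs, got %d", len(ids))
	}
}
```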
+type ListNamedQueriesAPIClient interface { + ListNamedQueries(context.Context, *ListNamedQueriesInput, ...func(*Options)) (*ListNamedQueriesOutput, error) +} + +var _ ListNamedQueriesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListNamedQueries(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListNamedQueries", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNotebookMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNotebookMetadata.go new file mode 100644 index 00000000..b796bd2e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNotebookMetadata.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Displays the notebook files for the specified workgroup in paginated format. +func (c *Client) ListNotebookMetadata(ctx context.Context, params *ListNotebookMetadataInput, optFns ...func(*Options)) (*ListNotebookMetadataOutput, error) { + if params == nil { + params = &ListNotebookMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListNotebookMetadata", params, optFns, c.addOperationListNotebookMetadataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListNotebookMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListNotebookMetadataInput struct { + + // The name of the Spark enabled workgroup to retrieve notebook metadata for. + // + // This member is required. + WorkGroup *string + + // Search filter string. + Filters *types.FilterDefinition + + // Specifies the maximum number of results to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. + NextToken *string + + noSmithyDocumentSerde +} + +type ListNotebookMetadataOutput struct { + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // The list of notebook metadata for the specified workgroup. + NotebookMetadataList []types.NotebookMetadata + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListNotebookMetadataMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListNotebookMetadata{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListNotebookMetadata{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListNotebookMetadata"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListNotebookMetadataValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListNotebookMetadata(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListNotebookMetadata(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListNotebookMetadata", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNotebookSessions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNotebookSessions.go new file mode 100644 index 00000000..182a5b7a --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListNotebookSessions.go @@ -0,0 +1,177 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists, in descending order, the sessions that have been created in a notebook +// that are in an active state like CREATING , CREATED , IDLE or BUSY . Newer +// sessions are listed first; older sessions are listed later. +func (c *Client) ListNotebookSessions(ctx context.Context, params *ListNotebookSessionsInput, optFns ...func(*Options)) (*ListNotebookSessionsOutput, error) { + if params == nil { + params = &ListNotebookSessionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListNotebookSessions", params, optFns, c.addOperationListNotebookSessionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListNotebookSessionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListNotebookSessionsInput struct { + + // The ID of the notebook to list sessions for. + // + // This member is required. + NotebookId *string + + // The maximum number of notebook sessions to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + noSmithyDocumentSerde +} + +type ListNotebookSessionsOutput struct { + + // A list of the sessions belonging to the notebook. + // + // This member is required. + NotebookSessionsList []types.NotebookSessionSummary + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListNotebookSessionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListNotebookSessions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListNotebookSessions{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListNotebookSessions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListNotebookSessionsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListNotebookSessions(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListNotebookSessions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListNotebookSessions", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListPreparedStatements.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListPreparedStatements.go new file mode 100644 index 00000000..42ea7954 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListPreparedStatements.go @@ -0,0 +1,267 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the prepared statements in the specified workgroup. +func (c *Client) ListPreparedStatements(ctx context.Context, params *ListPreparedStatementsInput, optFns ...func(*Options)) (*ListPreparedStatementsOutput, error) { + if params == nil { + params = &ListPreparedStatementsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListPreparedStatements", params, optFns, c.addOperationListPreparedStatementsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListPreparedStatementsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListPreparedStatementsInput struct { + + // The workgroup to list the prepared statements for. + // + // This member is required. + WorkGroup *string + + // The maximum number of results to return in this request. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + noSmithyDocumentSerde +} + +type ListPreparedStatementsOutput struct { + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // The list of prepared statements for the workgroup. + PreparedStatements []types.PreparedStatementSummary + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListPreparedStatementsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListPreparedStatements{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListPreparedStatements{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListPreparedStatements"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListPreparedStatementsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListPreparedStatements(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListPreparedStatementsPaginatorOptions is the paginator options for +// ListPreparedStatements +type ListPreparedStatementsPaginatorOptions struct { + // The maximum number of results to return in this request. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListPreparedStatementsPaginator is a paginator for ListPreparedStatements +type ListPreparedStatementsPaginator struct { + options ListPreparedStatementsPaginatorOptions + client ListPreparedStatementsAPIClient + params *ListPreparedStatementsInput + nextToken *string + firstPage bool +} + +// NewListPreparedStatementsPaginator returns a new ListPreparedStatementsPaginator +func NewListPreparedStatementsPaginator(client ListPreparedStatementsAPIClient, params *ListPreparedStatementsInput, optFns ...func(*ListPreparedStatementsPaginatorOptions)) *ListPreparedStatementsPaginator { + if params == nil { + params = &ListPreparedStatementsInput{} + } + + options := ListPreparedStatementsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListPreparedStatementsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListPreparedStatementsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListPreparedStatements page. +func (p *ListPreparedStatementsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPreparedStatementsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListPreparedStatements(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListPreparedStatementsAPIClient is a client that implements the +// ListPreparedStatements operation. +type ListPreparedStatementsAPIClient interface { + ListPreparedStatements(context.Context, *ListPreparedStatementsInput, ...func(*Options)) (*ListPreparedStatementsOutput, error) +} + +var _ ListPreparedStatementsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListPreparedStatements(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListPreparedStatements", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListQueryExecutions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListQueryExecutions.go new file mode 100644 index 00000000..b5bd5c9a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListQueryExecutions.go @@ -0,0 +1,264 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Provides a list of available query execution IDs for the queries in the +// specified workgroup. Athena keeps a query history for 45 days. 
If a workgroup is +// not specified, returns a list of query execution IDs for the primary workgroup. +// Requires you to have access to the workgroup in which the queries ran. +func (c *Client) ListQueryExecutions(ctx context.Context, params *ListQueryExecutionsInput, optFns ...func(*Options)) (*ListQueryExecutionsOutput, error) { + if params == nil { + params = &ListQueryExecutionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListQueryExecutions", params, optFns, c.addOperationListQueryExecutionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListQueryExecutionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListQueryExecutionsInput struct { + + // The maximum number of query executions to return in this request. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // The name of the workgroup from which queries are being returned. If a workgroup + // is not specified, a list of available query execution IDs for the queries in the + // primary workgroup is returned. + WorkGroup *string + + noSmithyDocumentSerde +} + +type ListQueryExecutionsOutput struct { + + // A token to be used by the next request if this request is truncated. + NextToken *string + + // The unique IDs of each query execution as an array of strings. + QueryExecutionIds []string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListQueryExecutionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListQueryExecutions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListQueryExecutions{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListQueryExecutions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = 
addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListQueryExecutions(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListQueryExecutionsPaginatorOptions is the paginator options for +// ListQueryExecutions +type ListQueryExecutionsPaginatorOptions struct { + // The maximum number of query executions to return in this request. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListQueryExecutionsPaginator is a paginator for ListQueryExecutions +type ListQueryExecutionsPaginator struct { + options ListQueryExecutionsPaginatorOptions + client ListQueryExecutionsAPIClient + params *ListQueryExecutionsInput + nextToken *string + firstPage bool +} + +// NewListQueryExecutionsPaginator returns a new ListQueryExecutionsPaginator +func NewListQueryExecutionsPaginator(client ListQueryExecutionsAPIClient, params *ListQueryExecutionsInput, optFns ...func(*ListQueryExecutionsPaginatorOptions)) *ListQueryExecutionsPaginator { + if params == nil { + params = &ListQueryExecutionsInput{} + } + + options := ListQueryExecutionsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListQueryExecutionsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListQueryExecutionsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListQueryExecutions page. +func (p *ListQueryExecutionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListQueryExecutionsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListQueryExecutions(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListQueryExecutionsAPIClient is a client that implements the +// ListQueryExecutions operation. +type ListQueryExecutionsAPIClient interface { + ListQueryExecutions(context.Context, *ListQueryExecutionsInput, ...func(*Options)) (*ListQueryExecutionsOutput, error) +} + +var _ ListQueryExecutionsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListQueryExecutions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListQueryExecutions", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListSessions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListSessions.go new file mode 100644 index 00000000..6ec02e8f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListSessions.go @@ -0,0 +1,287 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the sessions in a workgroup that are in an active state like CREATING , +// CREATED , IDLE , or BUSY . Newer sessions are listed first; older sessions are +// listed later. +func (c *Client) ListSessions(ctx context.Context, params *ListSessionsInput, optFns ...func(*Options)) (*ListSessionsOutput, error) { + if params == nil { + params = &ListSessionsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListSessions", params, optFns, c.addOperationListSessionsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListSessionsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListSessionsInput struct { + + // The workgroup to which the session belongs. + // + // This member is required. + WorkGroup *string + + // The maximum number of sessions to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // A filter for a specific session state. A description of each state follows. + // + // CREATING - The session is being started, including acquiring resources. + // + // CREATED - The session has been started. + // + // IDLE - The session is able to accept a calculation. + // + // BUSY - The session is processing another task and is unable to accept a + // calculation. + // + // TERMINATING - The session is in the process of shutting down. + // + // TERMINATED - The session and its resources are no longer running. + // + // DEGRADED - The session has no healthy coordinators. + // + // FAILED - Due to a failure, the session and its resources are no longer running. + StateFilter types.SessionState + + noSmithyDocumentSerde +} + +type ListSessionsOutput struct { + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. 
To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // A list of sessions. + Sessions []types.SessionSummary + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListSessionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListSessions{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListSessions{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListSessions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListSessionsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSessions(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListSessionsPaginatorOptions is the paginator options for ListSessions +type ListSessionsPaginatorOptions struct { + // The maximum number of sessions to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListSessionsPaginator is a paginator for ListSessions +type ListSessionsPaginator struct { + options ListSessionsPaginatorOptions + client ListSessionsAPIClient + params *ListSessionsInput + nextToken *string + firstPage bool +} + +// NewListSessionsPaginator returns a new ListSessionsPaginator +func NewListSessionsPaginator(client ListSessionsAPIClient, params *ListSessionsInput, optFns ...func(*ListSessionsPaginatorOptions)) *ListSessionsPaginator { + if params == nil { + params = &ListSessionsInput{} + } + + options := ListSessionsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListSessionsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListSessionsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListSessions page. +func (p *ListSessionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListSessionsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListSessions(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListSessionsAPIClient is a client that implements the ListSessions operation. +type ListSessionsAPIClient interface { + ListSessions(context.Context, *ListSessionsInput, ...func(*Options)) (*ListSessionsOutput, error) +} + +var _ ListSessionsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListSessions(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListSessions", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListTableMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListTableMetadata.go new file mode 100644 index 00000000..aa31ffff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListTableMetadata.go @@ -0,0 +1,279 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the metadata for the tables in the specified data catalog database. 
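ListTableMetadata, defined next, is one of the operations here with required input members (CatalogName and DatabaseName) plus an optional regex Expression for narrowing table names. A sketch of paging through matching tables, assuming a client built as in the earlier example; the catalog, database, and pattern values are placeholders:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/athena"
)

// printMatchingTables pages through table metadata for one database,
// filtered by a regex on table names.
func printMatchingTables(ctx context.Context, client *athena.Client) error {
	p := athena.NewListTableMetadataPaginator(client, &athena.ListTableMetadataInput{
		CatalogName:  aws.String("AwsDataCatalog"), // required
		DatabaseName: aws.String("analytics"),      // required; placeholder name
		Expression:   aws.String("events_.*"),      // optional filter on table names
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, tm := range page.TableMetadataList {
			fmt.Printf("%s (%s)\n", aws.ToString(tm.Name), aws.ToString(tm.TableType))
		}
	}
	return nil
}
```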
+func (c *Client) ListTableMetadata(ctx context.Context, params *ListTableMetadataInput, optFns ...func(*Options)) (*ListTableMetadataOutput, error) { + if params == nil { + params = &ListTableMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTableMetadata", params, optFns, c.addOperationListTableMetadataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTableMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTableMetadataInput struct { + + // The name of the data catalog for which table metadata should be returned. + // + // This member is required. + CatalogName *string + + // The name of the database for which table metadata should be returned. + // + // This member is required. + DatabaseName *string + + // A regex filter that pattern-matches table names. If no expression is supplied, + // metadata for all tables are listed. + Expression *string + + // Specifies the maximum number of results to return. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // The name of the workgroup for which the metadata is being fetched. Required if + // requesting an IAM Identity Center enabled Glue Data Catalog. + WorkGroup *string + + noSmithyDocumentSerde +} + +type ListTableMetadataOutput struct { + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // A list of table metadata. + TableMetadataList []types.TableMetadata + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTableMetadataMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTableMetadata{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTableMetadata{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTableMetadata"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListTableMetadataValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTableMetadata(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListTableMetadataPaginatorOptions is the paginator options for ListTableMetadata +type ListTableMetadataPaginatorOptions struct { + // Specifies the maximum number of results to return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListTableMetadataPaginator is a paginator for ListTableMetadata +type ListTableMetadataPaginator struct { + options ListTableMetadataPaginatorOptions + client ListTableMetadataAPIClient + params *ListTableMetadataInput + nextToken *string + firstPage bool +} + +// NewListTableMetadataPaginator returns a new ListTableMetadataPaginator +func NewListTableMetadataPaginator(client ListTableMetadataAPIClient, params *ListTableMetadataInput, optFns ...func(*ListTableMetadataPaginatorOptions)) *ListTableMetadataPaginator { + if params == nil { + params = &ListTableMetadataInput{} + } + + options := ListTableMetadataPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListTableMetadataPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListTableMetadataPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListTableMetadata page. +func (p *ListTableMetadataPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTableMetadataOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListTableMetadata(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListTableMetadataAPIClient is a client that implements the ListTableMetadata +// operation. +type ListTableMetadataAPIClient interface { + ListTableMetadata(context.Context, *ListTableMetadataInput, ...func(*Options)) (*ListTableMetadataOutput, error) +} + +var _ ListTableMetadataAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListTableMetadata(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTableMetadata", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListTagsForResource.go new file mode 100644 index 00000000..601cb1d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListTagsForResource.go @@ -0,0 +1,267 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists the tags associated with an Athena resource. 
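ListTagsForResource, defined next, takes the ARN of a taggable Athena resource such as a workgroup. For callers who prefer not to use the generated paginator, or for the operations in this patch that do not get one (ListNotebookMetadata and ListNotebookSessions), the manual NextToken loop looks like the sketch below, under the same client assumptions as the earlier examples:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/athena"
	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

// allTags collects every tag on a resource by following NextToken manually.
func allTags(ctx context.Context, client *athena.Client, resourceARN string) ([]types.Tag, error) {
	var tags []types.Tag
	var next *string
	for {
		out, err := client.ListTagsForResource(ctx, &athena.ListTagsForResourceInput{
			ResourceARN: aws.String(resourceARN),
			NextToken:   next,
		})
		if err != nil {
			return nil, err
		}
		tags = append(tags, out.Tags...)
		if out.NextToken == nil || *out.NextToken == "" {
			return tags, nil
		}
		next = out.NextToken
	}
}
```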
+func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { + if params == nil { + params = &ListTagsForResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTagsForResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTagsForResourceInput struct { + + // Lists the tags for the resource with the specified ARN. + // + // This member is required. + ResourceARN *string + + // The maximum number of results to be returned per request that lists the tags + // for the resource. + MaxResults *int32 + + // The token for the next set of results, or null if there are no additional + // results for this request, where the request lists the tags for the resource with + // the specified ARN. + NextToken *string + + noSmithyDocumentSerde +} + +type ListTagsForResourceOutput struct { + + // A token to be used by the next request if this request is truncated. + NextToken *string + + // The list of tags associated with the specified resource. + Tags []types.Tag + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagsForResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListTagsForResourcePaginatorOptions is the paginator options for +// ListTagsForResource +type ListTagsForResourcePaginatorOptions struct { + // The maximum number of results to be returned per request that lists the tags + // for the resource. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListTagsForResourcePaginator is a paginator for ListTagsForResource +type ListTagsForResourcePaginator struct { + options ListTagsForResourcePaginatorOptions + client ListTagsForResourceAPIClient + params *ListTagsForResourceInput + nextToken *string + firstPage bool +} + +// NewListTagsForResourcePaginator returns a new ListTagsForResourcePaginator +func NewListTagsForResourcePaginator(client ListTagsForResourceAPIClient, params *ListTagsForResourceInput, optFns ...func(*ListTagsForResourcePaginatorOptions)) *ListTagsForResourcePaginator { + if params == nil { + params = &ListTagsForResourceInput{} + } + + options := ListTagsForResourcePaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListTagsForResourcePaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListTagsForResourcePaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListTagsForResource page. +func (p *ListTagsForResourcePaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListTagsForResource(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListTagsForResourceAPIClient is a client that implements the +// ListTagsForResource operation.
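Each operation also exposes a narrow *APIClient interface (declared just below) plus a compile-time assertion that *Client satisfies it, so the paginators can be driven by a stub in unit tests without touching the network. A hypothetical sketch; the stub type, ARN, and tag values are invented for illustration:

package athena_test

import (
	"context"
	"testing"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/athena"
	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

// stubTagsClient satisfies athena.ListTagsForResourceAPIClient and replays canned pages.
type stubTagsClient struct {
	pages []*athena.ListTagsForResourceOutput
	calls int
}

func (s *stubTagsClient) ListTagsForResource(_ context.Context, _ *athena.ListTagsForResourceInput, _ ...func(*athena.Options)) (*athena.ListTagsForResourceOutput, error) {
	out := s.pages[s.calls]
	s.calls++
	return out, nil
}

func TestListTagsPagination(t *testing.T) {
	stub := &stubTagsClient{pages: []*athena.ListTagsForResourceOutput{
		{Tags: []types.Tag{{Key: aws.String("team"), Value: aws.String("data")}}, NextToken: aws.String("page-2")},
		{Tags: []types.Tag{{Key: aws.String("env"), Value: aws.String("prod")}}},
	}}

	// The ARN is a placeholder; only its presence matters to the stub.
	p := athena.NewListTagsForResourcePaginator(stub, &athena.ListTagsForResourceInput{
		ResourceARN: aws.String("arn:aws:athena:eu-west-1:123456789012:workgroup/primary"),
	})

	var total int
	for p.HasMorePages() {
		page, err := p.NextPage(context.Background())
		if err != nil {
			t.Fatal(err)
		}
		total += len(page.Tags)
	}
	if total != 2 {
		t.Fatalf("expected 2 tags across pages, got %d", total)
	}
}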
+type ListTagsForResourceAPIClient interface { + ListTagsForResource(context.Context, *ListTagsForResourceInput, ...func(*Options)) (*ListTagsForResourceOutput, error) +} + +var _ ListTagsForResourceAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTagsForResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListWorkGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListWorkGroups.go new file mode 100644 index 00000000..9187fd19 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_ListWorkGroups.go @@ -0,0 +1,259 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists available workgroups for the account. +func (c *Client) ListWorkGroups(ctx context.Context, params *ListWorkGroupsInput, optFns ...func(*Options)) (*ListWorkGroupsOutput, error) { + if params == nil { + params = &ListWorkGroupsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListWorkGroups", params, optFns, c.addOperationListWorkGroupsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListWorkGroupsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListWorkGroupsInput struct { + + // The maximum number of workgroups to return in this request. + MaxResults *int32 + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + noSmithyDocumentSerde +} + +type ListWorkGroupsOutput struct { + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of pages, + // pass in the NextToken from the response object of the previous page call. + NextToken *string + + // A list of WorkGroupSummary objects that include the names, descriptions, creation times, and + // states for each workgroup. + WorkGroups []types.WorkGroupSummary + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListWorkGroupsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListWorkGroups{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListWorkGroups{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListWorkGroups"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListWorkGroups(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListWorkGroupsPaginatorOptions is the paginator options for ListWorkGroups +type ListWorkGroupsPaginatorOptions struct { + // The maximum number of workgroups to return in this request. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListWorkGroupsPaginator is a paginator for ListWorkGroups +type ListWorkGroupsPaginator struct { + options ListWorkGroupsPaginatorOptions + client ListWorkGroupsAPIClient + params *ListWorkGroupsInput + nextToken *string + firstPage bool +} + +// NewListWorkGroupsPaginator returns a new ListWorkGroupsPaginator +func NewListWorkGroupsPaginator(client ListWorkGroupsAPIClient, params *ListWorkGroupsInput, optFns ...func(*ListWorkGroupsPaginatorOptions)) *ListWorkGroupsPaginator { + if params == nil { + params = &ListWorkGroupsInput{} + } + + options := ListWorkGroupsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListWorkGroupsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListWorkGroupsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListWorkGroups page. +func (p *ListWorkGroupsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListWorkGroupsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListWorkGroups(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListWorkGroupsAPIClient is a client that implements the ListWorkGroups +// operation. +type ListWorkGroupsAPIClient interface { + ListWorkGroups(context.Context, *ListWorkGroupsInput, ...func(*Options)) (*ListWorkGroupsOutput, error) +} + +var _ ListWorkGroupsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListWorkGroups(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListWorkGroups", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_PutCapacityAssignmentConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_PutCapacityAssignmentConfiguration.go new file mode 100644 index 00000000..80b5a11b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_PutCapacityAssignmentConfiguration.go @@ -0,0 +1,164 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Puts a new capacity assignment configuration for a specified capacity +// reservation. If a capacity assignment configuration already exists for the +// capacity reservation, replaces the existing capacity assignment configuration.
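For reference, a minimal, hypothetical call to the operation defined below. The reservation and workgroup names are placeholders, and types.CapacityAssignment is assumed here to carry just the list of workgroup names to assign:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/athena"
	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := athena.NewFromConfig(cfg)

	// "example-reservation" and the workgroup names are placeholders.
	_, err = client.PutCapacityAssignmentConfiguration(ctx, &athena.PutCapacityAssignmentConfigurationInput{
		CapacityReservationName: aws.String("example-reservation"),
		CapacityAssignments: []types.CapacityAssignment{
			{WorkGroupNames: []string{"primary"}},
			{WorkGroupNames: []string{"reporting", "adhoc"}},
		},
	})
	if err != nil {
		log.Fatalf("put capacity assignment configuration: %v", err)
	}
}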
+func (c *Client) PutCapacityAssignmentConfiguration(ctx context.Context, params *PutCapacityAssignmentConfigurationInput, optFns ...func(*Options)) (*PutCapacityAssignmentConfigurationOutput, error) { + if params == nil { + params = &PutCapacityAssignmentConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutCapacityAssignmentConfiguration", params, optFns, c.addOperationPutCapacityAssignmentConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutCapacityAssignmentConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutCapacityAssignmentConfigurationInput struct { + + // The list of assignments for the capacity assignment configuration. + // + // This member is required. + CapacityAssignments []types.CapacityAssignment + + // The name of the capacity reservation to put a capacity assignment configuration + // for. + // + // This member is required. + CapacityReservationName *string + + noSmithyDocumentSerde +} + +type PutCapacityAssignmentConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutCapacityAssignmentConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutCapacityAssignmentConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutCapacityAssignmentConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutCapacityAssignmentConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpPutCapacityAssignmentConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutCapacityAssignmentConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = 
addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutCapacityAssignmentConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutCapacityAssignmentConfiguration", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartCalculationExecution.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartCalculationExecution.go new file mode 100644 index 00000000..c9b5266b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartCalculationExecution.go @@ -0,0 +1,207 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Submits calculations for execution within a session. You can supply the code to +// run as an inline code block within the request. +// +// The request syntax requires the StartCalculationExecutionRequest$CodeBlock parameter or the CalculationConfiguration$CodeBlock parameter, but not both. +// Because CalculationConfiguration$CodeBlockis deprecated, use the StartCalculationExecutionRequest$CodeBlock parameter instead. +func (c *Client) StartCalculationExecution(ctx context.Context, params *StartCalculationExecutionInput, optFns ...func(*Options)) (*StartCalculationExecutionOutput, error) { + if params == nil { + params = &StartCalculationExecutionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartCalculationExecution", params, optFns, c.addOperationStartCalculationExecutionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartCalculationExecutionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartCalculationExecutionInput struct { + + // The session ID. + // + // This member is required. + SessionId *string + + // Contains configuration information for the calculation. + // + // Deprecated: Kepler Post GA Tasks : https://sim.amazon.com/issues/ATHENA-39828 + CalculationConfiguration *types.CalculationConfiguration + + // A unique case-sensitive string used to ensure the request to create the + // calculation is idempotent (executes only once). If another + // StartCalculationExecutionRequest is received, the same response is returned and + // another calculation is not created. If a parameter has changed, an error is + // returned. + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for users. 
+ // If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, + // you must provide this token or the action will fail. + ClientRequestToken *string + + // A string that contains the code of the calculation. Use this parameter instead + // of CalculationConfiguration$CodeBlock, which is deprecated. + CodeBlock *string + + // A description of the calculation. + Description *string + + noSmithyDocumentSerde +} + +type StartCalculationExecutionOutput struct { + + // The calculation execution UUID. + CalculationExecutionId *string + + // CREATING - The calculation is in the process of being created. + // + // CREATED - The calculation has been created and is ready to run. + // + // QUEUED - The calculation has been queued for processing. + // + // RUNNING - The calculation is running. + // + // CANCELING - A request to cancel the calculation has been received and the + // system is working to stop it. + // + // CANCELED - The calculation is no longer running as the result of a cancel + // request. + // + // COMPLETED - The calculation has completed without error. + // + // FAILED - The calculation failed and is no longer running. + State types.CalculationExecutionState + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartCalculationExecutionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartCalculationExecution{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartCalculationExecution{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StartCalculationExecution"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpStartCalculationExecutionValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opStartCalculationExecution(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartCalculationExecution(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartCalculationExecution", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartQueryExecution.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartQueryExecution.go new file mode 100644 index 00000000..ea5cb628 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartQueryExecution.go @@ -0,0 +1,236 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Runs the SQL query statements contained in the Query . Requires you to have +// access to the workgroup in which the query ran. Running queries against an +// external catalog requires GetDataCatalogpermission to the catalog. For code samples using the +// Amazon Web Services SDK for Java, see [Examples and Code Samples]in the Amazon Athena User Guide. +// +// [Examples and Code Samples]: http://docs.aws.amazon.com/athena/latest/ug/code-samples.html +func (c *Client) StartQueryExecution(ctx context.Context, params *StartQueryExecutionInput, optFns ...func(*Options)) (*StartQueryExecutionOutput, error) { + if params == nil { + params = &StartQueryExecutionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartQueryExecution", params, optFns, c.addOperationStartQueryExecutionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartQueryExecutionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartQueryExecutionInput struct { + + // The SQL query statements to be executed. + // + // This member is required. + QueryString *string + + // A unique case-sensitive string used to ensure the request to create the query + // is idempotent (executes only once). If another StartQueryExecution request is + // received, the same response is returned and another query is not created. An + // error is returned if a parameter, such as QueryString , has changed. A call to + // StartQueryExecution that uses a previous client request token returns the same + // QueryExecutionId even if the requester doesn't have permission on the tables + // specified in QueryString . 
+ // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for users. + // If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, + // you must provide this token or the action will fail. + ClientRequestToken *string + + // A list of values for the parameters in a query. The values are applied + // sequentially to the parameters in the query in the order in which the parameters + // occur. + ExecutionParameters []string + + // The database within which the query executes. + QueryExecutionContext *types.QueryExecutionContext + + // Specifies information about where and how to save the results of the query + // execution. If the query runs in a workgroup, then workgroup's settings may + // override query settings. This affects the query results location. The workgroup + // settings override is specified in EnforceWorkGroupConfiguration (true/false) in + // the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + ResultConfiguration *types.ResultConfiguration + + // Specifies the query result reuse behavior for the query. + ResultReuseConfiguration *types.ResultReuseConfiguration + + // The name of the workgroup in which the query is being started. + WorkGroup *string + + noSmithyDocumentSerde +} + +type StartQueryExecutionOutput struct { + + // The unique ID of the query that ran as a result of this request. + QueryExecutionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartQueryExecutionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartQueryExecution{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartQueryExecution{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StartQueryExecution"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } 
+ if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opStartQueryExecutionMiddleware(stack, options); err != nil { + return err + } + if err = addOpStartQueryExecutionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartQueryExecution(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpStartQueryExecution struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpStartQueryExecution) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpStartQueryExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*StartQueryExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *StartQueryExecutionInput ") + } + + if input.ClientRequestToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientRequestToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opStartQueryExecutionMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpStartQueryExecution{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opStartQueryExecution(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartQueryExecution", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartSession.go new file mode 100644 index 00000000..f7dc42c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StartSession.go @@ -0,0 +1,211 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a session for running calculations within a workgroup. The session is +// ready when it reaches an IDLE state. 
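StartQueryExecution, defined above, is the call a SQL client typically makes to run a statement; ClientRequestToken can be left unset because the idempotency middleware registered for that operation fills it in from the client's IdempotencyTokenProvider. A minimal sketch, assuming a configured client; the workgroup, database, and S3 output location are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/athena"
	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := athena.NewFromConfig(cfg)

	// ClientRequestToken is omitted on purpose: the operation's idempotency
	// middleware generates one automatically. Workgroup, database, and the
	// S3 output location below are placeholders.
	out, err := client.StartQueryExecution(ctx, &athena.StartQueryExecutionInput{
		QueryString:           aws.String("SELECT 1"),
		WorkGroup:             aws.String("primary"),
		QueryExecutionContext: &types.QueryExecutionContext{Database: aws.String("default")},
		ResultConfiguration: &types.ResultConfiguration{
			OutputLocation: aws.String("s3://example-bucket/athena-results/"),
		},
	})
	if err != nil {
		log.Fatalf("start query execution: %v", err)
	}
	fmt.Println("query execution id:", aws.ToString(out.QueryExecutionId))
}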
+func (c *Client) StartSession(ctx context.Context, params *StartSessionInput, optFns ...func(*Options)) (*StartSessionOutput, error) { + if params == nil { + params = &StartSessionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartSession", params, optFns, c.addOperationStartSessionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartSessionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartSessionInput struct { + + // Contains engine data processing unit (DPU) configuration settings and parameter + // mappings. + // + // This member is required. + EngineConfiguration *types.EngineConfiguration + + // The workgroup to which the session belongs. + // + // This member is required. + WorkGroup *string + + // A unique case-sensitive string used to ensure the request to create the session + // is idempotent (executes only once). If another StartSessionRequest is received, + // the same response is returned and another session is not created. If a parameter + // has changed, an error is returned. + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for users. + // If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, + // you must provide this token or the action will fail. + ClientRequestToken *string + + // The session description. + Description *string + + // The notebook version. This value is supplied automatically for notebook + // sessions in the Athena console and is not required for programmatic session + // access. The only valid notebook version is Athena notebook version 1 . If you + // specify a value for NotebookVersion , you must also specify a value for + // NotebookId . See EngineConfiguration$AdditionalConfigs. + NotebookVersion *string + + // The idle timeout in minutes for the session. + SessionIdleTimeoutInMinutes *int32 + + noSmithyDocumentSerde +} + +type StartSessionOutput struct { + + // The session ID. + SessionId *string + + // The state of the session. A description of each state follows. + // + // CREATING - The session is being started, including acquiring resources. + // + // CREATED - The session has been started. + // + // IDLE - The session is able to accept a calculation. + // + // BUSY - The session is processing another task and is unable to accept a + // calculation. + // + // TERMINATING - The session is in the process of shutting down. + // + // TERMINATED - The session and its resources are no longer running. + // + // DEGRADED - The session has no healthy coordinators. + // + // FAILED - Due to a failure, the session and its resources are no longer running. + State types.SessionState + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartSessionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStartSession{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStartSession{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StartSession"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpStartSessionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartSession(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartSession(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartSession", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StopCalculationExecution.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StopCalculationExecution.go new file mode 100644 index 00000000..a99be195 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StopCalculationExecution.go @@ -0,0 
+1,183 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Requests the cancellation of a calculation. A StopCalculationExecution call on +// a calculation that is already in a terminal state (for example, STOPPED , FAILED +// , or COMPLETED ) succeeds but has no effect. +// +// Cancelling a calculation is done on a best effort basis. If a calculation +// cannot be cancelled, you can be charged for its completion. If you are concerned +// about being charged for a calculation that cannot be cancelled, consider +// terminating the session in which the calculation is running. +func (c *Client) StopCalculationExecution(ctx context.Context, params *StopCalculationExecutionInput, optFns ...func(*Options)) (*StopCalculationExecutionOutput, error) { + if params == nil { + params = &StopCalculationExecutionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StopCalculationExecution", params, optFns, c.addOperationStopCalculationExecutionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StopCalculationExecutionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StopCalculationExecutionInput struct { + + // The calculation execution UUID. + // + // This member is required. + CalculationExecutionId *string + + noSmithyDocumentSerde +} + +type StopCalculationExecutionOutput struct { + + // CREATING - The calculation is in the process of being created. + // + // CREATED - The calculation has been created and is ready to run. + // + // QUEUED - The calculation has been queued for processing. + // + // RUNNING - The calculation is running. + // + // CANCELING - A request to cancel the calculation has been received and the + // system is working to stop it. + // + // CANCELED - The calculation is no longer running as the result of a cancel + // request. + // + // COMPLETED - The calculation has completed without error. + // + // FAILED - The calculation failed and is no longer running. + State types.CalculationExecutionState + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStopCalculationExecutionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStopCalculationExecution{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStopCalculationExecution{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StopCalculationExecution"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpStopCalculationExecutionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStopCalculationExecution(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStopCalculationExecution(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StopCalculationExecution", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StopQueryExecution.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StopQueryExecution.go new file mode 100644 index 00000000..f0e4c868 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_StopQueryExecution.go @@ -0,0 +1,192 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Stops a query execution. Requires you to have access to the workgroup in which +// the query ran. +func (c *Client) StopQueryExecution(ctx context.Context, params *StopQueryExecutionInput, optFns ...func(*Options)) (*StopQueryExecutionOutput, error) { + if params == nil { + params = &StopQueryExecutionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StopQueryExecution", params, optFns, c.addOperationStopQueryExecutionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StopQueryExecutionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StopQueryExecutionInput struct { + + // The unique ID of the query execution to stop. + // + // This member is required. + QueryExecutionId *string + + noSmithyDocumentSerde +} + +type StopQueryExecutionOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStopQueryExecutionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpStopQueryExecution{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpStopQueryExecution{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StopQueryExecution"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opStopQueryExecutionMiddleware(stack, options); err != nil { + return err + } + if err = addOpStopQueryExecutionValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opStopQueryExecution(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpStopQueryExecution struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpStopQueryExecution) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpStopQueryExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*StopQueryExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *StopQueryExecutionInput ") + } + + if input.QueryExecutionId == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.QueryExecutionId = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opStopQueryExecutionMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpStopQueryExecution{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opStopQueryExecution(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StopQueryExecution", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_TagResource.go new file mode 100644 index 00000000..efc2b635 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_TagResource.go @@ -0,0 +1,174 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds one or more tags to an Athena resource. A tag is a label that you assign +// to a resource. Each tag consists of a key and an optional value, both of which +// you define. For example, you can use tags to categorize Athena workgroups, data +// catalogs, or capacity reservations by purpose, owner, or environment. Use a +// consistent set of tag keys to make it easier to search and filter the resources +// in your account. For best practices, see [Tagging Best Practices]. Tag keys can be from 1 to 128 UTF-8 +// Unicode characters, and tag values can be from 0 to 256 UTF-8 Unicode +// characters. 
Tags can use letters and numbers representable in UTF-8, and the +// following characters: + - = . _ : / @. Tag keys and values are case-sensitive. +// Tag keys must be unique per resource. If you specify more than one tag, separate +// them by commas. +// +// [Tagging Best Practices]: https://docs.aws.amazon.com/whitepapers/latest/tagging-best-practices/tagging-best-practices.html +func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // Specifies the ARN of the Athena resource to which tags are to be added. + // + // This member is required. + ResourceARN *string + + // A collection of one or more tags, separated by commas, to be added to an Athena + // resource. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = 
addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "TagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_TerminateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_TerminateSession.go new file mode 100644 index 00000000..51b9f1f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_TerminateSession.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Terminates an active session. A TerminateSession call on a session that is +// already inactive (for example, in a FAILED , TERMINATED or TERMINATING state) +// succeeds but has no effect. Calculations running in the session when +// TerminateSession is called are forcefully stopped, but may display as FAILED +// instead of STOPPED . +func (c *Client) TerminateSession(ctx context.Context, params *TerminateSessionInput, optFns ...func(*Options)) (*TerminateSessionOutput, error) { + if params == nil { + params = &TerminateSessionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TerminateSession", params, optFns, c.addOperationTerminateSessionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TerminateSessionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TerminateSessionInput struct { + + // The session ID. + // + // This member is required. + SessionId *string + + noSmithyDocumentSerde +} + +type TerminateSessionOutput struct { + + // The state of the session. A description of each state follows. + // + // CREATING - The session is being started, including acquiring resources. + // + // CREATED - The session has been started. + // + // IDLE - The session is able to accept a calculation. + // + // BUSY - The session is processing another task and is unable to accept a + // calculation. + // + // TERMINATING - The session is in the process of shutting down. + // + // TERMINATED - The session and its resources are no longer running. + // + // DEGRADED - The session has no healthy coordinators. + // + // FAILED - Due to a failure, the session and its resources are no longer running. + State types.SessionState + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTerminateSessionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTerminateSession{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTerminateSession{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "TerminateSession"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpTerminateSessionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTerminateSession(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTerminateSession(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "TerminateSession", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UntagResource.go new file mode 100644 index 00000000..f61cc9a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UntagResource.go @@ -0,0 
+1,161 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes one or more tags from an Athena resource. +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // Specifies the ARN of the resource from which tags are to be removed. + // + // This member is required. + ResourceARN *string + + // A comma-separated list of one or more tag keys whose tags are to be removed + // from the specified resource. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = 
addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UntagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateCapacityReservation.go new file mode 100644 index 00000000..fba31fc2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateCapacityReservation.go @@ -0,0 +1,161 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the number of requested data processing units for the capacity +// reservation with the specified name. +func (c *Client) UpdateCapacityReservation(ctx context.Context, params *UpdateCapacityReservationInput, optFns ...func(*Options)) (*UpdateCapacityReservationOutput, error) { + if params == nil { + params = &UpdateCapacityReservationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateCapacityReservation", params, optFns, c.addOperationUpdateCapacityReservationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateCapacityReservationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateCapacityReservationInput struct { + + // The name of the capacity reservation. + // + // This member is required. + Name *string + + // The new number of requested data processing units. + // + // This member is required. + TargetDpus *int32 + + noSmithyDocumentSerde +} + +type UpdateCapacityReservationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateCapacityReservationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateCapacityReservation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateCapacityReservation{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateCapacityReservation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateCapacityReservationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCapacityReservation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateCapacityReservation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateCapacityReservation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateDataCatalog.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateDataCatalog.go new file mode 100644 index 00000000..d9d106fe --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateDataCatalog.go @@ -0,0 +1,191 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the data catalog that has the specified name. +func (c *Client) UpdateDataCatalog(ctx context.Context, params *UpdateDataCatalogInput, optFns ...func(*Options)) (*UpdateDataCatalogOutput, error) { + if params == nil { + params = &UpdateDataCatalogInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateDataCatalog", params, optFns, c.addOperationUpdateDataCatalogMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateDataCatalogOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateDataCatalogInput struct { + + // The name of the data catalog to update. The catalog name must be unique for the + // Amazon Web Services account and can use a maximum of 127 alphanumeric, + // underscore, at sign, or hyphen characters. The remainder of the length + // constraint of 256 is reserved for use by Athena. + // + // This member is required. + Name *string + + // Specifies the type of data catalog to update. Specify LAMBDA for a federated + // catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. + // + // This member is required. + Type types.DataCatalogType + + // New or modified text that describes the data catalog. + Description *string + + // Specifies the Lambda function or functions to use for updating the data + // catalog. This is a mapping whose values depend on the catalog type. + // + // - For the HIVE data catalog type, use the following syntax. The + // metadata-function parameter is required. The sdk-version parameter is optional + // and defaults to the currently supported version. + // + // metadata-function=lambda_arn, sdk-version=version_number + // + // - For the LAMBDA data catalog type, use one of the following sets of required + // parameters, but not both. + // + // - If you have one Lambda function that processes metadata and another for + // reading the actual data, use the following syntax. Both parameters are required. + // + // metadata-function=lambda_arn, record-function=lambda_arn + // + // - If you have a composite Lambda function that processes both metadata and + // data, use the following syntax to specify your Lambda function. + // + // function=lambda_arn + Parameters map[string]string + + noSmithyDocumentSerde +} + +type UpdateDataCatalogOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateDataCatalogMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateDataCatalog{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateDataCatalog{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateDataCatalog"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateDataCatalogValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateDataCatalog(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateDataCatalog(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateDataCatalog", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNamedQuery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNamedQuery.go new file mode 100644 index 00000000..10a9f9e6 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNamedQuery.go @@ -0,0 +1,168 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates a NamedQuery object. The database or workgroup cannot be updated. +func (c *Client) UpdateNamedQuery(ctx context.Context, params *UpdateNamedQueryInput, optFns ...func(*Options)) (*UpdateNamedQueryOutput, error) { + if params == nil { + params = &UpdateNamedQueryInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateNamedQuery", params, optFns, c.addOperationUpdateNamedQueryMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateNamedQueryOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateNamedQueryInput struct { + + // The name of the query. + // + // This member is required. + Name *string + + // The unique identifier (UUID) of the query. + // + // This member is required. + NamedQueryId *string + + // The contents of the query with all query statements. + // + // This member is required. + QueryString *string + + // The query description. + Description *string + + noSmithyDocumentSerde +} + +type UpdateNamedQueryOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateNamedQueryMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateNamedQuery{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateNamedQuery{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateNamedQuery"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateNamedQueryValidationMiddleware(stack); err 
!= nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateNamedQuery(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateNamedQuery(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateNamedQuery", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNotebook.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNotebook.go new file mode 100644 index 00000000..44637cef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNotebook.go @@ -0,0 +1,178 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the contents of a Spark notebook. +func (c *Client) UpdateNotebook(ctx context.Context, params *UpdateNotebookInput, optFns ...func(*Options)) (*UpdateNotebookOutput, error) { + if params == nil { + params = &UpdateNotebookInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateNotebook", params, optFns, c.addOperationUpdateNotebookMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateNotebookOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateNotebookInput struct { + + // The ID of the notebook to update. + // + // This member is required. + NotebookId *string + + // The updated content for the notebook. + // + // This member is required. + Payload *string + + // The notebook content type. Currently, the only valid type is IPYNB . + // + // This member is required. + Type types.NotebookType + + // A unique case-sensitive string used to ensure the request to create the + // notebook is idempotent (executes only once). + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for you. + // If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, + // you must provide this token or the action will fail. + ClientRequestToken *string + + // The active notebook session ID. Required if the notebook has an active session. + SessionId *string + + noSmithyDocumentSerde +} + +type UpdateNotebookOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateNotebookMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateNotebook{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateNotebook{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateNotebook"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateNotebookValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateNotebook(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateNotebook(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateNotebook", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNotebookMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNotebookMetadata.go new file mode 100644 index 00000000..eea77306 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateNotebookMetadata.go 
@@ -0,0 +1,169 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the metadata for a notebook. +func (c *Client) UpdateNotebookMetadata(ctx context.Context, params *UpdateNotebookMetadataInput, optFns ...func(*Options)) (*UpdateNotebookMetadataOutput, error) { + if params == nil { + params = &UpdateNotebookMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateNotebookMetadata", params, optFns, c.addOperationUpdateNotebookMetadataMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateNotebookMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateNotebookMetadataInput struct { + + // The name to update the notebook to. + // + // This member is required. + Name *string + + // The ID of the notebook to update the metadata for. + // + // This member is required. + NotebookId *string + + // A unique case-sensitive string used to ensure the request to create the + // notebook is idempotent (executes only once). + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for you. + // If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, + // you must provide this token or the action will fail. + ClientRequestToken *string + + noSmithyDocumentSerde +} + +type UpdateNotebookMetadataOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateNotebookMetadataMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateNotebookMetadata{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateNotebookMetadata{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateNotebookMetadata"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = 
addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateNotebookMetadataValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateNotebookMetadata(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateNotebookMetadata(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateNotebookMetadata", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdatePreparedStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdatePreparedStatement.go new file mode 100644 index 00000000..193237e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdatePreparedStatement.go @@ -0,0 +1,168 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates a prepared statement. +func (c *Client) UpdatePreparedStatement(ctx context.Context, params *UpdatePreparedStatementInput, optFns ...func(*Options)) (*UpdatePreparedStatementOutput, error) { + if params == nil { + params = &UpdatePreparedStatementInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdatePreparedStatement", params, optFns, c.addOperationUpdatePreparedStatementMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdatePreparedStatementOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdatePreparedStatementInput struct { + + // The query string for the prepared statement. + // + // This member is required. + QueryStatement *string + + // The name of the prepared statement. + // + // This member is required. + StatementName *string + + // The workgroup for the prepared statement. + // + // This member is required. + WorkGroup *string + + // The description of the prepared statement. + Description *string + + noSmithyDocumentSerde +} + +type UpdatePreparedStatementOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdatePreparedStatementMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdatePreparedStatement{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdatePreparedStatement{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdatePreparedStatement"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdatePreparedStatementValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdatePreparedStatement(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdatePreparedStatement(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdatePreparedStatement", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateWorkGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateWorkGroup.go new file mode 100644 index 00000000..e92f51a9 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/api_op_UpdateWorkGroup.go @@ -0,0 +1,166 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the workgroup with the specified name. The workgroup's name cannot be +// changed. Only ConfigurationUpdates can be specified. +func (c *Client) UpdateWorkGroup(ctx context.Context, params *UpdateWorkGroupInput, optFns ...func(*Options)) (*UpdateWorkGroupOutput, error) { + if params == nil { + params = &UpdateWorkGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateWorkGroup", params, optFns, c.addOperationUpdateWorkGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateWorkGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateWorkGroupInput struct { + + // The specified workgroup that will be updated. + // + // This member is required. + WorkGroup *string + + // Contains configuration updates for an Athena SQL workgroup. + ConfigurationUpdates *types.WorkGroupConfigurationUpdates + + // The workgroup description. + Description *string + + // The workgroup state that will be updated for the given workgroup. + State types.WorkGroupState + + noSmithyDocumentSerde +} + +type UpdateWorkGroupOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateWorkGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateWorkGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateWorkGroup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateWorkGroup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if 
err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpUpdateWorkGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateWorkGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateWorkGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateWorkGroup", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/auth.go new file mode 100644 index 00000000..a773673b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/auth.go @@ -0,0 +1,313 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx 
context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. + Region string +} + +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(ctx, params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. +type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "athena") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != 
option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + + span.End() + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { + options Options +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, 
rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/deserializers.go new file mode 100644 index 00000000..94f9b195 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/deserializers.go @@ -0,0 +1,16280 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" + "time" +) + +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + +type awsAwsjson11_deserializeOpBatchGetNamedQuery struct { +} + +func (*awsAwsjson11_deserializeOpBatchGetNamedQuery) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchGetNamedQuery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchGetNamedQuery(response, &metadata) + } + output := &BatchGetNamedQueryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchGetNamedQueryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchGetNamedQuery(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpBatchGetPreparedStatement struct { +} + +func (*awsAwsjson11_deserializeOpBatchGetPreparedStatement) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchGetPreparedStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchGetPreparedStatement(response, &metadata) + } + output := &BatchGetPreparedStatementOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchGetPreparedStatementOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchGetPreparedStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err 
:= io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpBatchGetQueryExecution struct { +} + +func (*awsAwsjson11_deserializeOpBatchGetQueryExecution) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpBatchGetQueryExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorBatchGetQueryExecution(response, &metadata) + } + output := &BatchGetQueryExecutionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentBatchGetQueryExecutionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorBatchGetQueryExecution(response *smithyhttp.Response, 
metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCancelCapacityReservation struct { +} + +func (*awsAwsjson11_deserializeOpCancelCapacityReservation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCancelCapacityReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCancelCapacityReservation(response, &metadata) + } + output := &CancelCapacityReservationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCancelCapacityReservationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err 
+} + +func awsAwsjson11_deserializeOpErrorCancelCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateCapacityReservation struct { +} + +func (*awsAwsjson11_deserializeOpCreateCapacityReservation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateCapacityReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateCapacityReservation(response, &metadata) + } + output := &CreateCapacityReservationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateCapacityReservationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", 
err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateDataCatalog struct { +} + +func (*awsAwsjson11_deserializeOpCreateDataCatalog) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateDataCatalog) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateDataCatalog(response, &metadata) + } + output := &CreateDataCatalogOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateDataCatalogOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateDataCatalog(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateNamedQuery struct { +} + +func (*awsAwsjson11_deserializeOpCreateNamedQuery) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateNamedQuery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateNamedQuery(response, &metadata) + } + output := &CreateNamedQueryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateNamedQueryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateNamedQuery(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateNotebook struct { +} + +func (*awsAwsjson11_deserializeOpCreateNotebook) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateNotebook) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateNotebook(response, &metadata) + } + output := &CreateNotebookOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateNotebookOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateNotebook(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreatePreparedStatement struct { +} + +func (*awsAwsjson11_deserializeOpCreatePreparedStatement) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreatePreparedStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreatePreparedStatement(response, &metadata) + } + output := &CreatePreparedStatementOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode 
response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreatePreparedStatementOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreatePreparedStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreatePresignedNotebookUrl struct { +} + +func (*awsAwsjson11_deserializeOpCreatePresignedNotebookUrl) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreatePresignedNotebookUrl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreatePresignedNotebookUrl(response, &metadata) + } + output := &CreatePresignedNotebookUrlOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreatePresignedNotebookUrlOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreatePresignedNotebookUrl(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateWorkGroup struct { +} + +func (*awsAwsjson11_deserializeOpCreateWorkGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateWorkGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateWorkGroup(response, &metadata) + } + output := &CreateWorkGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateWorkGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateWorkGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteCapacityReservation struct { +} + +func (*awsAwsjson11_deserializeOpDeleteCapacityReservation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteCapacityReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteCapacityReservation(response, &metadata) + } + output := &DeleteCapacityReservationOutput{} + out.Result = output + + 
var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteCapacityReservationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteDataCatalog struct { +} + +func (*awsAwsjson11_deserializeOpDeleteDataCatalog) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteDataCatalog) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteDataCatalog(response, &metadata) + } + 
output := &DeleteDataCatalogOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteDataCatalogOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteDataCatalog(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteNamedQuery struct { +} + +func (*awsAwsjson11_deserializeOpDeleteNamedQuery) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteNamedQuery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, 
awsAwsjson11_deserializeOpErrorDeleteNamedQuery(response, &metadata) + } + output := &DeleteNamedQueryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteNamedQueryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteNamedQuery(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteNotebook struct { +} + +func (*awsAwsjson11_deserializeOpDeleteNotebook) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteNotebook) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 
300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteNotebook(response, &metadata) + } + output := &DeleteNotebookOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteNotebookOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteNotebook(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeletePreparedStatement struct { +} + +func (*awsAwsjson11_deserializeOpDeletePreparedStatement) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeletePreparedStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := 
out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeletePreparedStatement(response, &metadata) + } + output := &DeletePreparedStatementOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeletePreparedStatementOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeletePreparedStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteWorkGroup struct { +} + +func (*awsAwsjson11_deserializeOpDeleteWorkGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteWorkGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, 
metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteWorkGroup(response, &metadata) + } + output := &DeleteWorkGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteWorkGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteWorkGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpExportNotebook struct { +} + +func (*awsAwsjson11_deserializeOpExportNotebook) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpExportNotebook) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if 
err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorExportNotebook(response, &metadata) + } + output := &ExportNotebookOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentExportNotebookOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorExportNotebook(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetCalculationExecution struct { +} + +func (*awsAwsjson11_deserializeOpGetCalculationExecution) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetCalculationExecution) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetCalculationExecution(response, &metadata) + } + output := &GetCalculationExecutionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetCalculationExecutionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetCalculationExecution(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsAwsjson11_deserializeOpGetCalculationExecutionCode struct { +} + +func (*awsAwsjson11_deserializeOpGetCalculationExecutionCode) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetCalculationExecutionCode) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetCalculationExecutionCode(response, &metadata) + } + output := &GetCalculationExecutionCodeOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetCalculationExecutionCodeOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetCalculationExecutionCode(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case 
strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetCalculationExecutionStatus struct { +} + +func (*awsAwsjson11_deserializeOpGetCalculationExecutionStatus) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetCalculationExecutionStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetCalculationExecutionStatus(response, &metadata) + } + output := &GetCalculationExecutionStatusOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetCalculationExecutionStatusOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetCalculationExecutionStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case 
strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetCapacityAssignmentConfiguration struct { +} + +func (*awsAwsjson11_deserializeOpGetCapacityAssignmentConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetCapacityAssignmentConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetCapacityAssignmentConfiguration(response, &metadata) + } + output := &GetCapacityAssignmentConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetCapacityAssignmentConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetCapacityAssignmentConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetCapacityReservation struct { +} + +func (*awsAwsjson11_deserializeOpGetCapacityReservation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetCapacityReservation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetCapacityReservation(response, &metadata) + } + output := &GetCapacityReservationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetCapacityReservationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err 
= &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetDatabase struct { +} + +func (*awsAwsjson11_deserializeOpGetDatabase) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetDatabase) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetDatabase(response, &metadata) + } + output := &GetDatabaseOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetDatabaseOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetDatabase(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err 
= &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("MetadataException", errorCode): + return awsAwsjson11_deserializeErrorMetadataException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetDataCatalog struct { +} + +func (*awsAwsjson11_deserializeOpGetDataCatalog) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetDataCatalog) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetDataCatalog(response, &metadata) + } + output := &GetDataCatalogOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetDataCatalogOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetDataCatalog(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetNamedQuery struct { +} + +func (*awsAwsjson11_deserializeOpGetNamedQuery) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetNamedQuery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetNamedQuery(response, &metadata) + } + output := &GetNamedQueryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetNamedQueryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetNamedQuery(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) 
+ decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetNotebookMetadata struct { +} + +func (*awsAwsjson11_deserializeOpGetNotebookMetadata) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetNotebookMetadata) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetNotebookMetadata(response, &metadata) + } + output := &GetNotebookMetadataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetNotebookMetadataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetNotebookMetadata(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetPreparedStatement struct { +} + +func (*awsAwsjson11_deserializeOpGetPreparedStatement) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetPreparedStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetPreparedStatement(response, &metadata) + } + output := &GetPreparedStatementOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetPreparedStatementOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetPreparedStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} 
+ } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetQueryExecution struct { +} + +func (*awsAwsjson11_deserializeOpGetQueryExecution) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetQueryExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetQueryExecution(response, &metadata) + } + output := &GetQueryExecutionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetQueryExecutionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetQueryExecution(response *smithyhttp.Response, metadata *middleware.Metadata) error { + 
var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetQueryResults struct { +} + +func (*awsAwsjson11_deserializeOpGetQueryResults) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetQueryResults) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetQueryResults(response, &metadata) + } + output := &GetQueryResultsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetQueryResultsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetQueryResults(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetQueryRuntimeStatistics struct { +} + +func (*awsAwsjson11_deserializeOpGetQueryRuntimeStatistics) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetQueryRuntimeStatistics) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetQueryRuntimeStatistics(response, &metadata) + } + output := &GetQueryRuntimeStatisticsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetQueryRuntimeStatisticsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetQueryRuntimeStatistics(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetSession struct { +} + +func (*awsAwsjson11_deserializeOpGetSession) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetSession(response, &metadata) + } + output := &GetSessionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetSessionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ 
+ Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetSessionStatus struct { +} + +func (*awsAwsjson11_deserializeOpGetSessionStatus) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetSessionStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetSessionStatus(response, &metadata) + } + output := &GetSessionStatusOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson11_deserializeOpDocumentGetSessionStatusOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetSessionStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetTableMetadata struct { +} + +func (*awsAwsjson11_deserializeOpGetTableMetadata) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetTableMetadata) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetTableMetadata(response, &metadata) + } + output := &GetTableMetadataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetTableMetadataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetTableMetadata(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("MetadataException", errorCode): + return awsAwsjson11_deserializeErrorMetadataException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetWorkGroup struct { +} + +func (*awsAwsjson11_deserializeOpGetWorkGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetWorkGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetWorkGroup(response, &metadata) + } + output := &GetWorkGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetWorkGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetWorkGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpImportNotebook struct { +} + +func (*awsAwsjson11_deserializeOpImportNotebook) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpImportNotebook) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorImportNotebook(response, &metadata) + } + output := &ImportNotebookOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentImportNotebookOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorImportNotebook(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListApplicationDPUSizes struct { +} + +func (*awsAwsjson11_deserializeOpListApplicationDPUSizes) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListApplicationDPUSizes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListApplicationDPUSizes(response, 
&metadata) + } + output := &ListApplicationDPUSizesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListApplicationDPUSizesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListApplicationDPUSizes(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListCalculationExecutions struct { +} + +func (*awsAwsjson11_deserializeOpListCalculationExecutions) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListCalculationExecutions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListCalculationExecutions(response, &metadata) + } + output := &ListCalculationExecutionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListCalculationExecutionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListCalculationExecutions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListCapacityReservations struct { +} + +func (*awsAwsjson11_deserializeOpListCapacityReservations) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListCapacityReservations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := 
tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListCapacityReservations(response, &metadata) + } + output := &ListCapacityReservationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListCapacityReservationsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListCapacityReservations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListDatabases struct { +} + +func (*awsAwsjson11_deserializeOpListDatabases) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListDatabases) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if 
err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListDatabases(response, &metadata) + } + output := &ListDatabasesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListDatabasesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListDatabases(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("MetadataException", errorCode): + return awsAwsjson11_deserializeErrorMetadataException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListDataCatalogs struct { +} + +func (*awsAwsjson11_deserializeOpListDataCatalogs) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListDataCatalogs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListDataCatalogs(response, &metadata) + } + output := &ListDataCatalogsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListDataCatalogsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListDataCatalogs(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListEngineVersions struct { +} + +func (*awsAwsjson11_deserializeOpListEngineVersions) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListEngineVersions) HandleDeserialize(ctx 
context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListEngineVersions(response, &metadata) + } + output := &ListEngineVersionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListEngineVersionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListEngineVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListExecutors struct { +} + +func (*awsAwsjson11_deserializeOpListExecutors) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsAwsjson11_deserializeOpListExecutors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListExecutors(response, &metadata) + } + output := &ListExecutorsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListExecutorsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListExecutors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + 
+type awsAwsjson11_deserializeOpListNamedQueries struct { +} + +func (*awsAwsjson11_deserializeOpListNamedQueries) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListNamedQueries) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListNamedQueries(response, &metadata) + } + output := &ListNamedQueriesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListNamedQueriesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListNamedQueries(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + 
} + return genericError + + } +} + +type awsAwsjson11_deserializeOpListNotebookMetadata struct { +} + +func (*awsAwsjson11_deserializeOpListNotebookMetadata) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListNotebookMetadata) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListNotebookMetadata(response, &metadata) + } + output := &ListNotebookMetadataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListNotebookMetadataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListNotebookMetadata(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case 
strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListNotebookSessions struct { +} + +func (*awsAwsjson11_deserializeOpListNotebookSessions) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListNotebookSessions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListNotebookSessions(response, &metadata) + } + output := &ListNotebookSessionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListNotebookSessionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListNotebookSessions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return 
awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListPreparedStatements struct { +} + +func (*awsAwsjson11_deserializeOpListPreparedStatements) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListPreparedStatements) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListPreparedStatements(response, &metadata) + } + output := &ListPreparedStatementsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListPreparedStatementsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListPreparedStatements(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := 
resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListQueryExecutions struct { +} + +func (*awsAwsjson11_deserializeOpListQueryExecutions) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListQueryExecutions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListQueryExecutions(response, &metadata) + } + output := &ListQueryExecutionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListQueryExecutionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListQueryExecutions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err 
+ } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListSessions struct { +} + +func (*awsAwsjson11_deserializeOpListSessions) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListSessions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListSessions(response, &metadata) + } + output := &ListSessionsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListSessionsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListSessions(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + 
return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("InternalServerException", errorCode):
+		return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody)
+
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("ResourceNotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpListTableMetadata struct {
+}
+
+func (*awsAwsjson11_deserializeOpListTableMetadata) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpListTableMetadata) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorListTableMetadata(response, &metadata)
+	}
+	output := &ListTableMetadataOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentListTableMetadataOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorListTableMetadata(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("InternalServerException", errorCode):
+		return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody)
+
+	case strings.EqualFold("InvalidRequestException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody)
+
+	case strings.EqualFold("MetadataException", errorCode):
+		return awsAwsjson11_deserializeErrorMetadataException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpListTagsForResource struct {
+}
+
+func (*awsAwsjson11_deserializeOpListTagsForResource) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorListTagsForResource(response, &metadata)
+	}
+	output := &ListTagsForResourceOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer :=
smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListWorkGroups struct { +} + +func (*awsAwsjson11_deserializeOpListWorkGroups) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListWorkGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListWorkGroups(response, &metadata) + } + output := &ListWorkGroupsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListWorkGroupsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListWorkGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutCapacityAssignmentConfiguration struct { +} + +func (*awsAwsjson11_deserializeOpPutCapacityAssignmentConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutCapacityAssignmentConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutCapacityAssignmentConfiguration(response, &metadata) + } + output := &PutCapacityAssignmentConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentPutCapacityAssignmentConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutCapacityAssignmentConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := 
io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStartCalculationExecution struct { +} + +func (*awsAwsjson11_deserializeOpStartCalculationExecution) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStartCalculationExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartCalculationExecution(response, &metadata) + } + output := &StartCalculationExecutionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStartCalculationExecutionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStartCalculationExecution(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStartQueryExecution struct { +} + +func (*awsAwsjson11_deserializeOpStartQueryExecution) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStartQueryExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartQueryExecution(response, &metadata) + } + output := &StartQueryExecutionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStartQueryExecutionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStartQueryExecution(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStartSession struct { +} + +func (*awsAwsjson11_deserializeOpStartSession) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStartSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartSession(response, &metadata) + } + output := &StartSessionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson11_deserializeOpDocumentStartSessionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStartSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("SessionAlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorSessionAlreadyExistsException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStopCalculationExecution struct { +} + +func (*awsAwsjson11_deserializeOpStopCalculationExecution) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStopCalculationExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStopCalculationExecution(response, &metadata) + } + output := 
&StopCalculationExecutionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStopCalculationExecutionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStopCalculationExecution(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStopQueryExecution struct { +} + +func (*awsAwsjson11_deserializeOpStopQueryExecution) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStopQueryExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: 
fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStopQueryExecution(response, &metadata) + } + output := &StopQueryExecutionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStopQueryExecutionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStopQueryExecution(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTagResource struct { +} + +func (*awsAwsjson11_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentTagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpTerminateSession struct { +} + +func (*awsAwsjson11_deserializeOpTerminateSession) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpTerminateSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, 
"client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorTerminateSession(response, &metadata) + } + output := &TerminateSessionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentTerminateSessionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorTerminateSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUntagResource struct { +} + +func (*awsAwsjson11_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = 
next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUntagResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateCapacityReservation struct { +} + +func (*awsAwsjson11_deserializeOpUpdateCapacityReservation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateCapacityReservation) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateCapacityReservation(response, &metadata) + } + output := &UpdateCapacityReservationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateCapacityReservationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateCapacityReservation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateDataCatalog struct { +} + +func (*awsAwsjson11_deserializeOpUpdateDataCatalog) ID() string { + return 
"OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateDataCatalog) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateDataCatalog(response, &metadata) + } + output := &UpdateDataCatalogOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateDataCatalogOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateDataCatalog(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateNamedQuery struct { +} + +func 
(*awsAwsjson11_deserializeOpUpdateNamedQuery) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateNamedQuery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateNamedQuery(response, &metadata) + } + output := &UpdateNamedQueryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateNamedQueryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateNamedQuery(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsAwsjson11_deserializeOpUpdateNotebook struct { +} + +func (*awsAwsjson11_deserializeOpUpdateNotebook) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateNotebook) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateNotebook(response, &metadata) + } + output := &UpdateNotebookOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateNotebookOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateNotebook(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return 
awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateNotebookMetadata struct { +} + +func (*awsAwsjson11_deserializeOpUpdateNotebookMetadata) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateNotebookMetadata) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateNotebookMetadata(response, &metadata) + } + output := &UpdateNotebookMetadataOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateNotebookMetadataOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateNotebookMetadata(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, 
errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsAwsjson11_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdatePreparedStatement struct { +} + +func (*awsAwsjson11_deserializeOpUpdatePreparedStatement) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdatePreparedStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdatePreparedStatement(response, &metadata) + } + output := &UpdatePreparedStatementOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdatePreparedStatementOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdatePreparedStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = 
restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateWorkGroup struct { +} + +func (*awsAwsjson11_deserializeOpUpdateWorkGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateWorkGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateWorkGroup(response, &metadata) + } + output := &UpdateWorkGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateWorkGroupOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateWorkGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, 
%w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsAwsjson11_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsAwsjson11_deserializeErrorInvalidRequestException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson11_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InternalServerException{} + err := awsAwsjson11_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidRequestException{} + err := awsAwsjson11_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.MetadataException{} + err := awsAwsjson11_deserializeDocumentMetadataException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ResourceNotFoundException{} + err := awsAwsjson11_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorSessionAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.SessionAlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentSessionAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.TooManyRequestsException{} + err := awsAwsjson11_deserializeDocumentTooManyRequestsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeDocumentAclConfiguration(v **types.AclConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AclConfiguration + if *v == nil { + sv = &types.AclConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "S3AclOption": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3AclOption to be of type string, got %T instead", value) + } + sv.S3AclOption = types.S3AclOption(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentApplicationDPUSizes(v **types.ApplicationDPUSizes, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ApplicationDPUSizes + if *v == nil { + sv = &types.ApplicationDPUSizes{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ApplicationRuntimeId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.ApplicationRuntimeId = ptr.String(jtv) + } + + case "SupportedDPUSizes": + if err := awsAwsjson11_deserializeDocumentSupportedDPUSizeList(&sv.SupportedDPUSizes, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentApplicationDPUSizesList(v *[]types.ApplicationDPUSizes, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ApplicationDPUSizes + if *v == nil { + cv = []types.ApplicationDPUSizes{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ApplicationDPUSizes + destAddr := &col + if err := awsAwsjson11_deserializeDocumentApplicationDPUSizes(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAthenaError(v **types.AthenaError, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AthenaError + if *v == nil { + sv = &types.AthenaError{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ErrorCategory": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ErrorCategory to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ErrorCategory = ptr.Int32(int32(i64)) + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + case "ErrorType": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ErrorType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ErrorType = ptr.Int32(int32(i64)) + } + + case "Retryable": + if value != nil { + jtv, ok 
:= value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Retryable = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCalculationResult(v **types.CalculationResult, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CalculationResult + if *v == nil { + sv = &types.CalculationResult{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ResultS3Uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Uri to be of type string, got %T instead", value) + } + sv.ResultS3Uri = ptr.String(jtv) + } + + case "ResultType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CalculationResultType to be of type string, got %T instead", value) + } + sv.ResultType = ptr.String(jtv) + } + + case "StdErrorS3Uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Uri to be of type string, got %T instead", value) + } + sv.StdErrorS3Uri = ptr.String(jtv) + } + + case "StdOutS3Uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Uri to be of type string, got %T instead", value) + } + sv.StdOutS3Uri = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCalculationsList(v *[]types.CalculationSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CalculationSummary + if *v == nil { + cv = []types.CalculationSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CalculationSummary + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCalculationSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCalculationStatistics(v **types.CalculationStatistics, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CalculationStatistics + if *v == nil { + sv = &types.CalculationStatistics{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DpuExecutionInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DpuExecutionInMillis = ptr.Int64(i64) + } + + case "Progress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Progress = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCalculationStatus(v **types.CalculationStatus, value interface{}) error { + if 
v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CalculationStatus + if *v == nil { + sv = &types.CalculationStatus{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CompletionDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CompletionDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CalculationExecutionState to be of type string, got %T instead", value) + } + sv.State = types.CalculationExecutionState(jtv) + } + + case "StateChangeReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.StateChangeReason = ptr.String(jtv) + } + + case "SubmissionDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.SubmissionDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCalculationSummary(v **types.CalculationSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CalculationSummary + if *v == nil { + sv = &types.CalculationSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CalculationExecutionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CalculationExecutionId to be of type string, got %T instead", value) + } + sv.CalculationExecutionId = ptr.String(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Status": + if err := awsAwsjson11_deserializeDocumentCalculationStatus(&sv.Status, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityAllocation(v **types.CapacityAllocation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CapacityAllocation + if *v == nil { + sv = &types.CapacityAllocation{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "RequestCompletionTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.RequestCompletionTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return 
fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "RequestTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.RequestTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CapacityAllocationStatus to be of type string, got %T instead", value) + } + sv.Status = types.CapacityAllocationStatus(jtv) + } + + case "StatusMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.StatusMessage = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityAssignment(v **types.CapacityAssignment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CapacityAssignment + if *v == nil { + sv = &types.CapacityAssignment{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "WorkGroupNames": + if err := awsAwsjson11_deserializeDocumentWorkGroupNamesList(&sv.WorkGroupNames, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityAssignmentConfiguration(v **types.CapacityAssignmentConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CapacityAssignmentConfiguration + if *v == nil { + sv = &types.CapacityAssignmentConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CapacityAssignments": + if err := awsAwsjson11_deserializeDocumentCapacityAssignmentsList(&sv.CapacityAssignments, value); err != nil { + return err + } + + case "CapacityReservationName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CapacityReservationName to be of type string, got %T instead", value) + } + sv.CapacityReservationName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityAssignmentsList(v *[]types.CapacityAssignment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CapacityAssignment + if *v == nil { + cv = []types.CapacityAssignment{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CapacityAssignment + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCapacityAssignment(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityReservation(v **types.CapacityReservation, value interface{}) error { + if v == nil 
{ + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CapacityReservation + if *v == nil { + sv = &types.CapacityReservation{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AllocatedDpus": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected AllocatedDpusInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AllocatedDpus = ptr.Int32(int32(i64)) + } + + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "LastAllocation": + if err := awsAwsjson11_deserializeDocumentCapacityAllocation(&sv.LastAllocation, value); err != nil { + return err + } + + case "LastSuccessfulAllocationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastSuccessfulAllocationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CapacityReservationName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CapacityReservationStatus to be of type string, got %T instead", value) + } + sv.Status = types.CapacityReservationStatus(jtv) + } + + case "TargetDpus": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected TargetDpusInteger to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TargetDpus = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCapacityReservationsList(v *[]types.CapacityReservation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CapacityReservation + if *v == nil { + cv = []types.CapacityReservation{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CapacityReservation + destAddr := &col + if err := awsAwsjson11_deserializeDocumentCapacityReservation(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentColumn(v **types.Column, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Column + if *v == nil { + sv = &types.Column{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Comment": + if value != nil { + 
jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CommentString to be of type string, got %T instead", value) + } + sv.Comment = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TypeString to be of type string, got %T instead", value) + } + sv.Type = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentColumnInfo(v **types.ColumnInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ColumnInfo + if *v == nil { + sv = &types.ColumnInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CaseSensitive": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.CaseSensitive = jtv + } + + case "CatalogName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.CatalogName = ptr.String(jtv) + } + + case "Label": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Label = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Nullable": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ColumnNullable to be of type string, got %T instead", value) + } + sv.Nullable = types.ColumnNullable(jtv) + } + + case "Precision": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Precision = int32(i64) + } + + case "Scale": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Scale = int32(i64) + } + + case "SchemaName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SchemaName = ptr.String(jtv) + } + + case "TableName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.TableName = ptr.String(jtv) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Type = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentColumnInfoList(v *[]types.ColumnInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ColumnInfo + if *v == nil { + cv = []types.ColumnInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ColumnInfo + destAddr := &col + if err := awsAwsjson11_deserializeDocumentColumnInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentColumnList(v *[]types.Column, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Column + if *v == nil { + cv = []types.Column{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Column + destAddr := &col + if err := awsAwsjson11_deserializeDocumentColumn(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentCustomerContentEncryptionConfiguration(v **types.CustomerContentEncryptionConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomerContentEncryptionConfiguration + if *v == nil { + sv = &types.CustomerContentEncryptionConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KmsKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KmsKey to be of type string, got %T instead", value) + } + sv.KmsKey = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDatabase(v **types.Database, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Database + if *v == nil { + sv = &types.Database{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Parameters": + if err := awsAwsjson11_deserializeDocumentParametersMap(&sv.Parameters, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDatabaseList(v *[]types.Database, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Database + if *v == nil { + cv = []types.Database{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Database + destAddr := &col + if err := 
awsAwsjson11_deserializeDocumentDatabase(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentDataCatalog(v **types.DataCatalog, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataCatalog + if *v == nil { + sv = &types.DataCatalog{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ConnectionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectionType to be of type string, got %T instead", value) + } + sv.ConnectionType = types.ConnectionType(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Error = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CatalogNameString to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Parameters": + if err := awsAwsjson11_deserializeDocumentParametersMap(&sv.Parameters, value); err != nil { + return err + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataCatalogStatus to be of type string, got %T instead", value) + } + sv.Status = types.DataCatalogStatus(jtv) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataCatalogType to be of type string, got %T instead", value) + } + sv.Type = types.DataCatalogType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDataCatalogSummary(v **types.DataCatalogSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataCatalogSummary + if *v == nil { + sv = &types.DataCatalogSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CatalogName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CatalogNameString to be of type string, got %T instead", value) + } + sv.CatalogName = ptr.String(jtv) + } + + case "ConnectionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectionType to be of type string, got %T instead", value) + } + sv.ConnectionType = types.ConnectionType(jtv) + } + + case "Error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Error = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataCatalogStatus to be of type string, got %T instead", value) + } + sv.Status = types.DataCatalogStatus(jtv) + } + + case "Type": + if 
value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataCatalogType to be of type string, got %T instead", value) + } + sv.Type = types.DataCatalogType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDataCatalogSummaryList(v *[]types.DataCatalogSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DataCatalogSummary + if *v == nil { + cv = []types.DataCatalogSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DataCatalogSummary + destAddr := &col + if err := awsAwsjson11_deserializeDocumentDataCatalogSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentDatum(v **types.Datum, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Datum + if *v == nil { + sv = &types.Datum{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "VarCharValue": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected datumString to be of type string, got %T instead", value) + } + sv.VarCharValue = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentDatumList(v *[]types.Datum, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Datum + if *v == nil { + cv = []types.Datum{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Datum + destAddr := &col + if err := awsAwsjson11_deserializeDocumentDatum(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EncryptionConfiguration + if *v == nil { + sv = &types.EncryptionConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EncryptionOption": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EncryptionOption to be of type string, got %T instead", value) + } + sv.EncryptionOption = types.EncryptionOption(jtv) + } + + case "KmsKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.KmsKey = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEngineConfiguration(v **types.EngineConfiguration, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EngineConfiguration + if *v == nil { + sv = &types.EngineConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AdditionalConfigs": + if err := awsAwsjson11_deserializeDocumentParametersMap(&sv.AdditionalConfigs, value); err != nil { + return err + } + + case "CoordinatorDpuSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected CoordinatorDpuSize to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.CoordinatorDpuSize = ptr.Int32(int32(i64)) + } + + case "DefaultExecutorDpuSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected DefaultExecutorDpuSize to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DefaultExecutorDpuSize = ptr.Int32(int32(i64)) + } + + case "MaxConcurrentDpus": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected MaxConcurrentDpus to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxConcurrentDpus = ptr.Int32(int32(i64)) + } + + case "SparkProperties": + if err := awsAwsjson11_deserializeDocumentParametersMap(&sv.SparkProperties, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEngineVersion(v **types.EngineVersion, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.EngineVersion + if *v == nil { + sv = &types.EngineVersion{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EffectiveEngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.EffectiveEngineVersion = ptr.String(jtv) + } + + case "SelectedEngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.SelectedEngineVersion = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentEngineVersionsList(v *[]types.EngineVersion, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.EngineVersion + if *v == nil { + cv = []types.EngineVersion{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.EngineVersion + destAddr := &col + if err := awsAwsjson11_deserializeDocumentEngineVersion(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentExecutionParameters(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value 
== nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExecutionParameter to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentExecutorsSummary(v **types.ExecutorsSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExecutorsSummary + if *v == nil { + sv = &types.ExecutorsSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ExecutorId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExecutorId to be of type string, got %T instead", value) + } + sv.ExecutorId = ptr.String(jtv) + } + + case "ExecutorSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExecutorSize = ptr.Int64(i64) + } + + case "ExecutorState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExecutorState to be of type string, got %T instead", value) + } + sv.ExecutorState = types.ExecutorState(jtv) + } + + case "ExecutorType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ExecutorType to be of type string, got %T instead", value) + } + sv.ExecutorType = types.ExecutorType(jtv) + } + + case "StartDateTime": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StartDateTime = ptr.Int64(i64) + } + + case "TerminationDateTime": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TerminationDateTime = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentExecutorsSummaryList(v *[]types.ExecutorsSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ExecutorsSummary + if *v == nil { + cv = []types.ExecutorsSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ExecutorsSummary + destAddr := &col + if err := awsAwsjson11_deserializeDocumentExecutorsSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentIdentityCenterConfiguration(v **types.IdentityCenterConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IdentityCenterConfiguration + if *v == nil { + sv = &types.IdentityCenterConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EnableIdentityCenter": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.EnableIdentityCenter = ptr.Bool(jtv) + } + + case "IdentityCenterInstanceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdentityCenterInstanceArn to be of type string, got %T instead", value) + } + sv.IdentityCenterInstanceArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerException + if *v == nil { + sv = &types.InternalServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AthenaErrorCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.AthenaErrorCode = ptr.String(jtv) + } + + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMetadataException(v **types.MetadataException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MetadataException + if *v == nil { + sv = &types.MetadataException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNamedQuery(v **types.NamedQuery, value 
interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NamedQuery + if *v == nil { + sv = &types.NamedQuery{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Database": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DatabaseString to be of type string, got %T instead", value) + } + sv.Database = ptr.String(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "NamedQueryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NamedQueryId to be of type string, got %T instead", value) + } + sv.NamedQueryId = ptr.String(jtv) + } + + case "QueryString": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryString to be of type string, got %T instead", value) + } + sv.QueryString = ptr.String(jtv) + } + + case "WorkGroup": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupName to be of type string, got %T instead", value) + } + sv.WorkGroup = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNamedQueryIdList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NamedQueryId to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNamedQueryList(v *[]types.NamedQuery, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.NamedQuery + if *v == nil { + cv = []types.NamedQuery{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.NamedQuery + destAddr := &col + if err := awsAwsjson11_deserializeDocumentNamedQuery(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNotebookMetadata(v **types.NotebookMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NotebookMetadata + if *v == nil { + sv = &types.NotebookMetadata{} + } else { + sv = *v + } + + for key, value := 
range shape { + switch key { + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "LastModifiedTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModifiedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NotebookName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "NotebookId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NotebookId to be of type string, got %T instead", value) + } + sv.NotebookId = ptr.String(jtv) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NotebookType to be of type string, got %T instead", value) + } + sv.Type = types.NotebookType(jtv) + } + + case "WorkGroup": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupName to be of type string, got %T instead", value) + } + sv.WorkGroup = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentNotebookMetadataArray(v *[]types.NotebookMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.NotebookMetadata + if *v == nil { + cv = []types.NotebookMetadata{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.NotebookMetadata + destAddr := &col + if err := awsAwsjson11_deserializeDocumentNotebookMetadata(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNotebookSessionsList(v *[]types.NotebookSessionSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.NotebookSessionSummary + if *v == nil { + cv = []types.NotebookSessionSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.NotebookSessionSummary + destAddr := &col + if err := awsAwsjson11_deserializeDocumentNotebookSessionSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNotebookSessionSummary(v **types.NotebookSessionSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NotebookSessionSummary + if *v == nil { + sv = &types.NotebookSessionSummary{} + } else { + sv = *v + } + + for key, value := range 
shape { + switch key { + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "SessionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionId to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentParametersMap(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ParametersMapValue to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsAwsjson11_deserializeDocumentPreparedStatement(v **types.PreparedStatement, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PreparedStatement + if *v == nil { + sv = &types.PreparedStatement{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "LastModifiedTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModifiedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "QueryStatement": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryString to be of type string, got %T instead", value) + } + sv.QueryStatement = ptr.String(jtv) + } + + case "StatementName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StatementName to be of type string, got %T instead", value) + } + sv.StatementName = ptr.String(jtv) + } + + case "WorkGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupName to be of type string, got %T instead", value) + } + sv.WorkGroupName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPreparedStatementDetailsList(v *[]types.PreparedStatement, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PreparedStatement + if *v == nil { + cv = 
[]types.PreparedStatement{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PreparedStatement + destAddr := &col + if err := awsAwsjson11_deserializeDocumentPreparedStatement(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentPreparedStatementsList(v *[]types.PreparedStatementSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PreparedStatementSummary + if *v == nil { + cv = []types.PreparedStatementSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PreparedStatementSummary + destAddr := &col + if err := awsAwsjson11_deserializeDocumentPreparedStatementSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentPreparedStatementSummary(v **types.PreparedStatementSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PreparedStatementSummary + if *v == nil { + sv = &types.PreparedStatementSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "LastModifiedTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModifiedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "StatementName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StatementName to be of type string, got %T instead", value) + } + sv.StatementName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryExecution(v **types.QueryExecution, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryExecution + if *v == nil { + sv = &types.QueryExecution{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EngineVersion": + if err := awsAwsjson11_deserializeDocumentEngineVersion(&sv.EngineVersion, value); err != nil { + return err + } + + case "ExecutionParameters": + if err := awsAwsjson11_deserializeDocumentExecutionParameters(&sv.ExecutionParameters, value); err != nil { + return err + } + + case "Query": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryString to be of type string, got %T instead", value) + } + sv.Query = ptr.String(jtv) + } + + case "QueryExecutionContext": + if err := awsAwsjson11_deserializeDocumentQueryExecutionContext(&sv.QueryExecutionContext, value); err != nil { + return err + } + + case "QueryExecutionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryExecutionId to be of type string, got %T 
instead", value) + } + sv.QueryExecutionId = ptr.String(jtv) + } + + case "QueryResultsS3AccessGrantsConfiguration": + if err := awsAwsjson11_deserializeDocumentQueryResultsS3AccessGrantsConfiguration(&sv.QueryResultsS3AccessGrantsConfiguration, value); err != nil { + return err + } + + case "ResultConfiguration": + if err := awsAwsjson11_deserializeDocumentResultConfiguration(&sv.ResultConfiguration, value); err != nil { + return err + } + + case "ResultReuseConfiguration": + if err := awsAwsjson11_deserializeDocumentResultReuseConfiguration(&sv.ResultReuseConfiguration, value); err != nil { + return err + } + + case "StatementType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StatementType to be of type string, got %T instead", value) + } + sv.StatementType = types.StatementType(jtv) + } + + case "Statistics": + if err := awsAwsjson11_deserializeDocumentQueryExecutionStatistics(&sv.Statistics, value); err != nil { + return err + } + + case "Status": + if err := awsAwsjson11_deserializeDocumentQueryExecutionStatus(&sv.Status, value); err != nil { + return err + } + + case "SubstatementType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.SubstatementType = ptr.String(jtv) + } + + case "WorkGroup": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupName to be of type string, got %T instead", value) + } + sv.WorkGroup = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryExecutionContext(v **types.QueryExecutionContext, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryExecutionContext + if *v == nil { + sv = &types.QueryExecutionContext{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Catalog": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CatalogNameString to be of type string, got %T instead", value) + } + sv.Catalog = ptr.String(jtv) + } + + case "Database": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DatabaseString to be of type string, got %T instead", value) + } + sv.Database = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryExecutionIdList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryExecutionId to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryExecutionList(v *[]types.QueryExecution, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.QueryExecution + if *v == nil { + cv = []types.QueryExecution{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.QueryExecution + destAddr := &col + if err := awsAwsjson11_deserializeDocumentQueryExecution(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryExecutionStatistics(v **types.QueryExecutionStatistics, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryExecutionStatistics + if *v == nil { + sv = &types.QueryExecutionStatistics{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DataManifestLocation": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.DataManifestLocation = ptr.String(jtv) + } + + case "DataScannedInBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DataScannedInBytes = ptr.Int64(i64) + } + + case "EngineExecutionTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.EngineExecutionTimeInMillis = ptr.Int64(i64) + } + + case "QueryPlanningTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryPlanningTimeInMillis = ptr.Int64(i64) + } + + case "QueryQueueTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryQueueTimeInMillis = ptr.Int64(i64) + } + + case "ResultReuseInformation": + if err := awsAwsjson11_deserializeDocumentResultReuseInformation(&sv.ResultReuseInformation, value); err != nil { + return err + } + + case "ServicePreProcessingTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ServicePreProcessingTimeInMillis = ptr.Int64(i64) + } + + case "ServiceProcessingTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ServiceProcessingTimeInMillis = ptr.Int64(i64) + } + + case "TotalExecutionTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalExecutionTimeInMillis = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeDocumentQueryExecutionStatus(v **types.QueryExecutionStatus, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryExecutionStatus + if *v == nil { + sv = &types.QueryExecutionStatus{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AthenaError": + if err := awsAwsjson11_deserializeDocumentAthenaError(&sv.AthenaError, value); err != nil { + return err + } + + case "CompletionDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CompletionDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryExecutionState to be of type string, got %T instead", value) + } + sv.State = types.QueryExecutionState(jtv) + } + + case "StateChangeReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.StateChangeReason = ptr.String(jtv) + } + + case "SubmissionDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.SubmissionDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryResultsS3AccessGrantsConfiguration(v **types.QueryResultsS3AccessGrantsConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryResultsS3AccessGrantsConfiguration + if *v == nil { + sv = &types.QueryResultsS3AccessGrantsConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AuthenticationType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AuthenticationType to be of type string, got %T instead", value) + } + sv.AuthenticationType = types.AuthenticationType(jtv) + } + + case "CreateUserLevelPrefix": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.CreateUserLevelPrefix = ptr.Bool(jtv) + } + + case "EnableS3AccessGrants": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.EnableS3AccessGrants = ptr.Bool(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryRuntimeStatistics(v **types.QueryRuntimeStatistics, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var 
sv *types.QueryRuntimeStatistics + if *v == nil { + sv = &types.QueryRuntimeStatistics{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "OutputStage": + if err := awsAwsjson11_deserializeDocumentQueryStage(&sv.OutputStage, value); err != nil { + return err + } + + case "Rows": + if err := awsAwsjson11_deserializeDocumentQueryRuntimeStatisticsRows(&sv.Rows, value); err != nil { + return err + } + + case "Timeline": + if err := awsAwsjson11_deserializeDocumentQueryRuntimeStatisticsTimeline(&sv.Timeline, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryRuntimeStatisticsRows(v **types.QueryRuntimeStatisticsRows, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryRuntimeStatisticsRows + if *v == nil { + sv = &types.QueryRuntimeStatisticsRows{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "InputBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InputBytes = ptr.Int64(i64) + } + + case "InputRows": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InputRows = ptr.Int64(i64) + } + + case "OutputBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.OutputBytes = ptr.Int64(i64) + } + + case "OutputRows": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.OutputRows = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryRuntimeStatisticsTimeline(v **types.QueryRuntimeStatisticsTimeline, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryRuntimeStatisticsTimeline + if *v == nil { + sv = &types.QueryRuntimeStatisticsTimeline{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EngineExecutionTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.EngineExecutionTimeInMillis = ptr.Int64(i64) + } + + case "QueryPlanningTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryPlanningTimeInMillis = ptr.Int64(i64) + } + + case "QueryQueueTimeInMillis": + if value != nil { + jtv, ok := 
value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryQueueTimeInMillis = ptr.Int64(i64) + } + + case "ServicePreProcessingTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ServicePreProcessingTimeInMillis = ptr.Int64(i64) + } + + case "ServiceProcessingTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ServiceProcessingTimeInMillis = ptr.Int64(i64) + } + + case "TotalExecutionTimeInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalExecutionTimeInMillis = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryStage(v **types.QueryStage, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryStage + if *v == nil { + sv = &types.QueryStage{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ExecutionTime": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExecutionTime = ptr.Int64(i64) + } + + case "InputBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InputBytes = ptr.Int64(i64) + } + + case "InputRows": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InputRows = ptr.Int64(i64) + } + + case "OutputBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.OutputBytes = ptr.Int64(i64) + } + + case "OutputRows": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.OutputRows = ptr.Int64(i64) + } + + case "QueryStagePlan": + if err := awsAwsjson11_deserializeDocumentQueryStagePlanNode(&sv.QueryStagePlan, value); err != nil { + return err + } + + case "StageId": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StageId = ptr.Int64(i64) + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of 
type string, got %T instead", value) + } + sv.State = ptr.String(jtv) + } + + case "SubStages": + if err := awsAwsjson11_deserializeDocumentQueryStages(&sv.SubStages, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryStagePlanNode(v **types.QueryStagePlanNode, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.QueryStagePlanNode + if *v == nil { + sv = &types.QueryStagePlanNode{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Children": + if err := awsAwsjson11_deserializeDocumentQueryStagePlanNodes(&sv.Children, value); err != nil { + return err + } + + case "Identifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Identifier = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "RemoteSources": + if err := awsAwsjson11_deserializeDocumentStringList(&sv.RemoteSources, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryStagePlanNodes(v *[]types.QueryStagePlanNode, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.QueryStagePlanNode + if *v == nil { + cv = []types.QueryStagePlanNode{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.QueryStagePlanNode + destAddr := &col + if err := awsAwsjson11_deserializeDocumentQueryStagePlanNode(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentQueryStages(v *[]types.QueryStage, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.QueryStage + if *v == nil { + cv = []types.QueryStage{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.QueryStage + destAddr := &col + if err := awsAwsjson11_deserializeDocumentQueryStage(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "ResourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AmazonResourceName to be of type string, got %T instead", value) + } + sv.ResourceName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResultConfiguration(v **types.ResultConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResultConfiguration + if *v == nil { + sv = &types.ResultConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AclConfiguration": + if err := awsAwsjson11_deserializeDocumentAclConfiguration(&sv.AclConfiguration, value); err != nil { + return err + } + + case "EncryptionConfiguration": + if err := awsAwsjson11_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, value); err != nil { + return err + } + + case "ExpectedBucketOwner": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AwsAccountId to be of type string, got %T instead", value) + } + sv.ExpectedBucketOwner = ptr.String(jtv) + } + + case "OutputLocation": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResultOutputLocation to be of type string, got %T instead", value) + } + sv.OutputLocation = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResultReuseByAgeConfiguration(v **types.ResultReuseByAgeConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResultReuseByAgeConfiguration + if *v == nil { + sv = &types.ResultReuseByAgeConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Enabled = jtv + } + + case "MaxAgeInMinutes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Age to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxAgeInMinutes = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResultReuseConfiguration(v **types.ResultReuseConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResultReuseConfiguration + if *v == nil { + sv = &types.ResultReuseConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ResultReuseByAgeConfiguration": + if err := awsAwsjson11_deserializeDocumentResultReuseByAgeConfiguration(&sv.ResultReuseByAgeConfiguration, value); 
err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResultReuseInformation(v **types.ResultReuseInformation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResultReuseInformation + if *v == nil { + sv = &types.ResultReuseInformation{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ReusedPreviousResult": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.ReusedPreviousResult = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResultSet(v **types.ResultSet, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResultSet + if *v == nil { + sv = &types.ResultSet{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ResultSetMetadata": + if err := awsAwsjson11_deserializeDocumentResultSetMetadata(&sv.ResultSetMetadata, value); err != nil { + return err + } + + case "Rows": + if err := awsAwsjson11_deserializeDocumentRowList(&sv.Rows, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentResultSetMetadata(v **types.ResultSetMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResultSetMetadata + if *v == nil { + sv = &types.ResultSetMetadata{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ColumnInfo": + if err := awsAwsjson11_deserializeDocumentColumnInfoList(&sv.ColumnInfo, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRow(v **types.Row, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Row + if *v == nil { + sv = &types.Row{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Data": + if err := awsAwsjson11_deserializeDocumentDatumList(&sv.Data, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentRowList(v *[]types.Row, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Row + if *v == nil { + cv = []types.Row{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Row + destAddr := &col + if err := 
awsAwsjson11_deserializeDocumentRow(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentSessionAlreadyExistsException(v **types.SessionAlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SessionAlreadyExistsException + if *v == nil { + sv = &types.SessionAlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSessionConfiguration(v **types.SessionConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SessionConfiguration + if *v == nil { + sv = &types.SessionConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EncryptionConfiguration": + if err := awsAwsjson11_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, value); err != nil { + return err + } + + case "ExecutionRole": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleArn to be of type string, got %T instead", value) + } + sv.ExecutionRole = ptr.String(jtv) + } + + case "IdleTimeoutSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.IdleTimeoutSeconds = ptr.Int64(i64) + } + + case "WorkingDirectory": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResultOutputLocation to be of type string, got %T instead", value) + } + sv.WorkingDirectory = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSessionsList(v *[]types.SessionSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SessionSummary + if *v == nil { + cv = []types.SessionSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SessionSummary + destAddr := &col + if err := awsAwsjson11_deserializeDocumentSessionSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentSessionStatistics(v **types.SessionStatistics, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SessionStatistics + if 
*v == nil { + sv = &types.SessionStatistics{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DpuExecutionInMillis": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DpuExecutionInMillis = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSessionStatus(v **types.SessionStatus, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SessionStatus + if *v == nil { + sv = &types.SessionStatus{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EndDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.EndDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "IdleSinceDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.IdleSinceDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "LastModifiedDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModifiedDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "StartDateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.StartDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionState to be of type string, got %T instead", value) + } + sv.State = types.SessionState(jtv) + } + + case "StateChangeReason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.StateChangeReason = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentSessionSummary(v **types.SessionSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SessionSummary + if *v == nil { + sv = &types.SessionSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "EngineVersion": + if err := 
awsAwsjson11_deserializeDocumentEngineVersion(&sv.EngineVersion, value); err != nil { + return err + } + + case "NotebookVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.NotebookVersion = ptr.String(jtv) + } + + case "SessionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionId to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + + case "Status": + if err := awsAwsjson11_deserializeDocumentSessionStatus(&sv.Status, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentStringList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentSupportedDPUSizeList(v *[]int32, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []int32 + if *v == nil { + cv = []int32{} + } else { + cv = *v + } + + for _, value := range shape { + var col int32 + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + col = int32(i64) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTableMetadata(v **types.TableMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TableMetadata + if *v == nil { + sv = &types.TableMetadata{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Columns": + if err := awsAwsjson11_deserializeDocumentColumnList(&sv.Columns, value); err != nil { + return err + } + + case "CreateTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreateTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "LastAccessTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastAccessTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + 
} + sv.Name = ptr.String(jtv) + } + + case "Parameters": + if err := awsAwsjson11_deserializeDocumentParametersMap(&sv.Parameters, value); err != nil { + return err + } + + case "PartitionKeys": + if err := awsAwsjson11_deserializeDocumentColumnList(&sv.PartitionKeys, value); err != nil { + return err + } + + case "TableType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TableTypeString to be of type string, got %T instead", value) + } + sv.TableType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTableMetadataList(v *[]types.TableMetadata, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TableMetadata + if *v == nil { + cv = []types.TableMetadata{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TableMetadata + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTableMetadata(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Tag + if *v == nil { + sv = &types.Tag{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Key": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) + } + sv.Key = ptr.String(jtv) + } + + case "Value": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + sv.Value = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.Tag + if *v == nil { + cv = []types.Tag{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.Tag + destAddr := &col + if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyRequestsException + if *v == nil { + sv = &types.TooManyRequestsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message 
= ptr.String(jtv) + } + + case "Reason": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ThrottleReason to be of type string, got %T instead", value) + } + sv.Reason = types.ThrottleReason(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUnprocessedNamedQueryId(v **types.UnprocessedNamedQueryId, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnprocessedNamedQueryId + if *v == nil { + sv = &types.UnprocessedNamedQueryId{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ErrorCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.ErrorCode = ptr.String(jtv) + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + case "NamedQueryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NamedQueryId to be of type string, got %T instead", value) + } + sv.NamedQueryId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUnprocessedNamedQueryIdList(v *[]types.UnprocessedNamedQueryId, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.UnprocessedNamedQueryId + if *v == nil { + cv = []types.UnprocessedNamedQueryId{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.UnprocessedNamedQueryId + destAddr := &col + if err := awsAwsjson11_deserializeDocumentUnprocessedNamedQueryId(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUnprocessedPreparedStatementName(v **types.UnprocessedPreparedStatementName, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnprocessedPreparedStatementName + if *v == nil { + sv = &types.UnprocessedPreparedStatementName{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ErrorCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.ErrorCode = ptr.String(jtv) + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + case "StatementName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StatementName to be of type string, got %T instead", value) + } + sv.StatementName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v 
= sv + return nil +} + +func awsAwsjson11_deserializeDocumentUnprocessedPreparedStatementNameList(v *[]types.UnprocessedPreparedStatementName, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.UnprocessedPreparedStatementName + if *v == nil { + cv = []types.UnprocessedPreparedStatementName{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.UnprocessedPreparedStatementName + destAddr := &col + if err := awsAwsjson11_deserializeDocumentUnprocessedPreparedStatementName(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentUnprocessedQueryExecutionId(v **types.UnprocessedQueryExecutionId, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnprocessedQueryExecutionId + if *v == nil { + sv = &types.UnprocessedQueryExecutionId{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ErrorCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.ErrorCode = ptr.String(jtv) + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + case "QueryExecutionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryExecutionId to be of type string, got %T instead", value) + } + sv.QueryExecutionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentUnprocessedQueryExecutionIdList(v *[]types.UnprocessedQueryExecutionId, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.UnprocessedQueryExecutionId + if *v == nil { + cv = []types.UnprocessedQueryExecutionId{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.UnprocessedQueryExecutionId + destAddr := &col + if err := awsAwsjson11_deserializeDocumentUnprocessedQueryExecutionId(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentWorkGroup(v **types.WorkGroup, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WorkGroup + if *v == nil { + sv = &types.WorkGroup{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Configuration": + if err := awsAwsjson11_deserializeDocumentWorkGroupConfiguration(&sv.Configuration, value); err != nil { + return err + } + + case "CreationTime": + if value != nil { + switch jtv 
:= value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupDescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "IdentityCenterApplicationArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdentityCenterApplicationArn to be of type string, got %T instead", value) + } + sv.IdentityCenterApplicationArn = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupState to be of type string, got %T instead", value) + } + sv.State = types.WorkGroupState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentWorkGroupConfiguration(v **types.WorkGroupConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WorkGroupConfiguration + if *v == nil { + sv = &types.WorkGroupConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AdditionalConfiguration": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.AdditionalConfiguration = ptr.String(jtv) + } + + case "BytesScannedCutoffPerQuery": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected BytesScannedCutoffValue to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.BytesScannedCutoffPerQuery = ptr.Int64(i64) + } + + case "CustomerContentEncryptionConfiguration": + if err := awsAwsjson11_deserializeDocumentCustomerContentEncryptionConfiguration(&sv.CustomerContentEncryptionConfiguration, value); err != nil { + return err + } + + case "EnableMinimumEncryptionConfiguration": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.EnableMinimumEncryptionConfiguration = ptr.Bool(jtv) + } + + case "EnforceWorkGroupConfiguration": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.EnforceWorkGroupConfiguration = ptr.Bool(jtv) + } + + case "EngineVersion": + if err := awsAwsjson11_deserializeDocumentEngineVersion(&sv.EngineVersion, value); err != nil { + return err + } + + case "ExecutionRole": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleArn to be of type string, got %T instead", value) + } + sv.ExecutionRole = ptr.String(jtv) + } + + case "IdentityCenterConfiguration": + if err := awsAwsjson11_deserializeDocumentIdentityCenterConfiguration(&sv.IdentityCenterConfiguration, value); 
err != nil { + return err + } + + case "PublishCloudWatchMetricsEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.PublishCloudWatchMetricsEnabled = ptr.Bool(jtv) + } + + case "QueryResultsS3AccessGrantsConfiguration": + if err := awsAwsjson11_deserializeDocumentQueryResultsS3AccessGrantsConfiguration(&sv.QueryResultsS3AccessGrantsConfiguration, value); err != nil { + return err + } + + case "RequesterPaysEnabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BoxedBoolean to be of type *bool, got %T instead", value) + } + sv.RequesterPaysEnabled = ptr.Bool(jtv) + } + + case "ResultConfiguration": + if err := awsAwsjson11_deserializeDocumentResultConfiguration(&sv.ResultConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentWorkGroupNamesList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupName to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentWorkGroupsList(v *[]types.WorkGroupSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.WorkGroupSummary + if *v == nil { + cv = []types.WorkGroupSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.WorkGroupSummary + destAddr := &col + if err := awsAwsjson11_deserializeDocumentWorkGroupSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentWorkGroupSummary(v **types.WorkGroupSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WorkGroupSummary + if *v == nil { + sv = &types.WorkGroupSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value) + + } + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupDescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "EngineVersion": + if err := awsAwsjson11_deserializeDocumentEngineVersion(&sv.EngineVersion, value); err != nil { + return err + } + + case 
"IdentityCenterApplicationArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdentityCenterApplicationArn to be of type string, got %T instead", value) + } + sv.IdentityCenterApplicationArn = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupState to be of type string, got %T instead", value) + } + sv.State = types.WorkGroupState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchGetNamedQueryOutput(v **BatchGetNamedQueryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchGetNamedQueryOutput + if *v == nil { + sv = &BatchGetNamedQueryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NamedQueries": + if err := awsAwsjson11_deserializeDocumentNamedQueryList(&sv.NamedQueries, value); err != nil { + return err + } + + case "UnprocessedNamedQueryIds": + if err := awsAwsjson11_deserializeDocumentUnprocessedNamedQueryIdList(&sv.UnprocessedNamedQueryIds, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchGetPreparedStatementOutput(v **BatchGetPreparedStatementOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchGetPreparedStatementOutput + if *v == nil { + sv = &BatchGetPreparedStatementOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "PreparedStatements": + if err := awsAwsjson11_deserializeDocumentPreparedStatementDetailsList(&sv.PreparedStatements, value); err != nil { + return err + } + + case "UnprocessedPreparedStatementNames": + if err := awsAwsjson11_deserializeDocumentUnprocessedPreparedStatementNameList(&sv.UnprocessedPreparedStatementNames, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentBatchGetQueryExecutionOutput(v **BatchGetQueryExecutionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *BatchGetQueryExecutionOutput + if *v == nil { + sv = &BatchGetQueryExecutionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "QueryExecutions": + if err := awsAwsjson11_deserializeDocumentQueryExecutionList(&sv.QueryExecutions, value); err != nil { + return err + } + + case "UnprocessedQueryExecutionIds": + if err := awsAwsjson11_deserializeDocumentUnprocessedQueryExecutionIdList(&sv.UnprocessedQueryExecutionIds, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil 
+} + +func awsAwsjson11_deserializeOpDocumentCancelCapacityReservationOutput(v **CancelCapacityReservationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CancelCapacityReservationOutput + if *v == nil { + sv = &CancelCapacityReservationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateCapacityReservationOutput(v **CreateCapacityReservationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateCapacityReservationOutput + if *v == nil { + sv = &CreateCapacityReservationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateDataCatalogOutput(v **CreateDataCatalogOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateDataCatalogOutput + if *v == nil { + sv = &CreateDataCatalogOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DataCatalog": + if err := awsAwsjson11_deserializeDocumentDataCatalog(&sv.DataCatalog, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateNamedQueryOutput(v **CreateNamedQueryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateNamedQueryOutput + if *v == nil { + sv = &CreateNamedQueryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NamedQueryId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NamedQueryId to be of type string, got %T instead", value) + } + sv.NamedQueryId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateNotebookOutput(v **CreateNotebookOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateNotebookOutput + if *v == nil { + sv = &CreateNotebookOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NotebookId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NotebookId to be of type string, got %T instead", value) + } + sv.NotebookId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeOpDocumentCreatePreparedStatementOutput(v **CreatePreparedStatementOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreatePreparedStatementOutput + if *v == nil { + sv = &CreatePreparedStatementOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreatePresignedNotebookUrlOutput(v **CreatePresignedNotebookUrlOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreatePresignedNotebookUrlOutput + if *v == nil { + sv = &CreatePresignedNotebookUrlOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AuthToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AuthToken to be of type string, got %T instead", value) + } + sv.AuthToken = ptr.String(jtv) + } + + case "AuthTokenExpirationTime": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.AuthTokenExpirationTime = ptr.Int64(i64) + } + + case "NotebookUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NotebookUrl = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentCreateWorkGroupOutput(v **CreateWorkGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateWorkGroupOutput + if *v == nil { + sv = &CreateWorkGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteCapacityReservationOutput(v **DeleteCapacityReservationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteCapacityReservationOutput + if *v == nil { + sv = &DeleteCapacityReservationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteDataCatalogOutput(v **DeleteDataCatalogOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteDataCatalogOutput + if *v == nil { + sv = &DeleteDataCatalogOutput{} + } else { + sv = *v 
+ } + + for key, value := range shape { + switch key { + case "DataCatalog": + if err := awsAwsjson11_deserializeDocumentDataCatalog(&sv.DataCatalog, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteNamedQueryOutput(v **DeleteNamedQueryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteNamedQueryOutput + if *v == nil { + sv = &DeleteNamedQueryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteNotebookOutput(v **DeleteNotebookOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteNotebookOutput + if *v == nil { + sv = &DeleteNotebookOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeletePreparedStatementOutput(v **DeletePreparedStatementOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeletePreparedStatementOutput + if *v == nil { + sv = &DeletePreparedStatementOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentDeleteWorkGroupOutput(v **DeleteWorkGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteWorkGroupOutput + if *v == nil { + sv = &DeleteWorkGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentExportNotebookOutput(v **ExportNotebookOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ExportNotebookOutput + if *v == nil { + sv = &ExportNotebookOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NotebookMetadata": + if err := awsAwsjson11_deserializeDocumentNotebookMetadata(&sv.NotebookMetadata, value); err != nil { + return err + } + + case "Payload": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Payload to be of type string, got %T instead", value) + } + sv.Payload = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeOpDocumentGetCalculationExecutionCodeOutput(v **GetCalculationExecutionCodeOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetCalculationExecutionCodeOutput + if *v == nil { + sv = &GetCalculationExecutionCodeOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CodeBlock": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CodeBlock to be of type string, got %T instead", value) + } + sv.CodeBlock = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetCalculationExecutionOutput(v **GetCalculationExecutionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetCalculationExecutionOutput + if *v == nil { + sv = &GetCalculationExecutionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CalculationExecutionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CalculationExecutionId to be of type string, got %T instead", value) + } + sv.CalculationExecutionId = ptr.String(jtv) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "Result": + if err := awsAwsjson11_deserializeDocumentCalculationResult(&sv.Result, value); err != nil { + return err + } + + case "SessionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionId to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + + case "Statistics": + if err := awsAwsjson11_deserializeDocumentCalculationStatistics(&sv.Statistics, value); err != nil { + return err + } + + case "Status": + if err := awsAwsjson11_deserializeDocumentCalculationStatus(&sv.Status, value); err != nil { + return err + } + + case "WorkingDirectory": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Uri to be of type string, got %T instead", value) + } + sv.WorkingDirectory = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetCalculationExecutionStatusOutput(v **GetCalculationExecutionStatusOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetCalculationExecutionStatusOutput + if *v == nil { + sv = &GetCalculationExecutionStatusOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Statistics": + if err := awsAwsjson11_deserializeDocumentCalculationStatistics(&sv.Statistics, value); err != nil { + return err + } + + case "Status": + if err := awsAwsjson11_deserializeDocumentCalculationStatus(&sv.Status, value); err != nil { + return err + } + + 
default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetCapacityAssignmentConfigurationOutput(v **GetCapacityAssignmentConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetCapacityAssignmentConfigurationOutput + if *v == nil { + sv = &GetCapacityAssignmentConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CapacityAssignmentConfiguration": + if err := awsAwsjson11_deserializeDocumentCapacityAssignmentConfiguration(&sv.CapacityAssignmentConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetCapacityReservationOutput(v **GetCapacityReservationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetCapacityReservationOutput + if *v == nil { + sv = &GetCapacityReservationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CapacityReservation": + if err := awsAwsjson11_deserializeDocumentCapacityReservation(&sv.CapacityReservation, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetDatabaseOutput(v **GetDatabaseOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetDatabaseOutput + if *v == nil { + sv = &GetDatabaseOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Database": + if err := awsAwsjson11_deserializeDocumentDatabase(&sv.Database, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetDataCatalogOutput(v **GetDataCatalogOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetDataCatalogOutput + if *v == nil { + sv = &GetDataCatalogOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DataCatalog": + if err := awsAwsjson11_deserializeDocumentDataCatalog(&sv.DataCatalog, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetNamedQueryOutput(v **GetNamedQueryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetNamedQueryOutput + if *v == nil { + sv = &GetNamedQueryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case 
"NamedQuery": + if err := awsAwsjson11_deserializeDocumentNamedQuery(&sv.NamedQuery, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetNotebookMetadataOutput(v **GetNotebookMetadataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetNotebookMetadataOutput + if *v == nil { + sv = &GetNotebookMetadataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NotebookMetadata": + if err := awsAwsjson11_deserializeDocumentNotebookMetadata(&sv.NotebookMetadata, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetPreparedStatementOutput(v **GetPreparedStatementOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetPreparedStatementOutput + if *v == nil { + sv = &GetPreparedStatementOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "PreparedStatement": + if err := awsAwsjson11_deserializeDocumentPreparedStatement(&sv.PreparedStatement, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetQueryExecutionOutput(v **GetQueryExecutionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetQueryExecutionOutput + if *v == nil { + sv = &GetQueryExecutionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "QueryExecution": + if err := awsAwsjson11_deserializeDocumentQueryExecution(&sv.QueryExecution, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetQueryResultsOutput(v **GetQueryResultsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetQueryResultsOutput + if *v == nil { + sv = &GetQueryResultsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "ResultSet": + if err := awsAwsjson11_deserializeDocumentResultSet(&sv.ResultSet, value); err != nil { + return err + } + + case "UpdateCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.UpdateCount = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + 
return nil +} + +func awsAwsjson11_deserializeOpDocumentGetQueryRuntimeStatisticsOutput(v **GetQueryRuntimeStatisticsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetQueryRuntimeStatisticsOutput + if *v == nil { + sv = &GetQueryRuntimeStatisticsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "QueryRuntimeStatistics": + if err := awsAwsjson11_deserializeDocumentQueryRuntimeStatistics(&sv.QueryRuntimeStatistics, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetSessionOutput(v **GetSessionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetSessionOutput + if *v == nil { + sv = &GetSessionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DescriptionString to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "EngineConfiguration": + if err := awsAwsjson11_deserializeDocumentEngineConfiguration(&sv.EngineConfiguration, value); err != nil { + return err + } + + case "EngineVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.EngineVersion = ptr.String(jtv) + } + + case "NotebookVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NameString to be of type string, got %T instead", value) + } + sv.NotebookVersion = ptr.String(jtv) + } + + case "SessionConfiguration": + if err := awsAwsjson11_deserializeDocumentSessionConfiguration(&sv.SessionConfiguration, value); err != nil { + return err + } + + case "SessionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionId to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + + case "Statistics": + if err := awsAwsjson11_deserializeDocumentSessionStatistics(&sv.Statistics, value); err != nil { + return err + } + + case "Status": + if err := awsAwsjson11_deserializeDocumentSessionStatus(&sv.Status, value); err != nil { + return err + } + + case "WorkGroup": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected WorkGroupName to be of type string, got %T instead", value) + } + sv.WorkGroup = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetSessionStatusOutput(v **GetSessionStatusOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetSessionStatusOutput + if *v == nil { + sv = &GetSessionStatusOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SessionId": + if value != nil { 
+ jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionId to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + + case "Status": + if err := awsAwsjson11_deserializeDocumentSessionStatus(&sv.Status, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetTableMetadataOutput(v **GetTableMetadataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetTableMetadataOutput + if *v == nil { + sv = &GetTableMetadataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TableMetadata": + if err := awsAwsjson11_deserializeDocumentTableMetadata(&sv.TableMetadata, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentGetWorkGroupOutput(v **GetWorkGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetWorkGroupOutput + if *v == nil { + sv = &GetWorkGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "WorkGroup": + if err := awsAwsjson11_deserializeDocumentWorkGroup(&sv.WorkGroup, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentImportNotebookOutput(v **ImportNotebookOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ImportNotebookOutput + if *v == nil { + sv = &ImportNotebookOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NotebookId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NotebookId to be of type string, got %T instead", value) + } + sv.NotebookId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListApplicationDPUSizesOutput(v **ListApplicationDPUSizesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListApplicationDPUSizesOutput + if *v == nil { + sv = &ListApplicationDPUSizesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ApplicationDPUSizes": + if err := awsAwsjson11_deserializeDocumentApplicationDPUSizesList(&sv.ApplicationDPUSizes, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeOpDocumentListCalculationExecutionsOutput(v **ListCalculationExecutionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListCalculationExecutionsOutput + if *v == nil { + sv = &ListCalculationExecutionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Calculations": + if err := awsAwsjson11_deserializeDocumentCalculationsList(&sv.Calculations, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionManagerToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListCapacityReservationsOutput(v **ListCapacityReservationsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListCapacityReservationsOutput + if *v == nil { + sv = &ListCapacityReservationsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CapacityReservations": + if err := awsAwsjson11_deserializeDocumentCapacityReservationsList(&sv.CapacityReservations, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListDatabasesOutput(v **ListDatabasesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListDatabasesOutput + if *v == nil { + sv = &ListDatabasesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DatabaseList": + if err := awsAwsjson11_deserializeDocumentDatabaseList(&sv.DatabaseList, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListDataCatalogsOutput(v **ListDataCatalogsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListDataCatalogsOutput + if *v == nil { + sv = &ListDataCatalogsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DataCatalogsSummary": + if err := awsAwsjson11_deserializeDocumentDataCatalogSummaryList(&sv.DataCatalogsSummary, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + 
jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListEngineVersionsOutput(v **ListEngineVersionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListEngineVersionsOutput + if *v == nil { + sv = &ListEngineVersionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EngineVersions": + if err := awsAwsjson11_deserializeDocumentEngineVersionsList(&sv.EngineVersions, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListExecutorsOutput(v **ListExecutorsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListExecutorsOutput + if *v == nil { + sv = &ListExecutorsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ExecutorsSummary": + if err := awsAwsjson11_deserializeDocumentExecutorsSummaryList(&sv.ExecutorsSummary, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionManagerToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "SessionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionId to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListNamedQueriesOutput(v **ListNamedQueriesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListNamedQueriesOutput + if *v == nil { + sv = &ListNamedQueriesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NamedQueryIds": + if err := awsAwsjson11_deserializeDocumentNamedQueryIdList(&sv.NamedQueryIds, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListNotebookMetadataOutput(v **ListNotebookMetadataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected 
JSON type %v", value) + } + + var sv *ListNotebookMetadataOutput + if *v == nil { + sv = &ListNotebookMetadataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "NotebookMetadataList": + if err := awsAwsjson11_deserializeDocumentNotebookMetadataArray(&sv.NotebookMetadataList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListNotebookSessionsOutput(v **ListNotebookSessionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListNotebookSessionsOutput + if *v == nil { + sv = &ListNotebookSessionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "NotebookSessionsList": + if err := awsAwsjson11_deserializeDocumentNotebookSessionsList(&sv.NotebookSessionsList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListPreparedStatementsOutput(v **ListPreparedStatementsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListPreparedStatementsOutput + if *v == nil { + sv = &ListPreparedStatementsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "PreparedStatements": + if err := awsAwsjson11_deserializeDocumentPreparedStatementsList(&sv.PreparedStatements, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListQueryExecutionsOutput(v **ListQueryExecutionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListQueryExecutionsOutput + if *v == nil { + sv = &ListQueryExecutionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "QueryExecutionIds": + if err := awsAwsjson11_deserializeDocumentQueryExecutionIdList(&sv.QueryExecutionIds, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeOpDocumentListSessionsOutput(v **ListSessionsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListSessionsOutput + if *v == nil { + sv = &ListSessionsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionManagerToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "Sessions": + if err := awsAwsjson11_deserializeDocumentSessionsList(&sv.Sessions, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTableMetadataOutput(v **ListTableMetadataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTableMetadataOutput + if *v == nil { + sv = &ListTableMetadataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "TableMetadataList": + if err := awsAwsjson11_deserializeDocumentTableMetadataList(&sv.TableMetadataList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagsForResourceOutput + if *v == nil { + sv = &ListTagsForResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "Tags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentListWorkGroupsOutput(v **ListWorkGroupsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListWorkGroupsOutput + if *v == nil { + sv = &ListWorkGroupsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Token to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "WorkGroups": + if err := awsAwsjson11_deserializeDocumentWorkGroupsList(&sv.WorkGroups, value); err != 
nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentPutCapacityAssignmentConfigurationOutput(v **PutCapacityAssignmentConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutCapacityAssignmentConfigurationOutput + if *v == nil { + sv = &PutCapacityAssignmentConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStartCalculationExecutionOutput(v **StartCalculationExecutionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartCalculationExecutionOutput + if *v == nil { + sv = &StartCalculationExecutionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CalculationExecutionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CalculationExecutionId to be of type string, got %T instead", value) + } + sv.CalculationExecutionId = ptr.String(jtv) + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CalculationExecutionState to be of type string, got %T instead", value) + } + sv.State = types.CalculationExecutionState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStartQueryExecutionOutput(v **StartQueryExecutionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartQueryExecutionOutput + if *v == nil { + sv = &StartQueryExecutionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "QueryExecutionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected QueryExecutionId to be of type string, got %T instead", value) + } + sv.QueryExecutionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStartSessionOutput(v **StartSessionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartSessionOutput + if *v == nil { + sv = &StartSessionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SessionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionId to be of type string, got %T instead", value) + } + sv.SessionId = ptr.String(jtv) + } + + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionState to be of type string, got %T instead", value) + } + sv.State = types.SessionState(jtv) + } 
+ + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStopCalculationExecutionOutput(v **StopCalculationExecutionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StopCalculationExecutionOutput + if *v == nil { + sv = &StopCalculationExecutionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CalculationExecutionState to be of type string, got %T instead", value) + } + sv.State = types.CalculationExecutionState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentStopQueryExecutionOutput(v **StopQueryExecutionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StopQueryExecutionOutput + if *v == nil { + sv = &StopQueryExecutionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentTagResourceOutput(v **TagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *TagResourceOutput + if *v == nil { + sv = &TagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentTerminateSessionOutput(v **TerminateSessionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *TerminateSessionOutput + if *v == nil { + sv = &TerminateSessionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "State": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionState to be of type string, got %T instead", value) + } + sv.State = types.SessionState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUntagResourceOutput(v **UntagResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UntagResourceOutput + if *v == nil { + sv = &UntagResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateCapacityReservationOutput(v **UpdateCapacityReservationOutput, value interface{}) error { + if v == nil { + 
return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateCapacityReservationOutput + if *v == nil { + sv = &UpdateCapacityReservationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateDataCatalogOutput(v **UpdateDataCatalogOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateDataCatalogOutput + if *v == nil { + sv = &UpdateDataCatalogOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateNamedQueryOutput(v **UpdateNamedQueryOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateNamedQueryOutput + if *v == nil { + sv = &UpdateNamedQueryOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateNotebookMetadataOutput(v **UpdateNotebookMetadataOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateNotebookMetadataOutput + if *v == nil { + sv = &UpdateNotebookMetadataOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateNotebookOutput(v **UpdateNotebookOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateNotebookOutput + if *v == nil { + sv = &UpdateNotebookOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdatePreparedStatementOutput(v **UpdatePreparedStatementOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdatePreparedStatementOutput + if *v == nil { + sv = &UpdatePreparedStatementOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateWorkGroupOutput(v **UpdateWorkGroupOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", 
v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateWorkGroupOutput + if *v == nil { + sv = &UpdateWorkGroupOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type protocolErrorInfo struct { + Type string `json:"__type"` + Message string + Code any // nonstandard for awsjson but some services do present the type here +} + +func getProtocolErrorInfo(decoder *json.Decoder) (protocolErrorInfo, error) { + var errInfo protocolErrorInfo + if err := decoder.Decode(&errInfo); err != nil { + if err == io.EOF { + return errInfo, nil + } + return errInfo, err + } + + return errInfo, nil +} + +func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) { + if len(headerType) != 0 { + return headerType, true + } else if len(bodyInfo.Type) != 0 { + return bodyInfo.Type, true + } else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 { + return code, true + } + return "", false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/doc.go new file mode 100644 index 00000000..719025bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/doc.go @@ -0,0 +1,20 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package athena provides the API client, operations, and parameter types for +// Amazon Athena. +// +// Amazon Athena is an interactive query service that lets you use standard SQL to +// analyze data directly in Amazon S3. You can point Athena at your data in Amazon +// S3 and run ad-hoc queries and get results in seconds. Athena is serverless, so +// there is no infrastructure to set up or manage. You pay only for the queries you +// run. Athena scales automatically—executing queries in parallel—so results are +// fast, even with large datasets and complex queries. For more information, see [What is Amazon Athena] +// in the Amazon Athena User Guide. +// +// If you connect to Athena using the JDBC driver, use version 1.1.0 of the driver +// or later with the Amazon Athena API. Earlier version drivers do not support the +// API. For more information and to download the driver, see [Accessing Amazon Athena with JDBC]. +// +// [Accessing Amazon Athena with JDBC]: https://docs.aws.amazon.com/athena/latest/ug/connect-with-jdbc.html +// [What is Amazon Athena]: http://docs.aws.amazon.com/athena/latest/ug/what-is.html +package athena diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/endpoints.go new file mode 100644 index 00000000..bc4e2563 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/endpoints.go @@ -0,0 +1,537 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package athena + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/athena/internal/endpoints" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. 
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "athena" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_ATHENA") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "Athena", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string +} + +// ValidateRequired validates required parameters are set. 
+func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + return p +} + +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://athena-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if _PartitionResult.SupportsFIPS == true { + uriString := func() string { + var out strings.Builder + 
out.WriteString("https://athena-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://athena.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://athena.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = bindRegion(options.Region) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) + if err != nil { + return out, 
metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/generated.json new file mode 100644 index 00000000..37e93aa1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/generated.json @@ -0,0 +1,101 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_BatchGetNamedQuery.go", + "api_op_BatchGetPreparedStatement.go", + "api_op_BatchGetQueryExecution.go", + "api_op_CancelCapacityReservation.go", + "api_op_CreateCapacityReservation.go", + "api_op_CreateDataCatalog.go", + "api_op_CreateNamedQuery.go", + "api_op_CreateNotebook.go", + "api_op_CreatePreparedStatement.go", + "api_op_CreatePresignedNotebookUrl.go", + "api_op_CreateWorkGroup.go", + "api_op_DeleteCapacityReservation.go", + "api_op_DeleteDataCatalog.go", + "api_op_DeleteNamedQuery.go", + "api_op_DeleteNotebook.go", + "api_op_DeletePreparedStatement.go", + "api_op_DeleteWorkGroup.go", + "api_op_ExportNotebook.go", + "api_op_GetCalculationExecution.go", + "api_op_GetCalculationExecutionCode.go", + "api_op_GetCalculationExecutionStatus.go", + "api_op_GetCapacityAssignmentConfiguration.go", + "api_op_GetCapacityReservation.go", + "api_op_GetDataCatalog.go", + "api_op_GetDatabase.go", + "api_op_GetNamedQuery.go", + "api_op_GetNotebookMetadata.go", + "api_op_GetPreparedStatement.go", + "api_op_GetQueryExecution.go", + "api_op_GetQueryResults.go", + "api_op_GetQueryRuntimeStatistics.go", + "api_op_GetSession.go", + "api_op_GetSessionStatus.go", + "api_op_GetTableMetadata.go", + "api_op_GetWorkGroup.go", + "api_op_ImportNotebook.go", + "api_op_ListApplicationDPUSizes.go", + "api_op_ListCalculationExecutions.go", + "api_op_ListCapacityReservations.go", + "api_op_ListDataCatalogs.go", + "api_op_ListDatabases.go", + "api_op_ListEngineVersions.go", + "api_op_ListExecutors.go", + "api_op_ListNamedQueries.go", + "api_op_ListNotebookMetadata.go", + "api_op_ListNotebookSessions.go", + "api_op_ListPreparedStatements.go", + "api_op_ListQueryExecutions.go", + "api_op_ListSessions.go", + "api_op_ListTableMetadata.go", + "api_op_ListTagsForResource.go", + "api_op_ListWorkGroups.go", + "api_op_PutCapacityAssignmentConfiguration.go", + "api_op_StartCalculationExecution.go", + "api_op_StartQueryExecution.go", + "api_op_StartSession.go", + "api_op_StopCalculationExecution.go", + 
"api_op_StopQueryExecution.go", + "api_op_TagResource.go", + "api_op_TerminateSession.go", + "api_op_UntagResource.go", + "api_op_UpdateCapacityReservation.go", + "api_op_UpdateDataCatalog.go", + "api_op_UpdateNamedQuery.go", + "api_op_UpdateNotebook.go", + "api_op_UpdateNotebookMetadata.go", + "api_op_UpdatePreparedStatement.go", + "api_op_UpdateWorkGroup.go", + "auth.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "options.go", + "protocol_test.go", + "serializers.go", + "snapshot_test.go", + "sra_operation_order_test.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.22", + "module": "github.com/aws/aws-sdk-go-v2/service/athena", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/go_module_metadata.go new file mode 100644 index 00000000..273bca15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package athena + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.50.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/internal/endpoints/endpoints.go new file mode 100644 index 00000000..070d369d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/internal/endpoints/endpoints.go @@ -0,0 +1,823 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver Athena endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "athena.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "af-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.af-south-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-east-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + Variant: endpoints.DualStackVariant, + }: { + 
Hostname: "athena.ap-northeast-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-northeast-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-northeast-3.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-south-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-south-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-southeast-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-southeast-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-southeast-3.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-southeast-4.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ap-southeast-5.api.aws", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.ca-central-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ca-central-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.ca-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.ca-west-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.ca-west-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.eu-central-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + 
endpoints.EndpointKey{ + Region: "eu-central-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.eu-central-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.eu-north-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.eu-south-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.eu-south-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.eu-west-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.eu-west-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.eu-west-3.api.aws", + }, + endpoints.EndpointKey{ + Region: "fips-ca-central-1", + }: endpoints.Endpoint{ + Hostname: "athena-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-ca-west-1", + }: endpoints.Endpoint{ + Hostname: "athena-fips.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-1", + }: endpoints.Endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-2", + }: endpoints.Endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-1", + }: endpoints.Endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-2", + }: endpoints.Endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "il-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.il-central-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.me-central-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.me-south-1.api.aws", + }, + 
endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.sa-east-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.us-east-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.us-east-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.us-east-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.us-east-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.us-west-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.us-west-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.us-west-2.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.us-west-2.api.aws", + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "athena.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-north-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn", + }, + 
endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "athena.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "athena.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "athena.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "athena.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "eu-isoe-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "athena.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isof-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isof-south-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.{region}.api.aws", + 
Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "athena.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.us-gov-east-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.us-gov-east-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "athena-fips.us-gov-west-1.api.aws", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "athena.us-gov-west-1.api.aws", + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/options.go new file mode 100644 index 00000000..ab02f18c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/options.go @@ -0,0 +1,236 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. 
+ ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // Provides idempotency tokens values that will be automatically populated into + // idempotent API operations. + IdempotencyTokenProvider IdempotencyTokenProvider + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The client meter provider. + MeterProvider metrics.MeterProvider + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The client tracer provider. + TracerProvider tracing.TracerProvider + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. 
Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. 
+func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/serializers.go new file mode 100644 index 00000000..9b0d543a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/serializers.go @@ -0,0 +1,6148 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsjson11_serializeOpBatchGetNamedQuery struct { +} + +func (*awsAwsjson11_serializeOpBatchGetNamedQuery) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchGetNamedQuery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchGetNamedQueryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.BatchGetNamedQuery") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchGetNamedQueryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if 
request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpBatchGetPreparedStatement struct { +} + +func (*awsAwsjson11_serializeOpBatchGetPreparedStatement) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchGetPreparedStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchGetPreparedStatementInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.BatchGetPreparedStatement") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchGetPreparedStatementInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpBatchGetQueryExecution struct { +} + +func (*awsAwsjson11_serializeOpBatchGetQueryExecution) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpBatchGetQueryExecution) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*BatchGetQueryExecutionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + 
operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.BatchGetQueryExecution") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentBatchGetQueryExecutionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCancelCapacityReservation struct { +} + +func (*awsAwsjson11_serializeOpCancelCapacityReservation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCancelCapacityReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CancelCapacityReservationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.CancelCapacityReservation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCancelCapacityReservationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateCapacityReservation struct { +} + +func (*awsAwsjson11_serializeOpCreateCapacityReservation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateCapacityReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateCapacityReservationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.CreateCapacityReservation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateCapacityReservationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateDataCatalog struct { +} + +func (*awsAwsjson11_serializeOpCreateDataCatalog) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateDataCatalog) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateDataCatalogInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + 
request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.CreateDataCatalog") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateDataCatalogInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateNamedQuery struct { +} + +func (*awsAwsjson11_serializeOpCreateNamedQuery) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateNamedQuery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateNamedQueryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.CreateNamedQuery") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateNamedQueryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateNotebook 
struct { +} + +func (*awsAwsjson11_serializeOpCreateNotebook) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateNotebook) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateNotebookInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.CreateNotebook") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateNotebookInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreatePreparedStatement struct { +} + +func (*awsAwsjson11_serializeOpCreatePreparedStatement) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreatePreparedStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreatePreparedStatementInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = 
"POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.CreatePreparedStatement") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreatePreparedStatementInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreatePresignedNotebookUrl struct { +} + +func (*awsAwsjson11_serializeOpCreatePresignedNotebookUrl) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreatePresignedNotebookUrl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreatePresignedNotebookUrlInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.CreatePresignedNotebookUrl") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreatePresignedNotebookUrlInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateWorkGroup struct { +} + +func (*awsAwsjson11_serializeOpCreateWorkGroup) ID() string { + return "OperationSerializer" +} + +func (m 
*awsAwsjson11_serializeOpCreateWorkGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateWorkGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.CreateWorkGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateWorkGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteCapacityReservation struct { +} + +func (*awsAwsjson11_serializeOpDeleteCapacityReservation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteCapacityReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteCapacityReservationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, 
request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.DeleteCapacityReservation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteCapacityReservationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteDataCatalog struct { +} + +func (*awsAwsjson11_serializeOpDeleteDataCatalog) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteDataCatalog) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteDataCatalogInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.DeleteDataCatalog") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteDataCatalogInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteNamedQuery struct { +} + +func (*awsAwsjson11_serializeOpDeleteNamedQuery) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteNamedQuery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, 
metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteNamedQueryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.DeleteNamedQuery") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteNamedQueryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteNotebook struct { +} + +func (*awsAwsjson11_serializeOpDeleteNotebook) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteNotebook) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteNotebookInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.DeleteNotebook") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteNotebookInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeletePreparedStatement struct { +} + +func (*awsAwsjson11_serializeOpDeletePreparedStatement) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeletePreparedStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeletePreparedStatementInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.DeletePreparedStatement") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeletePreparedStatementInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteWorkGroup struct { +} + +func (*awsAwsjson11_serializeOpDeleteWorkGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteWorkGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + 
defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteWorkGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.DeleteWorkGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteWorkGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpExportNotebook struct { +} + +func (*awsAwsjson11_serializeOpExportNotebook) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpExportNotebook) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ExportNotebookInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ExportNotebook") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentExportNotebookInput(input, jsonEncoder.Value); err != nil { 
+ return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetCalculationExecution struct { +} + +func (*awsAwsjson11_serializeOpGetCalculationExecution) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetCalculationExecution) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCalculationExecutionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetCalculationExecution") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetCalculationExecutionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetCalculationExecutionCode struct { +} + +func (*awsAwsjson11_serializeOpGetCalculationExecutionCode) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetCalculationExecutionCode) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", 
in.Request)} + } + + input, ok := in.Parameters.(*GetCalculationExecutionCodeInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetCalculationExecutionCode") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetCalculationExecutionCodeInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetCalculationExecutionStatus struct { +} + +func (*awsAwsjson11_serializeOpGetCalculationExecutionStatus) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetCalculationExecutionStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCalculationExecutionStatusInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetCalculationExecutionStatus") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetCalculationExecutionStatusInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetCapacityAssignmentConfiguration struct { +} + +func (*awsAwsjson11_serializeOpGetCapacityAssignmentConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetCapacityAssignmentConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCapacityAssignmentConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetCapacityAssignmentConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetCapacityAssignmentConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetCapacityReservation struct { +} + +func (*awsAwsjson11_serializeOpGetCapacityReservation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetCapacityReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } 
+ + input, ok := in.Parameters.(*GetCapacityReservationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetCapacityReservation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetCapacityReservationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetDatabase struct { +} + +func (*awsAwsjson11_serializeOpGetDatabase) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetDatabase) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDatabaseInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetDatabase") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetDatabaseInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetDataCatalog struct { +} + +func (*awsAwsjson11_serializeOpGetDataCatalog) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetDataCatalog) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDataCatalogInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetDataCatalog") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetDataCatalogInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetNamedQuery struct { +} + +func (*awsAwsjson11_serializeOpGetNamedQuery) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetNamedQuery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetNamedQueryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 
0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetNamedQuery") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetNamedQueryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetNotebookMetadata struct { +} + +func (*awsAwsjson11_serializeOpGetNotebookMetadata) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetNotebookMetadata) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetNotebookMetadataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetNotebookMetadata") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetNotebookMetadataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return 
next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetPreparedStatement struct { +} + +func (*awsAwsjson11_serializeOpGetPreparedStatement) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetPreparedStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetPreparedStatementInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetPreparedStatement") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetPreparedStatementInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetQueryExecution struct { +} + +func (*awsAwsjson11_serializeOpGetQueryExecution) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetQueryExecution) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetQueryExecutionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetQueryExecution") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetQueryExecutionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetQueryResults struct { +} + +func (*awsAwsjson11_serializeOpGetQueryResults) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetQueryResults) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetQueryResultsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetQueryResults") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetQueryResultsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetQueryRuntimeStatistics struct { +} + +func (*awsAwsjson11_serializeOpGetQueryRuntimeStatistics) ID() string { + return 
"OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetQueryRuntimeStatistics) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetQueryRuntimeStatisticsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetQueryRuntimeStatistics") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetQueryRuntimeStatisticsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetSession struct { +} + +func (*awsAwsjson11_serializeOpGetSession) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSessionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, 
request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetSession") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetSessionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetSessionStatus struct { +} + +func (*awsAwsjson11_serializeOpGetSessionStatus) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetSessionStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSessionStatusInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetSessionStatus") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetSessionStatusInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetTableMetadata struct { +} + +func (*awsAwsjson11_serializeOpGetTableMetadata) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetTableMetadata) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetTableMetadataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetTableMetadata") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetTableMetadataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetWorkGroup struct { +} + +func (*awsAwsjson11_serializeOpGetWorkGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetWorkGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetWorkGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.GetWorkGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetWorkGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpImportNotebook struct { +} + +func (*awsAwsjson11_serializeOpImportNotebook) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpImportNotebook) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ImportNotebookInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ImportNotebook") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentImportNotebookInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListApplicationDPUSizes struct { +} + +func (*awsAwsjson11_serializeOpListApplicationDPUSizes) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListApplicationDPUSizes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer 
span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListApplicationDPUSizesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListApplicationDPUSizes") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListApplicationDPUSizesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListCalculationExecutions struct { +} + +func (*awsAwsjson11_serializeOpListCalculationExecutions) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListCalculationExecutions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListCalculationExecutionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListCalculationExecutions") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentListCalculationExecutionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListCapacityReservations struct { +} + +func (*awsAwsjson11_serializeOpListCapacityReservations) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListCapacityReservations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListCapacityReservationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListCapacityReservations") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListCapacityReservationsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListDatabases struct { +} + +func (*awsAwsjson11_serializeOpListDatabases) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListDatabases) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListDatabasesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListDatabases") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListDatabasesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListDataCatalogs struct { +} + +func (*awsAwsjson11_serializeOpListDataCatalogs) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListDataCatalogs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListDataCatalogsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListDataCatalogs") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListDataCatalogsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListEngineVersions struct { +} + +func (*awsAwsjson11_serializeOpListEngineVersions) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListEngineVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListEngineVersionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListEngineVersions") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListEngineVersionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListExecutors struct { +} + +func (*awsAwsjson11_serializeOpListExecutors) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListExecutors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListExecutorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListExecutors") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListExecutorsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListNamedQueries struct { +} + +func (*awsAwsjson11_serializeOpListNamedQueries) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListNamedQueries) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListNamedQueriesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListNamedQueries") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListNamedQueriesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListNotebookMetadata struct { +} + +func (*awsAwsjson11_serializeOpListNotebookMetadata) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListNotebookMetadata) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListNotebookMetadataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListNotebookMetadata") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListNotebookMetadataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListNotebookSessions struct { +} + +func (*awsAwsjson11_serializeOpListNotebookSessions) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListNotebookSessions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListNotebookSessionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListNotebookSessions") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListNotebookSessionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListPreparedStatements struct { +} + +func (*awsAwsjson11_serializeOpListPreparedStatements) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListPreparedStatements) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListPreparedStatementsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListPreparedStatements") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListPreparedStatementsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type 
awsAwsjson11_serializeOpListQueryExecutions struct { +} + +func (*awsAwsjson11_serializeOpListQueryExecutions) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListQueryExecutions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListQueryExecutionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListQueryExecutions") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListQueryExecutionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListSessions struct { +} + +func (*awsAwsjson11_serializeOpListSessions) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListSessions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListSessionsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + 
request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListSessions") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListSessionsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListTableMetadata struct { +} + +func (*awsAwsjson11_serializeOpListTableMetadata) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTableMetadata) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTableMetadataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListTableMetadata") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTableMetadataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListTagsForResource struct { +} + +func (*awsAwsjson11_serializeOpListTagsForResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, 
in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagsForResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListTagsForResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListTagsForResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListWorkGroups struct { +} + +func (*awsAwsjson11_serializeOpListWorkGroups) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListWorkGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListWorkGroupsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.ListWorkGroups") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListWorkGroupsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutCapacityAssignmentConfiguration struct { +} + +func (*awsAwsjson11_serializeOpPutCapacityAssignmentConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutCapacityAssignmentConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutCapacityAssignmentConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.PutCapacityAssignmentConfiguration") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutCapacityAssignmentConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpStartCalculationExecution struct { +} + +func (*awsAwsjson11_serializeOpStartCalculationExecution) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStartCalculationExecution) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, 
metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartCalculationExecutionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.StartCalculationExecution") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStartCalculationExecutionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpStartQueryExecution struct { +} + +func (*awsAwsjson11_serializeOpStartQueryExecution) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStartQueryExecution) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartQueryExecutionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") 
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.StartQueryExecution") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStartQueryExecutionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpStartSession struct { +} + +func (*awsAwsjson11_serializeOpStartSession) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStartSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartSessionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.StartSession") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStartSessionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpStopCalculationExecution struct { +} + +func (*awsAwsjson11_serializeOpStopCalculationExecution) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStopCalculationExecution) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer 
span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StopCalculationExecutionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.StopCalculationExecution") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStopCalculationExecutionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpStopQueryExecution struct { +} + +func (*awsAwsjson11_serializeOpStopQueryExecution) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpStopQueryExecution) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StopQueryExecutionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.StopQueryExecution") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentStopQueryExecutionInput(input, 
jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpTagResource struct { +} + +func (*awsAwsjson11_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.TagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpTerminateSession struct { +} + +func (*awsAwsjson11_serializeOpTerminateSession) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpTerminateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TerminateSessionInput) 
+ _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.TerminateSession") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentTerminateSessionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUntagResource struct { +} + +func (*awsAwsjson11_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.UntagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateCapacityReservation struct { +} + +func (*awsAwsjson11_serializeOpUpdateCapacityReservation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateCapacityReservation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateCapacityReservationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.UpdateCapacityReservation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateCapacityReservationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateDataCatalog struct { +} + +func (*awsAwsjson11_serializeOpUpdateDataCatalog) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateDataCatalog) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateDataCatalogInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if 
len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.UpdateDataCatalog") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateDataCatalogInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateNamedQuery struct { +} + +func (*awsAwsjson11_serializeOpUpdateNamedQuery) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateNamedQuery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateNamedQueryInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.UpdateNamedQuery") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateNamedQueryInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() 
+ return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateNotebook struct { +} + +func (*awsAwsjson11_serializeOpUpdateNotebook) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateNotebook) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateNotebookInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.UpdateNotebook") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateNotebookInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateNotebookMetadata struct { +} + +func (*awsAwsjson11_serializeOpUpdateNotebookMetadata) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateNotebookMetadata) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateNotebookMetadataInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.UpdateNotebookMetadata") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateNotebookMetadataInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdatePreparedStatement struct { +} + +func (*awsAwsjson11_serializeOpUpdatePreparedStatement) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdatePreparedStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdatePreparedStatementInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.UpdatePreparedStatement") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdatePreparedStatementInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateWorkGroup struct { +} + +func (*awsAwsjson11_serializeOpUpdateWorkGroup) ID() 
string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateWorkGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateWorkGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("AmazonAthena.UpdateWorkGroup") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateWorkGroupInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsAwsjson11_serializeDocumentAclConfiguration(v *types.AclConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.S3AclOption) > 0 { + ok := object.Key("S3AclOption") + ok.String(string(v.S3AclOption)) + } + + return nil +} + +func awsAwsjson11_serializeDocumentCalculationConfiguration(v *types.CalculationConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CodeBlock != nil { + ok := object.Key("CodeBlock") + ok.String(*v.CodeBlock) + } + + return nil +} + +func awsAwsjson11_serializeDocumentCapacityAssignment(v *types.CapacityAssignment, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.WorkGroupNames != nil { + ok := object.Key("WorkGroupNames") + if err := awsAwsjson11_serializeDocumentWorkGroupNamesList(v.WorkGroupNames, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentCapacityAssignmentsList(v []types.CapacityAssignment, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentCapacityAssignment(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentCustomerContentEncryptionConfiguration(v 
*types.CustomerContentEncryptionConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KmsKey != nil { + ok := object.Key("KmsKey") + ok.String(*v.KmsKey) + } + + return nil +} + +func awsAwsjson11_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.EncryptionOption) > 0 { + ok := object.Key("EncryptionOption") + ok.String(string(v.EncryptionOption)) + } + + if v.KmsKey != nil { + ok := object.Key("KmsKey") + ok.String(*v.KmsKey) + } + + return nil +} + +func awsAwsjson11_serializeDocumentEngineConfiguration(v *types.EngineConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AdditionalConfigs != nil { + ok := object.Key("AdditionalConfigs") + if err := awsAwsjson11_serializeDocumentParametersMap(v.AdditionalConfigs, ok); err != nil { + return err + } + } + + if v.CoordinatorDpuSize != nil { + ok := object.Key("CoordinatorDpuSize") + ok.Integer(*v.CoordinatorDpuSize) + } + + if v.DefaultExecutorDpuSize != nil { + ok := object.Key("DefaultExecutorDpuSize") + ok.Integer(*v.DefaultExecutorDpuSize) + } + + if v.MaxConcurrentDpus != nil { + ok := object.Key("MaxConcurrentDpus") + ok.Integer(*v.MaxConcurrentDpus) + } + + if v.SparkProperties != nil { + ok := object.Key("SparkProperties") + if err := awsAwsjson11_serializeDocumentParametersMap(v.SparkProperties, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentEngineVersion(v *types.EngineVersion, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EffectiveEngineVersion != nil { + ok := object.Key("EffectiveEngineVersion") + ok.String(*v.EffectiveEngineVersion) + } + + if v.SelectedEngineVersion != nil { + ok := object.Key("SelectedEngineVersion") + ok.String(*v.SelectedEngineVersion) + } + + return nil +} + +func awsAwsjson11_serializeDocumentExecutionParameters(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentFilterDefinition(v *types.FilterDefinition, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeDocumentIdentityCenterConfiguration(v *types.IdentityCenterConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EnableIdentityCenter != nil { + ok := object.Key("EnableIdentityCenter") + ok.Boolean(*v.EnableIdentityCenter) + } + + if v.IdentityCenterInstanceArn != nil { + ok := object.Key("IdentityCenterInstanceArn") + ok.String(*v.IdentityCenterInstanceArn) + } + + return nil +} + +func awsAwsjson11_serializeDocumentNamedQueryIdList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentParametersMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson11_serializeDocumentPreparedStatementNameList(v []string, value smithyjson.Value) error { + array := value.Array() + defer 
array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentQueryExecutionContext(v *types.QueryExecutionContext, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Catalog != nil { + ok := object.Key("Catalog") + ok.String(*v.Catalog) + } + + if v.Database != nil { + ok := object.Key("Database") + ok.String(*v.Database) + } + + return nil +} + +func awsAwsjson11_serializeDocumentQueryExecutionIdList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentQueryResultsS3AccessGrantsConfiguration(v *types.QueryResultsS3AccessGrantsConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.AuthenticationType) > 0 { + ok := object.Key("AuthenticationType") + ok.String(string(v.AuthenticationType)) + } + + if v.CreateUserLevelPrefix != nil { + ok := object.Key("CreateUserLevelPrefix") + ok.Boolean(*v.CreateUserLevelPrefix) + } + + if v.EnableS3AccessGrants != nil { + ok := object.Key("EnableS3AccessGrants") + ok.Boolean(*v.EnableS3AccessGrants) + } + + return nil +} + +func awsAwsjson11_serializeDocumentResultConfiguration(v *types.ResultConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AclConfiguration != nil { + ok := object.Key("AclConfiguration") + if err := awsAwsjson11_serializeDocumentAclConfiguration(v.AclConfiguration, ok); err != nil { + return err + } + } + + if v.EncryptionConfiguration != nil { + ok := object.Key("EncryptionConfiguration") + if err := awsAwsjson11_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, ok); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil { + ok := object.Key("ExpectedBucketOwner") + ok.String(*v.ExpectedBucketOwner) + } + + if v.OutputLocation != nil { + ok := object.Key("OutputLocation") + ok.String(*v.OutputLocation) + } + + return nil +} + +func awsAwsjson11_serializeDocumentResultConfigurationUpdates(v *types.ResultConfigurationUpdates, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AclConfiguration != nil { + ok := object.Key("AclConfiguration") + if err := awsAwsjson11_serializeDocumentAclConfiguration(v.AclConfiguration, ok); err != nil { + return err + } + } + + if v.EncryptionConfiguration != nil { + ok := object.Key("EncryptionConfiguration") + if err := awsAwsjson11_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, ok); err != nil { + return err + } + } + + if v.ExpectedBucketOwner != nil { + ok := object.Key("ExpectedBucketOwner") + ok.String(*v.ExpectedBucketOwner) + } + + if v.OutputLocation != nil { + ok := object.Key("OutputLocation") + ok.String(*v.OutputLocation) + } + + if v.RemoveAclConfiguration != nil { + ok := object.Key("RemoveAclConfiguration") + ok.Boolean(*v.RemoveAclConfiguration) + } + + if v.RemoveEncryptionConfiguration != nil { + ok := object.Key("RemoveEncryptionConfiguration") + ok.Boolean(*v.RemoveEncryptionConfiguration) + } + + if v.RemoveExpectedBucketOwner != nil { + ok := object.Key("RemoveExpectedBucketOwner") + ok.Boolean(*v.RemoveExpectedBucketOwner) + } + + if v.RemoveOutputLocation != nil { + ok := object.Key("RemoveOutputLocation") + ok.Boolean(*v.RemoveOutputLocation) + } + + return nil +} + +func 
awsAwsjson11_serializeDocumentResultReuseByAgeConfiguration(v *types.ResultReuseByAgeConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("Enabled") + ok.Boolean(v.Enabled) + } + + if v.MaxAgeInMinutes != nil { + ok := object.Key("MaxAgeInMinutes") + ok.Integer(*v.MaxAgeInMinutes) + } + + return nil +} + +func awsAwsjson11_serializeDocumentResultReuseConfiguration(v *types.ResultReuseConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResultReuseByAgeConfiguration != nil { + ok := object.Key("ResultReuseByAgeConfiguration") + if err := awsAwsjson11_serializeDocumentResultReuseByAgeConfiguration(v.ResultReuseByAgeConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Key != nil { + ok := object.Key("Key") + ok.String(*v.Key) + } + + if v.Value != nil { + ok := object.Key("Value") + ok.String(*v.Value) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeDocumentWorkGroupConfiguration(v *types.WorkGroupConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AdditionalConfiguration != nil { + ok := object.Key("AdditionalConfiguration") + ok.String(*v.AdditionalConfiguration) + } + + if v.BytesScannedCutoffPerQuery != nil { + ok := object.Key("BytesScannedCutoffPerQuery") + ok.Long(*v.BytesScannedCutoffPerQuery) + } + + if v.CustomerContentEncryptionConfiguration != nil { + ok := object.Key("CustomerContentEncryptionConfiguration") + if err := awsAwsjson11_serializeDocumentCustomerContentEncryptionConfiguration(v.CustomerContentEncryptionConfiguration, ok); err != nil { + return err + } + } + + if v.EnableMinimumEncryptionConfiguration != nil { + ok := object.Key("EnableMinimumEncryptionConfiguration") + ok.Boolean(*v.EnableMinimumEncryptionConfiguration) + } + + if v.EnforceWorkGroupConfiguration != nil { + ok := object.Key("EnforceWorkGroupConfiguration") + ok.Boolean(*v.EnforceWorkGroupConfiguration) + } + + if v.EngineVersion != nil { + ok := object.Key("EngineVersion") + if err := awsAwsjson11_serializeDocumentEngineVersion(v.EngineVersion, ok); err != nil { + return err + } + } + + if v.ExecutionRole != nil { + ok := object.Key("ExecutionRole") + ok.String(*v.ExecutionRole) + } + + if v.IdentityCenterConfiguration != nil { + ok := object.Key("IdentityCenterConfiguration") + if err := awsAwsjson11_serializeDocumentIdentityCenterConfiguration(v.IdentityCenterConfiguration, ok); err != nil { + return err + } + } + + if v.PublishCloudWatchMetricsEnabled != nil { + ok := object.Key("PublishCloudWatchMetricsEnabled") + ok.Boolean(*v.PublishCloudWatchMetricsEnabled) + } + + if v.QueryResultsS3AccessGrantsConfiguration != nil { + ok := object.Key("QueryResultsS3AccessGrantsConfiguration") + if err := 
awsAwsjson11_serializeDocumentQueryResultsS3AccessGrantsConfiguration(v.QueryResultsS3AccessGrantsConfiguration, ok); err != nil { + return err + } + } + + if v.RequesterPaysEnabled != nil { + ok := object.Key("RequesterPaysEnabled") + ok.Boolean(*v.RequesterPaysEnabled) + } + + if v.ResultConfiguration != nil { + ok := object.Key("ResultConfiguration") + if err := awsAwsjson11_serializeDocumentResultConfiguration(v.ResultConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentWorkGroupConfigurationUpdates(v *types.WorkGroupConfigurationUpdates, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AdditionalConfiguration != nil { + ok := object.Key("AdditionalConfiguration") + ok.String(*v.AdditionalConfiguration) + } + + if v.BytesScannedCutoffPerQuery != nil { + ok := object.Key("BytesScannedCutoffPerQuery") + ok.Long(*v.BytesScannedCutoffPerQuery) + } + + if v.CustomerContentEncryptionConfiguration != nil { + ok := object.Key("CustomerContentEncryptionConfiguration") + if err := awsAwsjson11_serializeDocumentCustomerContentEncryptionConfiguration(v.CustomerContentEncryptionConfiguration, ok); err != nil { + return err + } + } + + if v.EnableMinimumEncryptionConfiguration != nil { + ok := object.Key("EnableMinimumEncryptionConfiguration") + ok.Boolean(*v.EnableMinimumEncryptionConfiguration) + } + + if v.EnforceWorkGroupConfiguration != nil { + ok := object.Key("EnforceWorkGroupConfiguration") + ok.Boolean(*v.EnforceWorkGroupConfiguration) + } + + if v.EngineVersion != nil { + ok := object.Key("EngineVersion") + if err := awsAwsjson11_serializeDocumentEngineVersion(v.EngineVersion, ok); err != nil { + return err + } + } + + if v.ExecutionRole != nil { + ok := object.Key("ExecutionRole") + ok.String(*v.ExecutionRole) + } + + if v.PublishCloudWatchMetricsEnabled != nil { + ok := object.Key("PublishCloudWatchMetricsEnabled") + ok.Boolean(*v.PublishCloudWatchMetricsEnabled) + } + + if v.QueryResultsS3AccessGrantsConfiguration != nil { + ok := object.Key("QueryResultsS3AccessGrantsConfiguration") + if err := awsAwsjson11_serializeDocumentQueryResultsS3AccessGrantsConfiguration(v.QueryResultsS3AccessGrantsConfiguration, ok); err != nil { + return err + } + } + + if v.RemoveBytesScannedCutoffPerQuery != nil { + ok := object.Key("RemoveBytesScannedCutoffPerQuery") + ok.Boolean(*v.RemoveBytesScannedCutoffPerQuery) + } + + if v.RemoveCustomerContentEncryptionConfiguration != nil { + ok := object.Key("RemoveCustomerContentEncryptionConfiguration") + ok.Boolean(*v.RemoveCustomerContentEncryptionConfiguration) + } + + if v.RequesterPaysEnabled != nil { + ok := object.Key("RequesterPaysEnabled") + ok.Boolean(*v.RequesterPaysEnabled) + } + + if v.ResultConfigurationUpdates != nil { + ok := object.Key("ResultConfigurationUpdates") + if err := awsAwsjson11_serializeDocumentResultConfigurationUpdates(v.ResultConfigurationUpdates, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentWorkGroupNamesList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchGetNamedQueryInput(v *BatchGetNamedQueryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NamedQueryIds != nil { + ok := object.Key("NamedQueryIds") + if err := 
awsAwsjson11_serializeDocumentNamedQueryIdList(v.NamedQueryIds, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchGetPreparedStatementInput(v *BatchGetPreparedStatementInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.PreparedStatementNames != nil { + ok := object.Key("PreparedStatementNames") + if err := awsAwsjson11_serializeDocumentPreparedStatementNameList(v.PreparedStatementNames, ok); err != nil { + return err + } + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentBatchGetQueryExecutionInput(v *BatchGetQueryExecutionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.QueryExecutionIds != nil { + ok := object.Key("QueryExecutionIds") + if err := awsAwsjson11_serializeDocumentQueryExecutionIdList(v.QueryExecutionIds, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCancelCapacityReservationInput(v *CancelCapacityReservationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateCapacityReservationInput(v *CreateCapacityReservationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if v.TargetDpus != nil { + ok := object.Key("TargetDpus") + ok.Integer(*v.TargetDpus) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateDataCatalogInput(v *CreateDataCatalogInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Parameters != nil { + ok := object.Key("Parameters") + if err := awsAwsjson11_serializeDocumentParametersMap(v.Parameters, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + if len(v.Type) > 0 { + ok := object.Key("Type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateNamedQueryInput(v *CreateNamedQueryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.Database != nil { + ok := object.Key("Database") + ok.String(*v.Database) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.QueryString != nil { + ok := object.Key("QueryString") + ok.String(*v.QueryString) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateNotebookInput(v *CreateNotebookInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + 
ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreatePreparedStatementInput(v *CreatePreparedStatementInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.QueryStatement != nil { + ok := object.Key("QueryStatement") + ok.String(*v.QueryStatement) + } + + if v.StatementName != nil { + ok := object.Key("StatementName") + ok.String(*v.StatementName) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreatePresignedNotebookUrlInput(v *CreatePresignedNotebookUrlInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SessionId != nil { + ok := object.Key("SessionId") + ok.String(*v.SessionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateWorkGroupInput(v *CreateWorkGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Configuration != nil { + ok := object.Key("Configuration") + if err := awsAwsjson11_serializeDocumentWorkGroupConfiguration(v.Configuration, ok); err != nil { + return err + } + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteCapacityReservationInput(v *DeleteCapacityReservationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteDataCatalogInput(v *DeleteDataCatalogInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DeleteCatalogOnly { + ok := object.Key("DeleteCatalogOnly") + ok.Boolean(v.DeleteCatalogOnly) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteNamedQueryInput(v *DeleteNamedQueryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NamedQueryId != nil { + ok := object.Key("NamedQueryId") + ok.String(*v.NamedQueryId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteNotebookInput(v *DeleteNotebookInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NotebookId != nil { + ok := object.Key("NotebookId") + ok.String(*v.NotebookId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeletePreparedStatementInput(v *DeletePreparedStatementInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.StatementName != nil { + ok := object.Key("StatementName") + ok.String(*v.StatementName) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteWorkGroupInput(v *DeleteWorkGroupInput, value 
smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.RecursiveDeleteOption != nil { + ok := object.Key("RecursiveDeleteOption") + ok.Boolean(*v.RecursiveDeleteOption) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentExportNotebookInput(v *ExportNotebookInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NotebookId != nil { + ok := object.Key("NotebookId") + ok.String(*v.NotebookId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetCalculationExecutionCodeInput(v *GetCalculationExecutionCodeInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CalculationExecutionId != nil { + ok := object.Key("CalculationExecutionId") + ok.String(*v.CalculationExecutionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetCalculationExecutionInput(v *GetCalculationExecutionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CalculationExecutionId != nil { + ok := object.Key("CalculationExecutionId") + ok.String(*v.CalculationExecutionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetCalculationExecutionStatusInput(v *GetCalculationExecutionStatusInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CalculationExecutionId != nil { + ok := object.Key("CalculationExecutionId") + ok.String(*v.CalculationExecutionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetCapacityAssignmentConfigurationInput(v *GetCapacityAssignmentConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityReservationName != nil { + ok := object.Key("CapacityReservationName") + ok.String(*v.CapacityReservationName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetCapacityReservationInput(v *GetCapacityReservationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetDatabaseInput(v *GetDatabaseInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CatalogName != nil { + ok := object.Key("CatalogName") + ok.String(*v.CatalogName) + } + + if v.DatabaseName != nil { + ok := object.Key("DatabaseName") + ok.String(*v.DatabaseName) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetDataCatalogInput(v *GetDataCatalogInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetNamedQueryInput(v *GetNamedQueryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NamedQueryId != nil { + ok := object.Key("NamedQueryId") + ok.String(*v.NamedQueryId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetNotebookMetadataInput(v *GetNotebookMetadataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NotebookId != nil { + ok := 
object.Key("NotebookId") + ok.String(*v.NotebookId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetPreparedStatementInput(v *GetPreparedStatementInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.StatementName != nil { + ok := object.Key("StatementName") + ok.String(*v.StatementName) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetQueryExecutionInput(v *GetQueryExecutionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.QueryExecutionId != nil { + ok := object.Key("QueryExecutionId") + ok.String(*v.QueryExecutionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetQueryResultsInput(v *GetQueryResultsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.QueryExecutionId != nil { + ok := object.Key("QueryExecutionId") + ok.String(*v.QueryExecutionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetQueryRuntimeStatisticsInput(v *GetQueryRuntimeStatisticsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.QueryExecutionId != nil { + ok := object.Key("QueryExecutionId") + ok.String(*v.QueryExecutionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetSessionInput(v *GetSessionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SessionId != nil { + ok := object.Key("SessionId") + ok.String(*v.SessionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetSessionStatusInput(v *GetSessionStatusInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SessionId != nil { + ok := object.Key("SessionId") + ok.String(*v.SessionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetTableMetadataInput(v *GetTableMetadataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CatalogName != nil { + ok := object.Key("CatalogName") + ok.String(*v.CatalogName) + } + + if v.DatabaseName != nil { + ok := object.Key("DatabaseName") + ok.String(*v.DatabaseName) + } + + if v.TableName != nil { + ok := object.Key("TableName") + ok.String(*v.TableName) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetWorkGroupInput(v *GetWorkGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentImportNotebookInput(v *ImportNotebookInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.NotebookS3LocationUri != nil { + ok := object.Key("NotebookS3LocationUri") + ok.String(*v.NotebookS3LocationUri) + } + + if v.Payload != nil { + ok := object.Key("Payload") + ok.String(*v.Payload) + } + + if len(v.Type) > 0 { + ok := 
object.Key("Type") + ok.String(string(v.Type)) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListApplicationDPUSizesInput(v *ListApplicationDPUSizesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListCalculationExecutionsInput(v *ListCalculationExecutionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.SessionId != nil { + ok := object.Key("SessionId") + ok.String(*v.SessionId) + } + + if len(v.StateFilter) > 0 { + ok := object.Key("StateFilter") + ok.String(string(v.StateFilter)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListCapacityReservationsInput(v *ListCapacityReservationsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListDatabasesInput(v *ListDatabasesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CatalogName != nil { + ok := object.Key("CatalogName") + ok.String(*v.CatalogName) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListDataCatalogsInput(v *ListDataCatalogsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListEngineVersionsInput(v *ListEngineVersionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListExecutorsInput(v *ListExecutorsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ExecutorStateFilter) > 0 { + ok := object.Key("ExecutorStateFilter") + ok.String(string(v.ExecutorStateFilter)) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.SessionId != nil { + ok := object.Key("SessionId") + ok.String(*v.SessionId) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentListNamedQueriesInput(v *ListNamedQueriesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListNotebookMetadataInput(v *ListNotebookMetadataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filters != nil { + ok := object.Key("Filters") + if err := awsAwsjson11_serializeDocumentFilterDefinition(v.Filters, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListNotebookSessionsInput(v *ListNotebookSessionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.NotebookId != nil { + ok := object.Key("NotebookId") + ok.String(*v.NotebookId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListPreparedStatementsInput(v *ListPreparedStatementsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListQueryExecutionsInput(v *ListQueryExecutionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListSessionsInput(v *ListSessionsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if len(v.StateFilter) > 0 { + ok := object.Key("StateFilter") + ok.String(string(v.StateFilter)) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTableMetadataInput(v *ListTableMetadataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CatalogName != nil { + ok := object.Key("CatalogName") + ok.String(*v.CatalogName) + } + + if v.DatabaseName != nil { + ok := object.Key("DatabaseName") + ok.String(*v.DatabaseName) + } + + if v.Expression != nil { + ok := object.Key("Expression") + ok.String(*v.Expression) + } + + if v.MaxResults != nil { + 
ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListTagsForResourceInput(v *ListTagsForResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if v.ResourceARN != nil { + ok := object.Key("ResourceARN") + ok.String(*v.ResourceARN) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListWorkGroupsInput(v *ListWorkGroupsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutCapacityAssignmentConfigurationInput(v *PutCapacityAssignmentConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CapacityAssignments != nil { + ok := object.Key("CapacityAssignments") + if err := awsAwsjson11_serializeDocumentCapacityAssignmentsList(v.CapacityAssignments, ok); err != nil { + return err + } + } + + if v.CapacityReservationName != nil { + ok := object.Key("CapacityReservationName") + ok.String(*v.CapacityReservationName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStartCalculationExecutionInput(v *StartCalculationExecutionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CalculationConfiguration != nil { + ok := object.Key("CalculationConfiguration") + if err := awsAwsjson11_serializeDocumentCalculationConfiguration(v.CalculationConfiguration, ok); err != nil { + return err + } + } + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.CodeBlock != nil { + ok := object.Key("CodeBlock") + ok.String(*v.CodeBlock) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.SessionId != nil { + ok := object.Key("SessionId") + ok.String(*v.SessionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStartQueryExecutionInput(v *StartQueryExecutionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.ExecutionParameters != nil { + ok := object.Key("ExecutionParameters") + if err := awsAwsjson11_serializeDocumentExecutionParameters(v.ExecutionParameters, ok); err != nil { + return err + } + } + + if v.QueryExecutionContext != nil { + ok := object.Key("QueryExecutionContext") + if err := awsAwsjson11_serializeDocumentQueryExecutionContext(v.QueryExecutionContext, ok); err != nil { + return err + } + } + + if v.QueryString != nil { + ok := object.Key("QueryString") + ok.String(*v.QueryString) + } + + if v.ResultConfiguration != nil { + ok := object.Key("ResultConfiguration") + if err := awsAwsjson11_serializeDocumentResultConfiguration(v.ResultConfiguration, ok); err != nil { + return err + } + } + + if v.ResultReuseConfiguration != nil { + ok := 
object.Key("ResultReuseConfiguration") + if err := awsAwsjson11_serializeDocumentResultReuseConfiguration(v.ResultReuseConfiguration, ok); err != nil { + return err + } + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStartSessionInput(v *StartSessionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.EngineConfiguration != nil { + ok := object.Key("EngineConfiguration") + if err := awsAwsjson11_serializeDocumentEngineConfiguration(v.EngineConfiguration, ok); err != nil { + return err + } + } + + if v.NotebookVersion != nil { + ok := object.Key("NotebookVersion") + ok.String(*v.NotebookVersion) + } + + if v.SessionIdleTimeoutInMinutes != nil { + ok := object.Key("SessionIdleTimeoutInMinutes") + ok.Integer(*v.SessionIdleTimeoutInMinutes) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStopCalculationExecutionInput(v *StopCalculationExecutionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CalculationExecutionId != nil { + ok := object.Key("CalculationExecutionId") + ok.String(*v.CalculationExecutionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentStopQueryExecutionInput(v *StopQueryExecutionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.QueryExecutionId != nil { + ok := object.Key("QueryExecutionId") + ok.String(*v.QueryExecutionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceARN != nil { + ok := object.Key("ResourceARN") + ok.String(*v.ResourceARN) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTerminateSessionInput(v *TerminateSessionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SessionId != nil { + ok := object.Key("SessionId") + ok.String(*v.SessionId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceARN != nil { + ok := object.Key("ResourceARN") + ok.String(*v.ResourceARN) + } + + if v.TagKeys != nil { + ok := object.Key("TagKeys") + if err := awsAwsjson11_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateCapacityReservationInput(v *UpdateCapacityReservationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.TargetDpus != nil { + ok := object.Key("TargetDpus") + ok.Integer(*v.TargetDpus) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateDataCatalogInput(v *UpdateDataCatalogInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if 
v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Parameters != nil { + ok := object.Key("Parameters") + if err := awsAwsjson11_serializeDocumentParametersMap(v.Parameters, ok); err != nil { + return err + } + } + + if len(v.Type) > 0 { + ok := object.Key("Type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateNamedQueryInput(v *UpdateNamedQueryInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.NamedQueryId != nil { + ok := object.Key("NamedQueryId") + ok.String(*v.NamedQueryId) + } + + if v.QueryString != nil { + ok := object.Key("QueryString") + ok.String(*v.QueryString) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateNotebookInput(v *UpdateNotebookInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.NotebookId != nil { + ok := object.Key("NotebookId") + ok.String(*v.NotebookId) + } + + if v.Payload != nil { + ok := object.Key("Payload") + ok.String(*v.Payload) + } + + if v.SessionId != nil { + ok := object.Key("SessionId") + ok.String(*v.SessionId) + } + + if len(v.Type) > 0 { + ok := object.Key("Type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateNotebookMetadataInput(v *UpdateNotebookMetadataInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.NotebookId != nil { + ok := object.Key("NotebookId") + ok.String(*v.NotebookId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdatePreparedStatementInput(v *UpdatePreparedStatementInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.QueryStatement != nil { + ok := object.Key("QueryStatement") + ok.String(*v.QueryStatement) + } + + if v.StatementName != nil { + ok := object.Key("StatementName") + ok.String(*v.StatementName) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateWorkGroupInput(v *UpdateWorkGroupInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConfigurationUpdates != nil { + ok := object.Key("ConfigurationUpdates") + if err := awsAwsjson11_serializeDocumentWorkGroupConfigurationUpdates(v.ConfigurationUpdates, ok); err != nil { + return err + } + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if len(v.State) > 0 { + ok := object.Key("State") + ok.String(string(v.State)) + } + + if v.WorkGroup != nil { + ok := object.Key("WorkGroup") + ok.String(*v.WorkGroup) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/enums.go new file 
mode 100644 index 00000000..920abbd6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/enums.go @@ -0,0 +1,447 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type AuthenticationType string + +// Enum values for AuthenticationType +const ( + AuthenticationTypeDirectoryIdentity AuthenticationType = "DIRECTORY_IDENTITY" +) + +// Values returns all known values for AuthenticationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (AuthenticationType) Values() []AuthenticationType { + return []AuthenticationType{ + "DIRECTORY_IDENTITY", + } +} + +type CalculationExecutionState string + +// Enum values for CalculationExecutionState +const ( + CalculationExecutionStateCreating CalculationExecutionState = "CREATING" + CalculationExecutionStateCreated CalculationExecutionState = "CREATED" + CalculationExecutionStateQueued CalculationExecutionState = "QUEUED" + CalculationExecutionStateRunning CalculationExecutionState = "RUNNING" + CalculationExecutionStateCanceling CalculationExecutionState = "CANCELING" + CalculationExecutionStateCanceled CalculationExecutionState = "CANCELED" + CalculationExecutionStateCompleted CalculationExecutionState = "COMPLETED" + CalculationExecutionStateFailed CalculationExecutionState = "FAILED" +) + +// Values returns all known values for CalculationExecutionState. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CalculationExecutionState) Values() []CalculationExecutionState { + return []CalculationExecutionState{ + "CREATING", + "CREATED", + "QUEUED", + "RUNNING", + "CANCELING", + "CANCELED", + "COMPLETED", + "FAILED", + } +} + +type CapacityAllocationStatus string + +// Enum values for CapacityAllocationStatus +const ( + CapacityAllocationStatusPending CapacityAllocationStatus = "PENDING" + CapacityAllocationStatusSucceeded CapacityAllocationStatus = "SUCCEEDED" + CapacityAllocationStatusFailed CapacityAllocationStatus = "FAILED" +) + +// Values returns all known values for CapacityAllocationStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (CapacityAllocationStatus) Values() []CapacityAllocationStatus { + return []CapacityAllocationStatus{ + "PENDING", + "SUCCEEDED", + "FAILED", + } +} + +type CapacityReservationStatus string + +// Enum values for CapacityReservationStatus +const ( + CapacityReservationStatusPending CapacityReservationStatus = "PENDING" + CapacityReservationStatusActive CapacityReservationStatus = "ACTIVE" + CapacityReservationStatusCancelling CapacityReservationStatus = "CANCELLING" + CapacityReservationStatusCancelled CapacityReservationStatus = "CANCELLED" + CapacityReservationStatusFailed CapacityReservationStatus = "FAILED" + CapacityReservationStatusUpdatePending CapacityReservationStatus = "UPDATE_PENDING" +) + +// Values returns all known values for CapacityReservationStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
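// Editor's note: the sketch below is an illustrative aside, not part of the
// generated file or of this patch. It shows the usual way callers cope with the
// caveat above that Values() is "only as up to date as the client": switch on the
// enum but keep a default branch for states the service may add later.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

func describeCalculation(s types.CalculationExecutionState) string {
	switch s {
	case types.CalculationExecutionStateCompleted:
		return "completed without error"
	case types.CalculationExecutionStateFailed, types.CalculationExecutionStateCanceled:
		return "stopped without completing"
	default:
		// Covers the in-flight states (CREATING, CREATED, QUEUED, RUNNING,
		// CANCELING) and any value this vendored client does not know about yet.
		return fmt.Sprintf("still in progress (or unrecognized state %q)", s)
	}
}

func main() {
	// Values() lists only the states known to this client version.
	fmt.Println(types.CalculationExecutionState("").Values())
	fmt.Println(describeCalculation(types.CalculationExecutionStateRunning))
}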
+func (CapacityReservationStatus) Values() []CapacityReservationStatus { + return []CapacityReservationStatus{ + "PENDING", + "ACTIVE", + "CANCELLING", + "CANCELLED", + "FAILED", + "UPDATE_PENDING", + } +} + +type ColumnNullable string + +// Enum values for ColumnNullable +const ( + ColumnNullableNotNull ColumnNullable = "NOT_NULL" + ColumnNullableNullable ColumnNullable = "NULLABLE" + ColumnNullableUnknown ColumnNullable = "UNKNOWN" +) + +// Values returns all known values for ColumnNullable. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ColumnNullable) Values() []ColumnNullable { + return []ColumnNullable{ + "NOT_NULL", + "NULLABLE", + "UNKNOWN", + } +} + +type ConnectionType string + +// Enum values for ConnectionType +const ( + ConnectionTypeDynamodb ConnectionType = "DYNAMODB" + ConnectionTypeMysql ConnectionType = "MYSQL" + ConnectionTypePostgresql ConnectionType = "POSTGRESQL" + ConnectionTypeRedshift ConnectionType = "REDSHIFT" + ConnectionTypeOracle ConnectionType = "ORACLE" + ConnectionTypeSynapse ConnectionType = "SYNAPSE" + ConnectionTypeSqlserver ConnectionType = "SQLSERVER" + ConnectionTypeDb2 ConnectionType = "DB2" + ConnectionTypeOpensearch ConnectionType = "OPENSEARCH" + ConnectionTypeBigquery ConnectionType = "BIGQUERY" + ConnectionTypeGooglecloudstorage ConnectionType = "GOOGLECLOUDSTORAGE" + ConnectionTypeHbase ConnectionType = "HBASE" + ConnectionTypeDocumentdb ConnectionType = "DOCUMENTDB" + ConnectionTypeCmdb ConnectionType = "CMDB" + ConnectionTypeTpcds ConnectionType = "TPCDS" + ConnectionTypeTimestream ConnectionType = "TIMESTREAM" + ConnectionTypeSaphana ConnectionType = "SAPHANA" + ConnectionTypeSnowflake ConnectionType = "SNOWFLAKE" + ConnectionTypeDatalakegen2 ConnectionType = "DATALAKEGEN2" + ConnectionTypeDb2as400 ConnectionType = "DB2AS400" +) + +// Values returns all known values for ConnectionType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ConnectionType) Values() []ConnectionType { + return []ConnectionType{ + "DYNAMODB", + "MYSQL", + "POSTGRESQL", + "REDSHIFT", + "ORACLE", + "SYNAPSE", + "SQLSERVER", + "DB2", + "OPENSEARCH", + "BIGQUERY", + "GOOGLECLOUDSTORAGE", + "HBASE", + "DOCUMENTDB", + "CMDB", + "TPCDS", + "TIMESTREAM", + "SAPHANA", + "SNOWFLAKE", + "DATALAKEGEN2", + "DB2AS400", + } +} + +type DataCatalogStatus string + +// Enum values for DataCatalogStatus +const ( + DataCatalogStatusCreateInProgress DataCatalogStatus = "CREATE_IN_PROGRESS" + DataCatalogStatusCreateComplete DataCatalogStatus = "CREATE_COMPLETE" + DataCatalogStatusCreateFailed DataCatalogStatus = "CREATE_FAILED" + DataCatalogStatusCreateFailedCleanupInProgress DataCatalogStatus = "CREATE_FAILED_CLEANUP_IN_PROGRESS" + DataCatalogStatusCreateFailedCleanupComplete DataCatalogStatus = "CREATE_FAILED_CLEANUP_COMPLETE" + DataCatalogStatusCreateFailedCleanupFailed DataCatalogStatus = "CREATE_FAILED_CLEANUP_FAILED" + DataCatalogStatusDeleteInProgress DataCatalogStatus = "DELETE_IN_PROGRESS" + DataCatalogStatusDeleteComplete DataCatalogStatus = "DELETE_COMPLETE" + DataCatalogStatusDeleteFailed DataCatalogStatus = "DELETE_FAILED" +) + +// Values returns all known values for DataCatalogStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DataCatalogStatus) Values() []DataCatalogStatus { + return []DataCatalogStatus{ + "CREATE_IN_PROGRESS", + "CREATE_COMPLETE", + "CREATE_FAILED", + "CREATE_FAILED_CLEANUP_IN_PROGRESS", + "CREATE_FAILED_CLEANUP_COMPLETE", + "CREATE_FAILED_CLEANUP_FAILED", + "DELETE_IN_PROGRESS", + "DELETE_COMPLETE", + "DELETE_FAILED", + } +} + +type DataCatalogType string + +// Enum values for DataCatalogType +const ( + DataCatalogTypeLambda DataCatalogType = "LAMBDA" + DataCatalogTypeGlue DataCatalogType = "GLUE" + DataCatalogTypeHive DataCatalogType = "HIVE" + DataCatalogTypeFederated DataCatalogType = "FEDERATED" +) + +// Values returns all known values for DataCatalogType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DataCatalogType) Values() []DataCatalogType { + return []DataCatalogType{ + "LAMBDA", + "GLUE", + "HIVE", + "FEDERATED", + } +} + +type EncryptionOption string + +// Enum values for EncryptionOption +const ( + EncryptionOptionSseS3 EncryptionOption = "SSE_S3" + EncryptionOptionSseKms EncryptionOption = "SSE_KMS" + EncryptionOptionCseKms EncryptionOption = "CSE_KMS" +) + +// Values returns all known values for EncryptionOption. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (EncryptionOption) Values() []EncryptionOption { + return []EncryptionOption{ + "SSE_S3", + "SSE_KMS", + "CSE_KMS", + } +} + +type ExecutorState string + +// Enum values for ExecutorState +const ( + ExecutorStateCreating ExecutorState = "CREATING" + ExecutorStateCreated ExecutorState = "CREATED" + ExecutorStateRegistered ExecutorState = "REGISTERED" + ExecutorStateTerminating ExecutorState = "TERMINATING" + ExecutorStateTerminated ExecutorState = "TERMINATED" + ExecutorStateFailed ExecutorState = "FAILED" +) + +// Values returns all known values for ExecutorState. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ExecutorState) Values() []ExecutorState { + return []ExecutorState{ + "CREATING", + "CREATED", + "REGISTERED", + "TERMINATING", + "TERMINATED", + "FAILED", + } +} + +type ExecutorType string + +// Enum values for ExecutorType +const ( + ExecutorTypeCoordinator ExecutorType = "COORDINATOR" + ExecutorTypeGateway ExecutorType = "GATEWAY" + ExecutorTypeWorker ExecutorType = "WORKER" +) + +// Values returns all known values for ExecutorType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ExecutorType) Values() []ExecutorType { + return []ExecutorType{ + "COORDINATOR", + "GATEWAY", + "WORKER", + } +} + +type NotebookType string + +// Enum values for NotebookType +const ( + NotebookTypeIpynb NotebookType = "IPYNB" +) + +// Values returns all known values for NotebookType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (NotebookType) Values() []NotebookType { + return []NotebookType{ + "IPYNB", + } +} + +type QueryExecutionState string + +// Enum values for QueryExecutionState +const ( + QueryExecutionStateQueued QueryExecutionState = "QUEUED" + QueryExecutionStateRunning QueryExecutionState = "RUNNING" + QueryExecutionStateSucceeded QueryExecutionState = "SUCCEEDED" + QueryExecutionStateFailed QueryExecutionState = "FAILED" + QueryExecutionStateCancelled QueryExecutionState = "CANCELLED" +) + +// Values returns all known values for QueryExecutionState. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (QueryExecutionState) Values() []QueryExecutionState { + return []QueryExecutionState{ + "QUEUED", + "RUNNING", + "SUCCEEDED", + "FAILED", + "CANCELLED", + } +} + +type S3AclOption string + +// Enum values for S3AclOption +const ( + S3AclOptionBucketOwnerFullControl S3AclOption = "BUCKET_OWNER_FULL_CONTROL" +) + +// Values returns all known values for S3AclOption. Note that this can be expanded +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (S3AclOption) Values() []S3AclOption { + return []S3AclOption{ + "BUCKET_OWNER_FULL_CONTROL", + } +} + +type SessionState string + +// Enum values for SessionState +const ( + SessionStateCreating SessionState = "CREATING" + SessionStateCreated SessionState = "CREATED" + SessionStateIdle SessionState = "IDLE" + SessionStateBusy SessionState = "BUSY" + SessionStateTerminating SessionState = "TERMINATING" + SessionStateTerminated SessionState = "TERMINATED" + SessionStateDegraded SessionState = "DEGRADED" + SessionStateFailed SessionState = "FAILED" +) + +// Values returns all known values for SessionState. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (SessionState) Values() []SessionState { + return []SessionState{ + "CREATING", + "CREATED", + "IDLE", + "BUSY", + "TERMINATING", + "TERMINATED", + "DEGRADED", + "FAILED", + } +} + +type StatementType string + +// Enum values for StatementType +const ( + StatementTypeDdl StatementType = "DDL" + StatementTypeDml StatementType = "DML" + StatementTypeUtility StatementType = "UTILITY" +) + +// Values returns all known values for StatementType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (StatementType) Values() []StatementType { + return []StatementType{ + "DDL", + "DML", + "UTILITY", + } +} + +type ThrottleReason string + +// Enum values for ThrottleReason +const ( + ThrottleReasonConcurrentQueryLimitExceeded ThrottleReason = "CONCURRENT_QUERY_LIMIT_EXCEEDED" +) + +// Values returns all known values for ThrottleReason. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
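// Editor's note: illustrative aside, not part of the generated file or the patch.
// A typical use of QueryExecutionState above is deciding when a poll loop around
// GetQueryExecution can stop; the terminal states are taken from the constants in
// this file.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

// queryFinished reports whether the query has reached a terminal state.
func queryFinished(s types.QueryExecutionState) bool {
	switch s {
	case types.QueryExecutionStateSucceeded,
		types.QueryExecutionStateFailed,
		types.QueryExecutionStateCancelled:
		return true
	default:
		// QUEUED, RUNNING, or a state added after this client was generated.
		return false
	}
}

func main() {
	fmt.Println(queryFinished(types.QueryExecutionStateRunning))   // false
	fmt.Println(queryFinished(types.QueryExecutionStateSucceeded)) // true
}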
+func (ThrottleReason) Values() []ThrottleReason { + return []ThrottleReason{ + "CONCURRENT_QUERY_LIMIT_EXCEEDED", + } +} + +type WorkGroupState string + +// Enum values for WorkGroupState +const ( + WorkGroupStateEnabled WorkGroupState = "ENABLED" + WorkGroupStateDisabled WorkGroupState = "DISABLED" +) + +// Values returns all known values for WorkGroupState. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (WorkGroupState) Values() []WorkGroupState { + return []WorkGroupState{ + "ENABLED", + "DISABLED", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/errors.go new file mode 100644 index 00000000..bae86401 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/errors.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// Indicates a platform issue, which may be due to a transient condition or outage. +type InternalServerException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InternalServerException" + } + return *e.ErrorCodeOverride +} +func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// Indicates that something is wrong with the input to the request. For example, a +// required parameter may be missing or out of range. +type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + AthenaErrorCode *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// An exception that Athena received when it called a custom metastore. Occurs if +// the error is not caused by user input ( InvalidRequestException ) or from the +// Athena platform ( InternalServerException ). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned in +// a MetadataException . 
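// Editor's note: illustrative aside, not part of the generated file or the patch.
// The exception types above carry their fault classification, so callers usually
// branch with errors.As: InvalidRequestException means the request itself is bad,
// while InternalServerException points at the service. The error value built in
// main is a stand-in for what an Athena call would return.
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

func classify(err error) string {
	var badRequest *types.InvalidRequestException
	if errors.As(err, &badRequest) {
		return "client fault, do not retry unchanged: " + badRequest.ErrorMessage()
	}
	var internal *types.InternalServerException
	if errors.As(err, &internal) {
		return "server fault, may be transient: " + internal.ErrorMessage()
	}
	return "unrecognized error: " + err.Error()
}

func main() {
	msg := "QueryString is missing"
	fmt.Println(classify(&types.InvalidRequestException{Message: &msg}))
}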
+type MetadataException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *MetadataException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MetadataException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MetadataException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "MetadataException" + } + return *e.ErrorCodeOverride +} +func (e *MetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// A resource, such as a workgroup, was not found. +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + ResourceName *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified session already exists. +type SessionAlreadyExistsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *SessionAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SessionAlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SessionAlreadyExistsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "SessionAlreadyExistsException" + } + return *e.ErrorCodeOverride +} +func (e *SessionAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request was throttled. +type TooManyRequestsException struct { + Message *string + + ErrorCodeOverride *string + + Reason ThrottleReason + + noSmithyDocumentSerde +} + +func (e *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyRequestsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyRequestsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyRequestsException" + } + return *e.ErrorCodeOverride +} +func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/types.go new file mode 100644 index 00000000..03980606 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/types/types.go @@ -0,0 +1,1700 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Indicates that an Amazon S3 canned ACL should be set to control ownership of +// stored query results, including data files inserted by Athena as the result of +// statements like CTAS or INSERT INTO. When Athena stores query results in Amazon +// S3, the canned ACL is set with the x-amz-acl request header. 
For more +// information about S3 Object Ownership, see [Object Ownership settings]in the Amazon S3 User Guide. +// +// [Object Ownership settings]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html#object-ownership-overview +type AclConfiguration struct { + + // The Amazon S3 canned ACL that Athena should specify when storing query results, + // including data files inserted by Athena as the result of statements like CTAS or + // INSERT INTO. Currently the only supported canned ACL is + // BUCKET_OWNER_FULL_CONTROL . If a query runs in a workgroup and the workgroup + // overrides client-side settings, then the Amazon S3 canned ACL specified in the + // workgroup's settings is used for all queries that run in the workgroup. For more + // information about Amazon S3 canned ACLs, see [Canned ACL]in the Amazon S3 User Guide. + // + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl + // + // This member is required. + S3AclOption S3AclOption + + noSmithyDocumentSerde +} + +// Contains the application runtime IDs and their supported DPU sizes. +type ApplicationDPUSizes struct { + + // The name of the supported application runtime (for example, Athena notebook + // version 1 ). + ApplicationRuntimeId *string + + // A list of the supported DPU sizes that the application runtime supports. + SupportedDPUSizes []int32 + + noSmithyDocumentSerde +} + +// Provides information about an Athena query error. The AthenaError feature +// provides standardized error information to help you understand failed queries +// and take steps after a query failure occurs. AthenaError includes an +// ErrorCategory field that specifies whether the cause of the failed query is due +// to system error, user error, or other error. +type AthenaError struct { + + // An integer value that specifies the category of a query failure error. The + // following list shows the category for each integer value. + // + // 1 - System + // + // 2 - User + // + // 3 - Other + ErrorCategory *int32 + + // Contains a short description of the error that occurred. + ErrorMessage *string + + // An integer value that provides specific information about an Athena query + // error. For the meaning of specific values, see the [Error Type Reference]in the Amazon Athena User + // Guide. + // + // [Error Type Reference]: https://docs.aws.amazon.com/athena/latest/ug/error-reference.html#error-reference-error-type-reference + ErrorType *int32 + + // True if the query might succeed if resubmitted. + Retryable bool + + noSmithyDocumentSerde +} + +// Contains configuration information for the calculation. +type CalculationConfiguration struct { + + // A string that contains the code for the calculation. + CodeBlock *string + + noSmithyDocumentSerde +} + +// Contains information about an application-specific calculation result. +type CalculationResult struct { + + // The Amazon S3 location of the folder for the calculation results. + ResultS3Uri *string + + // The data format of the calculation result. + ResultType *string + + // The Amazon S3 location of the stderr error messages file for the calculation. + StdErrorS3Uri *string + + // The Amazon S3 location of the stdout file for the calculation. + StdOutS3Uri *string + + noSmithyDocumentSerde +} + +// Contains statistics for a notebook calculation. +type CalculationStatistics struct { + + // The data processing unit execution time in milliseconds for the calculation. 
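// Editor's note: illustrative aside, not part of the generated file or the patch.
// It applies the ErrorCategory codes documented on AthenaError above
// (1 = system, 2 = user, 3 = other); the sample values are made up.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

// categoryLabel maps AthenaError.ErrorCategory onto the documented categories.
func categoryLabel(e *types.AthenaError) string {
	if e == nil || e.ErrorCategory == nil {
		return "unknown"
	}
	switch *e.ErrorCategory {
	case 1:
		return "system"
	case 2:
		return "user"
	case 3:
		return "other"
	default:
		return "unknown"
	}
}

func main() {
	cat := int32(2)
	e := &types.AthenaError{ErrorCategory: &cat, Retryable: false}
	fmt.Printf("category=%s retryable=%v\n", categoryLabel(e), e.Retryable)
}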
+ DpuExecutionInMillis *int64 + + // The progress of the calculation. + Progress *string + + noSmithyDocumentSerde +} + +// Contains information about the status of a notebook calculation. +type CalculationStatus struct { + + // The date and time the calculation completed processing. + CompletionDateTime *time.Time + + // The state of the calculation execution. A description of each state follows. + // + // CREATING - The calculation is in the process of being created. + // + // CREATED - The calculation has been created and is ready to run. + // + // QUEUED - The calculation has been queued for processing. + // + // RUNNING - The calculation is running. + // + // CANCELING - A request to cancel the calculation has been received and the + // system is working to stop it. + // + // CANCELED - The calculation is no longer running as the result of a cancel + // request. + // + // COMPLETED - The calculation has completed without error. + // + // FAILED - The calculation failed and is no longer running. + State CalculationExecutionState + + // The reason for the calculation state change (for example, the calculation was + // canceled because the session was terminated). + StateChangeReason *string + + // The date and time the calculation was submitted for processing. + SubmissionDateTime *time.Time + + noSmithyDocumentSerde +} + +// Summary information for a notebook calculation. +type CalculationSummary struct { + + // The calculation execution UUID. + CalculationExecutionId *string + + // A description of the calculation. + Description *string + + // Contains information about the status of the calculation. + Status *CalculationStatus + + noSmithyDocumentSerde +} + +// Contains the submission time of a single allocation request for a capacity +// reservation and the most recent status of the attempted allocation. +type CapacityAllocation struct { + + // The time when the capacity allocation was requested. + // + // This member is required. + RequestTime *time.Time + + // The status of the capacity allocation. + // + // This member is required. + Status CapacityAllocationStatus + + // The time when the capacity allocation request was completed. + RequestCompletionTime *time.Time + + // The status message of the capacity allocation. + StatusMessage *string + + noSmithyDocumentSerde +} + +// A mapping between one or more workgroups and a capacity reservation. +type CapacityAssignment struct { + + // The list of workgroup names for the capacity assignment. + WorkGroupNames []string + + noSmithyDocumentSerde +} + +// Assigns Athena workgroups (and hence their queries) to capacity reservations. A +// capacity reservation can have only one capacity assignment configuration, but +// the capacity assignment configuration can be made up of multiple individual +// assignments. Each assignment specifies how Athena queries can consume capacity +// from the capacity reservation that their workgroup is mapped to. +type CapacityAssignmentConfiguration struct { + + // The list of assignments that make up the capacity assignment configuration. + CapacityAssignments []CapacityAssignment + + // The name of the reservation that the capacity assignment configuration is for. + CapacityReservationName *string + + noSmithyDocumentSerde +} + +// A reservation for a specified number of data processing units (DPUs). When a +// reservation is initially created, it has no DPUs. Athena allocates DPUs until +// the allocated amount equals the requested amount. 
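// Editor's note: illustrative aside, not part of the generated file or the patch.
// Per the paragraph above, a new reservation starts with zero DPUs and is filled
// until the allocated amount matches the requested amount; the check below uses
// the AllocatedDpus and TargetDpus fields of the CapacityReservation struct that
// follows. The numbers are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

// fullyAllocated reports whether the reservation has received all requested DPUs.
func fullyAllocated(r *types.CapacityReservation) bool {
	if r == nil || r.AllocatedDpus == nil || r.TargetDpus == nil {
		return false
	}
	return *r.AllocatedDpus >= *r.TargetDpus
}

func main() {
	allocated, target := int32(24), int32(24)
	r := &types.CapacityReservation{
		AllocatedDpus: &allocated,
		TargetDpus:    &target,
		Status:        types.CapacityReservationStatusActive,
	}
	fmt.Println(fullyAllocated(r)) // true
}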
+type CapacityReservation struct { + + // The number of data processing units currently allocated. + // + // This member is required. + AllocatedDpus *int32 + + // The time in UTC epoch millis when the capacity reservation was created. + // + // This member is required. + CreationTime *time.Time + + // The name of the capacity reservation. + // + // This member is required. + Name *string + + // The status of the capacity reservation. + // + // This member is required. + Status CapacityReservationStatus + + // The number of data processing units requested. + // + // This member is required. + TargetDpus *int32 + + // Contains the submission time of a single allocation request for a capacity + // reservation and the most recent status of the attempted allocation. + LastAllocation *CapacityAllocation + + // The time of the most recent capacity allocation that succeeded. + LastSuccessfulAllocationTime *time.Time + + noSmithyDocumentSerde +} + +// Contains metadata for a column in a table. +type Column struct { + + // The name of the column. + // + // This member is required. + Name *string + + // Optional information about the column. + Comment *string + + // The data type of the column. + Type *string + + noSmithyDocumentSerde +} + +// Information about the columns in a query execution result. +type ColumnInfo struct { + + // The name of the column. + // + // This member is required. + Name *string + + // The data type of the column. + // + // This member is required. + Type *string + + // Indicates whether values in the column are case-sensitive. + CaseSensitive bool + + // The catalog to which the query results belong. + CatalogName *string + + // A column label. + Label *string + + // Unsupported constraint. This value always shows as UNKNOWN . + Nullable ColumnNullable + + // For DECIMAL data types, specifies the total number of digits, up to 38. For + // performance reasons, we recommend up to 18 digits. + Precision int32 + + // For DECIMAL data types, specifies the total number of digits in the fractional + // part of the value. Defaults to 0. + Scale int32 + + // The schema name (database name) to which the query results belong. + SchemaName *string + + // The table name for the query results. + TableName *string + + noSmithyDocumentSerde +} + +// Specifies the customer managed KMS key that is used to encrypt the user's data +// stores in Athena. When an Amazon Web Services managed key is used, this value is +// null. This setting does not apply to Athena SQL workgroups. +type CustomerContentEncryptionConfiguration struct { + + // The customer managed KMS key that is used to encrypt the user's data stores in + // Athena. + // + // This member is required. + KmsKey *string + + noSmithyDocumentSerde +} + +// Contains metadata information for a database in a data catalog. +type Database struct { + + // The name of the database. + // + // This member is required. + Name *string + + // An optional description of the database. + Description *string + + // A set of custom key/value pairs. + Parameters map[string]string + + noSmithyDocumentSerde +} + +// Contains information about a data catalog in an Amazon Web Services account. +// +// In the Athena console, data catalogs are listed as "data sources" on the Data +// sources page under the Data source name column. +type DataCatalog struct { + + // The name of the data catalog. The catalog name must be unique for the Amazon + // Web Services account and can use a maximum of 127 alphanumeric, underscore, at + // sign, or hyphen characters. 
The remainder of the length constraint of 256 is + // reserved for use by Athena. + // + // This member is required. + Name *string + + // The type of data catalog to create: LAMBDA for a federated catalog, GLUE for an + // Glue Data Catalog, and HIVE for an external Apache Hive metastore. FEDERATED is + // a federated catalog for which Athena creates the connection and the Lambda + // function for you based on the parameters that you pass. + // + // This member is required. + Type DataCatalogType + + // The type of connection for a FEDERATED data catalog (for example, REDSHIFT , + // MYSQL , or SQLSERVER ). For information about individual connectors, see [Available data source connectors]. + // + // [Available data source connectors]: https://docs.aws.amazon.com/athena/latest/ug/connectors-available.html + ConnectionType ConnectionType + + // An optional description of the data catalog. + Description *string + + // Text of the error that occurred during data catalog creation or deletion. + Error *string + + // Specifies the Lambda function or functions to use for the data catalog. This is + // a mapping whose values depend on the catalog type. + // + // - For the HIVE data catalog type, use the following syntax. The + // metadata-function parameter is required. The sdk-version parameter is optional + // and defaults to the currently supported version. + // + // metadata-function=lambda_arn, sdk-version=version_number + // + // - For the LAMBDA data catalog type, use one of the following sets of required + // parameters, but not both. + // + // - If you have one Lambda function that processes metadata and another for + // reading the actual data, use the following syntax. Both parameters are required. + // + // metadata-function=lambda_arn, record-function=lambda_arn + // + // - If you have a composite Lambda function that processes both metadata and + // data, use the following syntax to specify your Lambda function. + // + // function=lambda_arn + // + // - The GLUE type takes a catalog ID parameter and is required. The catalog_id + // is the account ID of the Amazon Web Services account to which the Glue catalog + // belongs. + // + // catalog-id=catalog_id + // + // - The GLUE data catalog type also applies to the default AwsDataCatalog that + // already exists in your account, of which you can have only one and cannot + // modify. + // + // - The FEDERATED data catalog type uses one of the following parameters, but + // not both. Use connection-arn for an existing Glue connection. Use + // connection-type and connection-properties to specify the configuration setting + // for a new connection. + // + // - connection-arn: + // + // - connection-type:MYSQL|REDSHIFT|...., connection-properties:"" + // + // For , use escaped JSON text, as in the following example. + // + // "{\"spill_bucket\":\"my_spill\",\"spill_prefix\":\"athena-spill\",\"host\":\"abc12345.snowflakecomputing.com\",\"port\":\"1234\",\"warehouse\":\"DEV_WH\",\"database\":\"TEST\",\"schema\":\"PUBLIC\",\"SecretArn\":\"arn:aws:secretsmanager:ap-south-1:111122223333:secret:snowflake-XHb67j\"}" + Parameters map[string]string + + // The status of the creation or deletion of the data catalog. + // + // - The LAMBDA , GLUE , and HIVE data catalog types are created synchronously. + // Their status is either CREATE_COMPLETE or CREATE_FAILED . + // + // - The FEDERATED data catalog type is created asynchronously. + // + // Data catalog creation status: + // + // - CREATE_IN_PROGRESS : Federated data catalog creation in progress. 
+ // + // - CREATE_COMPLETE : Data catalog creation complete. + // + // - CREATE_FAILED : Data catalog could not be created. + // + // - CREATE_FAILED_CLEANUP_IN_PROGRESS : Federated data catalog creation failed + // and is being removed. + // + // - CREATE_FAILED_CLEANUP_COMPLETE : Federated data catalog creation failed and + // was removed. + // + // - CREATE_FAILED_CLEANUP_FAILED : Federated data catalog creation failed but + // could not be removed. + // + // Data catalog deletion status: + // + // - DELETE_IN_PROGRESS : Federated data catalog deletion in progress. + // + // - DELETE_COMPLETE : Federated data catalog deleted. + // + // - DELETE_FAILED : Federated data catalog could not be deleted. + Status DataCatalogStatus + + noSmithyDocumentSerde +} + +// The summary information for the data catalog, which includes its name and type. +type DataCatalogSummary struct { + + // The name of the data catalog. The catalog name is unique for the Amazon Web + // Services account and can use a maximum of 127 alphanumeric, underscore, at sign, + // or hyphen characters. The remainder of the length constraint of 256 is reserved + // for use by Athena. + CatalogName *string + + // The type of connection for a FEDERATED data catalog (for example, REDSHIFT , + // MYSQL , or SQLSERVER ). For information about individual connectors, see [Available data source connectors]. + // + // [Available data source connectors]: https://docs.aws.amazon.com/athena/latest/ug/connectors-available.html + ConnectionType ConnectionType + + // Text of the error that occurred during data catalog creation or deletion. + Error *string + + // The status of the creation or deletion of the data catalog. + // + // - The LAMBDA , GLUE , and HIVE data catalog types are created synchronously. + // Their status is either CREATE_COMPLETE or CREATE_FAILED . + // + // - The FEDERATED data catalog type is created asynchronously. + // + // Data catalog creation status: + // + // - CREATE_IN_PROGRESS : Federated data catalog creation in progress. + // + // - CREATE_COMPLETE : Data catalog creation complete. + // + // - CREATE_FAILED : Data catalog could not be created. + // + // - CREATE_FAILED_CLEANUP_IN_PROGRESS : Federated data catalog creation failed + // and is being removed. + // + // - CREATE_FAILED_CLEANUP_COMPLETE : Federated data catalog creation failed and + // was removed. + // + // - CREATE_FAILED_CLEANUP_FAILED : Federated data catalog creation failed but + // could not be removed. + // + // Data catalog deletion status: + // + // - DELETE_IN_PROGRESS : Federated data catalog deletion in progress. + // + // - DELETE_COMPLETE : Federated data catalog deleted. + // + // - DELETE_FAILED : Federated data catalog could not be deleted. + Status DataCatalogStatus + + // The data catalog type. + Type DataCatalogType + + noSmithyDocumentSerde +} + +// A piece of data (a field in the table). +type Datum struct { + + // The value of the datum. + VarCharValue *string + + noSmithyDocumentSerde +} + +// If query and calculation results are encrypted in Amazon S3, indicates the +// encryption option used (for example, SSE_KMS or CSE_KMS ) and key information. +type EncryptionConfiguration struct { + + // Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys ( + // SSE_S3 ), server-side encryption with KMS-managed keys ( SSE_KMS ), or + // client-side encryption with KMS-managed keys ( CSE_KMS ) is used. 
+ // + // If a query runs in a workgroup and the workgroup overrides client-side + // settings, then the workgroup's setting for encryption is used. It specifies + // whether query results must be encrypted, for all queries that run in this + // workgroup. + // + // This member is required. + EncryptionOption EncryptionOption + + // For SSE_KMS and CSE_KMS , this is the KMS key ARN or ID. + KmsKey *string + + noSmithyDocumentSerde +} + +// Contains data processing unit (DPU) configuration settings and parameter +// mappings for a notebook engine. +type EngineConfiguration struct { + + // The maximum number of DPUs that can run concurrently. + // + // This member is required. + MaxConcurrentDpus *int32 + + // Contains additional notebook engine MAP parameter mappings in the form of + // key-value pairs. To specify an Athena notebook that the Jupyter server will + // download and serve, specify a value for the StartSessionRequest$NotebookVersionfield, and then add a key named + // NotebookId to AdditionalConfigs that has the value of the Athena notebook ID. + AdditionalConfigs map[string]string + + // The number of DPUs to use for the coordinator. A coordinator is a special + // executor that orchestrates processing work and manages other executors in a + // notebook session. The default is 1. + CoordinatorDpuSize *int32 + + // The default number of DPUs to use for executors. An executor is the smallest + // unit of compute that a notebook session can request from Athena. The default is + // 1. + DefaultExecutorDpuSize *int32 + + // Specifies custom jar files and Spark properties for use cases like cluster + // encryption, table formats, and general Spark tuning. + SparkProperties map[string]string + + noSmithyDocumentSerde +} + +// The Athena engine version for running queries, or the PySpark engine version +// for running sessions. +type EngineVersion struct { + + // Read only. The engine version on which the query runs. If the user requests a + // valid engine version other than Auto, the effective engine version is the same + // as the engine version that the user requested. If the user requests Auto, the + // effective engine version is chosen by Athena. When a request to update the + // engine version is made by a CreateWorkGroup or UpdateWorkGroup operation, the + // EffectiveEngineVersion field is ignored. + EffectiveEngineVersion *string + + // The engine version requested by the user. Possible values are determined by the + // output of ListEngineVersions , including AUTO. The default is AUTO. + SelectedEngineVersion *string + + noSmithyDocumentSerde +} + +// Contains summary information about an executor. +type ExecutorsSummary struct { + + // The UUID of the executor. + // + // This member is required. + ExecutorId *string + + // The smallest unit of compute that a session can request from Athena. Size is + // measured in data processing unit (DPU) values, a relative measure of processing + // power. + ExecutorSize *int64 + + // The processing state of the executor. A description of each state follows. + // + // CREATING - The executor is being started, including acquiring resources. + // + // CREATED - The executor has been started. + // + // REGISTERED - The executor has been registered. + // + // TERMINATING - The executor is in the process of shutting down. + // + // TERMINATED - The executor is no longer running. + // + // FAILED - Due to a failure, the executor is no longer running. 
+ ExecutorState ExecutorState + + // The type of executor used for the application ( COORDINATOR , GATEWAY , or + // WORKER ). + ExecutorType ExecutorType + + // The date and time that the executor started. + StartDateTime *int64 + + // The date and time that the executor was terminated. + TerminationDateTime *int64 + + noSmithyDocumentSerde +} + +// A string for searching notebook names. +type FilterDefinition struct { + + // The name of the notebook to search for. + Name *string + + noSmithyDocumentSerde +} + +// Specifies whether the workgroup is IAM Identity Center supported. +type IdentityCenterConfiguration struct { + + // Specifies whether the workgroup is IAM Identity Center supported. + EnableIdentityCenter *bool + + // The IAM Identity Center instance ARN that the workgroup associates to. + IdentityCenterInstanceArn *string + + noSmithyDocumentSerde +} + +// A query, where QueryString contains the SQL statements that make up the query. +type NamedQuery struct { + + // The database to which the query belongs. + // + // This member is required. + Database *string + + // The query name. + // + // This member is required. + Name *string + + // The SQL statements that make up the query. + // + // This member is required. + QueryString *string + + // The query description. + Description *string + + // The unique identifier of the query. + NamedQueryId *string + + // The name of the workgroup that contains the named query. + WorkGroup *string + + noSmithyDocumentSerde +} + +// Contains metadata for notebook, including the notebook name, ID, workgroup, and +// time created. +type NotebookMetadata struct { + + // The time when the notebook was created. + CreationTime *time.Time + + // The time when the notebook was last modified. + LastModifiedTime *time.Time + + // The name of the notebook. + Name *string + + // The notebook ID. + NotebookId *string + + // The type of notebook. Currently, the only valid type is IPYNB . + Type NotebookType + + // The name of the Spark enabled workgroup to which the notebook belongs. + WorkGroup *string + + noSmithyDocumentSerde +} + +// Contains the notebook session ID and notebook session creation time. +type NotebookSessionSummary struct { + + // The time when the notebook session was created. + CreationTime *time.Time + + // The notebook session ID. + SessionId *string + + noSmithyDocumentSerde +} + +// A prepared SQL statement for use with Athena. +type PreparedStatement struct { + + // The description of the prepared statement. + Description *string + + // The last modified time of the prepared statement. + LastModifiedTime *time.Time + + // The query string for the prepared statement. + QueryStatement *string + + // The name of the prepared statement. + StatementName *string + + // The name of the workgroup to which the prepared statement belongs. + WorkGroupName *string + + noSmithyDocumentSerde +} + +// The name and last modified time of the prepared statement. +type PreparedStatementSummary struct { + + // The last modified time of the prepared statement. + LastModifiedTime *time.Time + + // The name of the prepared statement. + StatementName *string + + noSmithyDocumentSerde +} + +// Information about a single instance of a query execution. +type QueryExecution struct { + + // The engine version that executed the query. + EngineVersion *EngineVersion + + // A list of values for the parameters in a query. The values are applied + // sequentially to the parameters in the query in the order in which the parameters + // occur. 
The list of parameters is not returned in the response. + ExecutionParameters []string + + // The SQL query statements which the query execution ran. + Query *string + + // The database in which the query execution occurred. + QueryExecutionContext *QueryExecutionContext + + // The unique identifier for each query execution. + QueryExecutionId *string + + // Specifies whether Amazon S3 access grants are enabled for query results. + QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration + + // The location in Amazon S3 where query and calculation results are stored and + // the encryption option, if any, used for query results. These are known as + // "client-side settings". If workgroup settings override client-side settings, + // then the query uses the location for the query results and the encryption + // configuration that are specified for the workgroup. + ResultConfiguration *ResultConfiguration + + // Specifies the query result reuse behavior that was used for the query. + ResultReuseConfiguration *ResultReuseConfiguration + + // The type of query statement that was run. DDL indicates DDL query statements. + // DML indicates DML (Data Manipulation Language) query statements, such as CREATE + // TABLE AS SELECT . UTILITY indicates query statements other than DDL and DML, + // such as SHOW CREATE TABLE , or DESCRIBE TABLE . + StatementType StatementType + + // Query execution statistics, such as the amount of data scanned, the amount of + // time that the query took to process, and the type of statement that was run. + Statistics *QueryExecutionStatistics + + // The completion date, current state, submission time, and state change reason + // (if applicable) for the query execution. + Status *QueryExecutionStatus + + // The kind of query statement that was run. + SubstatementType *string + + // The name of the workgroup in which the query ran. + WorkGroup *string + + noSmithyDocumentSerde +} + +// The database and data catalog context in which the query execution occurs. +type QueryExecutionContext struct { + + // The name of the data catalog used in the query execution. + Catalog *string + + // The name of the database used in the query execution. The database must exist + // in the catalog. + Database *string + + noSmithyDocumentSerde +} + +// The amount of data scanned during the query execution and the amount of time +// that it took to execute, and the type of statement that was run. +type QueryExecutionStatistics struct { + + // The location and file name of a data manifest file. The manifest file is saved + // to the Athena query results location in Amazon S3. The manifest file tracks + // files that the query wrote to Amazon S3. If the query fails, the manifest file + // also tracks files that the query intended to write. The manifest is useful for + // identifying orphaned files resulting from a failed query. For more information, + // see [Working with Query Results, Output Files, and Query History]in the Amazon Athena User Guide. + // + // [Working with Query Results, Output Files, and Query History]: https://docs.aws.amazon.com/athena/latest/ug/querying.html + DataManifestLocation *string + + // The number of bytes in the data that was queried. + DataScannedInBytes *int64 + + // The number of milliseconds that the query took to execute. + EngineExecutionTimeInMillis *int64 + + // The number of milliseconds that Athena took to plan the query processing flow. + // This includes the time spent retrieving table partitions from the data source. 
+ // Note that because the query engine performs the query planning, query planning + // time is a subset of engine processing time. + QueryPlanningTimeInMillis *int64 + + // The number of milliseconds that the query was in your query queue waiting for + // resources. Note that if transient errors occur, Athena might automatically add + // the query back to the queue. + QueryQueueTimeInMillis *int64 + + // Contains information about whether previous query results were reused for the + // query. + ResultReuseInformation *ResultReuseInformation + + // The number of milliseconds that Athena took to preprocess the query before + // submitting the query to the query engine. + ServicePreProcessingTimeInMillis *int64 + + // The number of milliseconds that Athena took to finalize and publish the query + // results after the query engine finished running the query. + ServiceProcessingTimeInMillis *int64 + + // The number of milliseconds that Athena took to run the query. + TotalExecutionTimeInMillis *int64 + + noSmithyDocumentSerde +} + +// The completion date, current state, submission time, and state change reason +// (if applicable) for the query execution. +type QueryExecutionStatus struct { + + // Provides information about an Athena query error. + AthenaError *AthenaError + + // The date and time that the query completed. + CompletionDateTime *time.Time + + // The state of query execution. QUEUED indicates that the query has been + // submitted to the service, and Athena will execute the query as soon as resources + // are available. RUNNING indicates that the query is in execution phase. SUCCEEDED + // indicates that the query completed without errors. FAILED indicates that the + // query experienced an error and did not complete processing. CANCELLED indicates + // that a user input interrupted query execution. + // + // Athena automatically retries your queries in cases of certain transient errors. + // As a result, you may see the query state transition from RUNNING or FAILED to + // QUEUED . + State QueryExecutionState + + // Further detail about the status of the query. + StateChangeReason *string + + // The date and time that the query was submitted. + SubmissionDateTime *time.Time + + noSmithyDocumentSerde +} + +// Specifies whether Amazon S3 access grants are enabled for query results. +type QueryResultsS3AccessGrantsConfiguration struct { + + // The authentication type used for Amazon S3 access grants. Currently, only + // DIRECTORY_IDENTITY is supported. + // + // This member is required. + AuthenticationType AuthenticationType + + // Specifies whether Amazon S3 access grants are enabled for query results. + // + // This member is required. + EnableS3AccessGrants *bool + + // When enabled, appends the user ID as an Amazon S3 path prefix to the query + // result output location. + CreateUserLevelPrefix *bool + + noSmithyDocumentSerde +} + +// The query execution timeline, statistics on input and output rows and bytes, +// and the different query stages that form the query execution plan. +type QueryRuntimeStatistics struct { + + // Stage statistics such as input and output rows and bytes, execution time, and + // stage state. This information also includes substages and the query stage plan. + OutputStage *QueryStage + + // Statistics such as input rows and bytes read by the query, rows and bytes + // output by the query, and the number of rows written by the query. 
+ Rows *QueryRuntimeStatisticsRows + + // Timeline statistics such as query queue time, planning time, execution time, + // service processing time, and total execution time. + Timeline *QueryRuntimeStatisticsTimeline + + noSmithyDocumentSerde +} + +// Statistics such as input rows and bytes read by the query, rows and bytes +// output by the query, and the number of rows written by the query. +type QueryRuntimeStatisticsRows struct { + + // The number of bytes read to execute the query. + InputBytes *int64 + + // The number of rows read to execute the query. + InputRows *int64 + + // The number of bytes returned by the query. + OutputBytes *int64 + + // The number of rows returned by the query. + OutputRows *int64 + + noSmithyDocumentSerde +} + +// Timeline statistics such as query queue time, planning time, execution time, +// service processing time, and total execution time. +type QueryRuntimeStatisticsTimeline struct { + + // The number of milliseconds that the query took to execute. + EngineExecutionTimeInMillis *int64 + + // The number of milliseconds that Athena took to plan the query processing flow. + // This includes the time spent retrieving table partitions from the data source. + // Note that because the query engine performs the query planning, query planning + // time is a subset of engine processing time. + QueryPlanningTimeInMillis *int64 + + // The number of milliseconds that the query was in your query queue waiting for + // resources. Note that if transient errors occur, Athena might automatically add + // the query back to the queue. + QueryQueueTimeInMillis *int64 + + // The number of milliseconds that Athena spends on preprocessing before it + // submits the query to the engine. + ServicePreProcessingTimeInMillis *int64 + + // The number of milliseconds that Athena took to finalize and publish the query + // results after the query engine finished running the query. + ServiceProcessingTimeInMillis *int64 + + // The number of milliseconds that Athena took to run the query. + TotalExecutionTimeInMillis *int64 + + noSmithyDocumentSerde +} + +// Stage statistics such as input and output rows and bytes, execution time and +// stage state. This information also includes substages and the query stage plan. +type QueryStage struct { + + // Time taken to execute this stage. + ExecutionTime *int64 + + // The number of bytes input into the stage for execution. + InputBytes *int64 + + // The number of rows input into the stage for execution. + InputRows *int64 + + // The number of bytes output from the stage after execution. + OutputBytes *int64 + + // The number of rows output from the stage after execution. + OutputRows *int64 + + // Stage plan information such as name, identifier, sub plans, and source stages. + QueryStagePlan *QueryStagePlanNode + + // The identifier for a stage. + StageId *int64 + + // State of the stage after query execution. + State *string + + // List of sub query stages that form this stage execution plan. + SubStages []QueryStage + + noSmithyDocumentSerde +} + +// Stage plan information such as name, identifier, sub plans, and remote sources. +type QueryStagePlanNode struct { + + // Stage plan information such as name, identifier, sub plans, and remote sources + // of child plan nodes/ + Children []QueryStagePlanNode + + // Information about the operation this query stage plan node is performing. + Identifier *string + + // Name of the query stage plan that describes the operation this stage is + // performing as part of query execution. 
+ Name *string + + // Source plan node IDs. + RemoteSources []string + + noSmithyDocumentSerde +} + +// The location in Amazon S3 where query and calculation results are stored and +// the encryption option, if any, used for query and calculation results. These are +// known as "client-side settings". If workgroup settings override client-side +// settings, then the query uses the workgroup settings. +type ResultConfiguration struct { + + // Indicates that an Amazon S3 canned ACL should be set to control ownership of + // stored query results. Currently the only supported canned ACL is + // BUCKET_OWNER_FULL_CONTROL . This is a client-side setting. If workgroup settings + // override client-side settings, then the query uses the ACL configuration that is + // specified for the workgroup, and also uses the location for storing query + // results specified in the workgroup. For more information, see WorkGroupConfiguration$EnforceWorkGroupConfigurationand [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + AclConfiguration *AclConfiguration + + // If query and calculation results are encrypted in Amazon S3, indicates the + // encryption option used (for example, SSE_KMS or CSE_KMS ) and key information. + // This is a client-side setting. If workgroup settings override client-side + // settings, then the query uses the encryption configuration that is specified for + // the workgroup, and also uses the location for storing query results specified in + // the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfigurationand [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + EncryptionConfiguration *EncryptionConfiguration + + // The Amazon Web Services account ID that you expect to be the owner of the + // Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, Athena uses the value for + // ExpectedBucketOwner when it makes Amazon S3 calls to your specified output + // location. If the ExpectedBucketOwner Amazon Web Services account ID does not + // match the actual owner of the Amazon S3 bucket, the call fails with a + // permissions error. + // + // This is a client-side setting. If workgroup settings override client-side + // settings, then the query uses the ExpectedBucketOwner setting that is specified + // for the workgroup, and also uses the location for storing query results + // specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfigurationand [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + ExpectedBucketOwner *string + + // The location in Amazon S3 where your query and calculation results are stored, + // such as s3://path/to/query/bucket/ . To run the query, you must specify the + // query results location using one of the ways: either for individual queries + // using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. If none + // of them is set, Athena issues an error that no output location is provided. If + // workgroup settings override client-side settings, then the query uses the + // settings specified for the workgroup. 
See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + OutputLocation *string + + noSmithyDocumentSerde +} + +// The information about the updates in the query results, such as output location +// and encryption configuration for the query results. +type ResultConfigurationUpdates struct { + + // The ACL configuration for the query results. + AclConfiguration *AclConfiguration + + // The encryption configuration for query and calculation results. + EncryptionConfiguration *EncryptionConfiguration + + // The Amazon Web Services account ID that you expect to be the owner of the + // Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, Athena uses the value for + // ExpectedBucketOwner when it makes Amazon S3 calls to your specified output + // location. If the ExpectedBucketOwner Amazon Web Services account ID does not + // match the actual owner of the Amazon S3 bucket, the call fails with a + // permissions error. + // + // If workgroup settings override client-side settings, then the query uses the + // ExpectedBucketOwner setting that is specified for the workgroup, and also uses + // the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfigurationand [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + ExpectedBucketOwner *string + + // The location in Amazon S3 where your query and calculation results are stored, + // such as s3://path/to/query/bucket/ . If workgroup settings override client-side + // settings, then the query uses the location for the query results and the + // encryption configuration that are specified for the workgroup. The "workgroup + // settings override" is specified in EnforceWorkGroupConfiguration (true/false) + // in the WorkGroupConfiguration . See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + OutputLocation *string + + // If set to true , indicates that the previously-specified ACL configuration for + // queries in this workgroup should be ignored and set to null. If set to false or + // not set, and a value is present in the AclConfiguration of + // ResultConfigurationUpdates , the AclConfiguration in the workgroup's + // ResultConfiguration is updated with the new value. For more information, see [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + RemoveAclConfiguration *bool + + // If set to "true", indicates that the previously-specified encryption + // configuration (also known as the client-side setting) for queries in this + // workgroup should be ignored and set to null. If set to "false" or not set, and a + // value is present in the EncryptionConfiguration in ResultConfigurationUpdates + // (the client-side setting), the EncryptionConfiguration in the workgroup's + // ResultConfiguration will be updated with the new value. For more information, + // see [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + RemoveEncryptionConfiguration *bool + + // If set to "true", removes the Amazon Web Services account ID previously + // specified for ResultConfiguration$ExpectedBucketOwner. 
If set to "false" or not set, and a value is present in the + // ExpectedBucketOwner in ResultConfigurationUpdates (the client-side setting), + // the ExpectedBucketOwner in the workgroup's ResultConfiguration is updated with + // the new value. For more information, see [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + RemoveExpectedBucketOwner *bool + + // If set to "true", indicates that the previously-specified query results + // location (also known as a client-side setting) for queries in this workgroup + // should be ignored and set to null. If set to "false" or not set, and a value is + // present in the OutputLocation in ResultConfigurationUpdates (the client-side + // setting), the OutputLocation in the workgroup's ResultConfiguration will be + // updated with the new value. For more information, see [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + RemoveOutputLocation *bool + + noSmithyDocumentSerde +} + +// Specifies whether previous query results are reused, and if so, their maximum +// age. +type ResultReuseByAgeConfiguration struct { + + // True if previous query results can be reused when the query is run; otherwise, + // false. The default is false. + // + // This member is required. + Enabled bool + + // Specifies, in minutes, the maximum age of a previous query result that Athena + // should consider for reuse. The default is 60. + MaxAgeInMinutes *int32 + + noSmithyDocumentSerde +} + +// Specifies the query result reuse behavior for the query. +type ResultReuseConfiguration struct { + + // Specifies whether previous query results are reused, and if so, their maximum + // age. + ResultReuseByAgeConfiguration *ResultReuseByAgeConfiguration + + noSmithyDocumentSerde +} + +// Contains information about whether the result of a previous query was reused. +type ResultReuseInformation struct { + + // True if a previous query result was reused; false if the result was generated + // from a new run of the query. + // + // This member is required. + ReusedPreviousResult bool + + noSmithyDocumentSerde +} + +// The metadata and rows that make up a query result set. The metadata describes +// the column structure and data types. To return a ResultSet object, use GetQueryResults. +type ResultSet struct { + + // The metadata that describes the column structure and data types of a table of + // query results. + ResultSetMetadata *ResultSetMetadata + + // The rows in the table. + Rows []Row + + noSmithyDocumentSerde +} + +// The metadata that describes the column structure and data types of a table of +// query results. To return a ResultSetMetadata object, use GetQueryResults. +type ResultSetMetadata struct { + + // Information about the columns returned in a query result metadata. + ColumnInfo []ColumnInfo + + noSmithyDocumentSerde +} + +// The rows that make up a query result table. +type Row struct { + + // The data that populates a row in a query result table. + Data []Datum + + noSmithyDocumentSerde +} + +// Contains session configuration information. +type SessionConfiguration struct { + + // If query and calculation results are encrypted in Amazon S3, indicates the + // encryption option used (for example, SSE_KMS or CSE_KMS ) and key information. 
+ EncryptionConfiguration *EncryptionConfiguration + + // The ARN of the execution role used to access user resources for Spark sessions + // and Identity Center enabled workgroups. This property applies only to Spark + // enabled workgroups and Identity Center enabled workgroups. + ExecutionRole *string + + // The idle timeout in seconds for the session. + IdleTimeoutSeconds *int64 + + // The Amazon S3 location that stores information for the notebook. + WorkingDirectory *string + + noSmithyDocumentSerde +} + +// Contains statistics for a session. +type SessionStatistics struct { + + // The data processing unit execution time for a session in milliseconds. + DpuExecutionInMillis *int64 + + noSmithyDocumentSerde +} + +// Contains information about the status of a session. +type SessionStatus struct { + + // The date and time that the session ended. + EndDateTime *time.Time + + // The date and time starting at which the session became idle. Can be empty if + // the session is not currently idle. + IdleSinceDateTime *time.Time + + // The most recent date and time that the session was modified. + LastModifiedDateTime *time.Time + + // The date and time that the session started. + StartDateTime *time.Time + + // The state of the session. A description of each state follows. + // + // CREATING - The session is being started, including acquiring resources. + // + // CREATED - The session has been started. + // + // IDLE - The session is able to accept a calculation. + // + // BUSY - The session is processing another task and is unable to accept a + // calculation. + // + // TERMINATING - The session is in the process of shutting down. + // + // TERMINATED - The session and its resources are no longer running. + // + // DEGRADED - The session has no healthy coordinators. + // + // FAILED - Due to a failure, the session and its resources are no longer running. + State SessionState + + // The reason for the session state change (for example, canceled because the + // session was terminated). + StateChangeReason *string + + noSmithyDocumentSerde +} + +// Contains summary information about a session. +type SessionSummary struct { + + // The session description. + Description *string + + // The engine version used by the session (for example, PySpark engine version 3 ). + EngineVersion *EngineVersion + + // The notebook version. + NotebookVersion *string + + // The session ID. + SessionId *string + + // Contains information about the session status. + Status *SessionStatus + + noSmithyDocumentSerde +} + +// Contains metadata for a table. +type TableMetadata struct { + + // The name of the table. + // + // This member is required. + Name *string + + // A list of the columns in the table. + Columns []Column + + // The time that the table was created. + CreateTime *time.Time + + // The last time the table was accessed. + LastAccessTime *time.Time + + // A set of custom key/value pairs for table properties. + Parameters map[string]string + + // A list of the partition keys in the table. + PartitionKeys []Column + + // The type of table. In Athena, only EXTERNAL_TABLE is supported. + TableType *string + + noSmithyDocumentSerde +} + +// A label that you assign to a resource. Athena resources include workgroups, +// data catalogs, and capacity reservations. Each tag consists of a key and an +// optional value, both of which you define. For example, you can use tags to +// categorize Athena resources by purpose, owner, or environment. 
Use a consistent +// set of tag keys to make it easier to search and filter the resources in your +// account. For best practices, see [Tagging Best Practices]. Tag keys can be from 1 to 128 UTF-8 Unicode +// characters, and tag values can be from 0 to 256 UTF-8 Unicode characters. Tags +// can use letters and numbers representable in UTF-8, and the following +// characters: + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys +// must be unique per resource. If you specify more than one tag, separate them by +// commas. +// +// [Tagging Best Practices]: https://docs.aws.amazon.com/whitepapers/latest/tagging-best-practices/tagging-best-practices.html +type Tag struct { + + // A tag key. The tag key length is from 1 to 128 Unicode characters in UTF-8. You + // can use letters and numbers representable in UTF-8, and the following + // characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique per + // resource. + Key *string + + // A tag value. The tag value length is from 0 to 256 Unicode characters in UTF-8. + // You can use letters and numbers representable in UTF-8, and the following + // characters: + - = . _ : / @. Tag values are case-sensitive. + Value *string + + noSmithyDocumentSerde +} + +// Information about a named query ID that could not be processed. +type UnprocessedNamedQueryId struct { + + // The error code returned when the processing request for the named query failed, + // if applicable. + ErrorCode *string + + // The error message returned when the processing request for the named query + // failed, if applicable. + ErrorMessage *string + + // The unique identifier of the named query. + NamedQueryId *string + + noSmithyDocumentSerde +} + +// The name of a prepared statement that could not be returned. +type UnprocessedPreparedStatementName struct { + + // The error code returned when the request for the prepared statement failed. + ErrorCode *string + + // The error message containing the reason why the prepared statement could not be + // returned. The following error messages are possible: + // + // - INVALID_INPUT - The name of the prepared statement that was provided is not + // valid (for example, the name is too long). + // + // - STATEMENT_NOT_FOUND - A prepared statement with the name provided could not + // be found. + // + // - UNAUTHORIZED - The requester does not have permission to access the + // workgroup that contains the prepared statement. + ErrorMessage *string + + // The name of a prepared statement that could not be returned due to an error. + StatementName *string + + noSmithyDocumentSerde +} + +// Describes a query execution that failed to process. +type UnprocessedQueryExecutionId struct { + + // The error code returned when the query execution failed to process, if + // applicable. + ErrorCode *string + + // The error message returned when the query execution failed to process, if + // applicable. + ErrorMessage *string + + // The unique identifier of the query execution. + QueryExecutionId *string + + noSmithyDocumentSerde +} + +// A workgroup, which contains a name, description, creation time, state, and +// other configuration, listed under WorkGroup$Configuration. 
Each workgroup enables you to isolate +// queries for you or your group of users from other queries in the same account, +// to configure the query results location and the encryption configuration (known +// as workgroup settings), to enable sending query metrics to Amazon CloudWatch, +// and to establish per-query data usage control limits for all queries in a +// workgroup. The workgroup settings override is specified in +// EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration . See WorkGroupConfiguration$EnforceWorkGroupConfiguration. +type WorkGroup struct { + + // The workgroup name. + // + // This member is required. + Name *string + + // The configuration of the workgroup, which includes the location in Amazon S3 + // where query and calculation results are stored, the encryption configuration, if + // any, used for query and calculation results; whether the Amazon CloudWatch + // Metrics are enabled for the workgroup; whether workgroup settings override + // client-side settings; and the data usage limits for the amount of data scanned + // per query or per workgroup. The workgroup settings override is specified in + // EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration . See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + Configuration *WorkGroupConfiguration + + // The date and time the workgroup was created. + CreationTime *time.Time + + // The workgroup description. + Description *string + + // The ARN of the IAM Identity Center enabled application associated with the + // workgroup. + IdentityCenterApplicationArn *string + + // The state of the workgroup: ENABLED or DISABLED. + State WorkGroupState + + noSmithyDocumentSerde +} + +// The configuration of the workgroup, which includes the location in Amazon S3 +// where query and calculation results are stored, the encryption option, if any, +// used for query and calculation results, whether the Amazon CloudWatch Metrics +// are enabled for the workgroup and whether workgroup settings override query +// settings, and the data usage limits for the amount of data scanned per query or +// per workgroup. The workgroup settings override is specified in +// EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration . See WorkGroupConfiguration$EnforceWorkGroupConfiguration +// . +type WorkGroupConfiguration struct { + + // Specifies a user defined JSON string that is passed to the notebook engine. + AdditionalConfiguration *string + + // The upper data usage limit (cutoff) for the amount of bytes a single query in a + // workgroup is allowed to scan. + BytesScannedCutoffPerQuery *int64 + + // Specifies the KMS key that is used to encrypt the user's data stores in Athena. + // This setting does not apply to Athena SQL workgroups. + CustomerContentEncryptionConfiguration *CustomerContentEncryptionConfiguration + + // Enforces a minimal level of encryption for the workgroup for query and + // calculation results that are written to Amazon S3. When enabled, workgroup users + // can set encryption only to the minimum level set by the administrator or higher + // when they submit queries. + // + // The EnforceWorkGroupConfiguration setting takes precedence over the + // EnableMinimumEncryptionConfiguration flag. This means that if + // EnforceWorkGroupConfiguration is true, the EnableMinimumEncryptionConfiguration + // flag is ignored, and the workgroup configuration for encryption is used. 
+ EnableMinimumEncryptionConfiguration *bool + + // If set to "true", the settings for the workgroup override client-side settings. + // If set to "false", client-side settings are used. For more information, see [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + EnforceWorkGroupConfiguration *bool + + // The engine version that all queries running on the workgroup use. Queries on + // the AmazonAthenaPreviewFunctionality workgroup run on the preview engine + // regardless of this setting. + EngineVersion *EngineVersion + + // The ARN of the execution role used to access user resources for Spark sessions + // and IAM Identity Center enabled workgroups. This property applies only to Spark + // enabled workgroups and IAM Identity Center enabled workgroups. The property is + // required for IAM Identity Center enabled workgroups. + ExecutionRole *string + + // Specifies whether the workgroup is IAM Identity Center supported. + IdentityCenterConfiguration *IdentityCenterConfiguration + + // Indicates that the Amazon CloudWatch metrics are enabled for the workgroup. + PublishCloudWatchMetricsEnabled *bool + + // Specifies whether Amazon S3 access grants are enabled for query results. + QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration + + // If set to true , allows members assigned to a workgroup to reference Amazon S3 + // Requester Pays buckets in queries. If set to false , workgroup members cannot + // query data from Requester Pays buckets, and queries that retrieve data from + // Requester Pays buckets cause an error. The default is false . For more + // information about Requester Pays buckets, see [Requester Pays Buckets]in the Amazon Simple Storage + // Service Developer Guide. + // + // [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html + RequesterPaysEnabled *bool + + // The configuration for the workgroup, which includes the location in Amazon S3 + // where query and calculation results are stored and the encryption option, if + // any, used for query and calculation results. To run the query, you must specify + // the query results location using one of the ways: either in the workgroup using + // this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. If none of them + // is set, Athena issues an error that no output location is provided. + ResultConfiguration *ResultConfiguration + + noSmithyDocumentSerde +} + +// The configuration information that will be updated for this workgroup, which +// includes the location in Amazon S3 where query and calculation results are +// stored, the encryption option, if any, used for query results, whether the +// Amazon CloudWatch Metrics are enabled for the workgroup, whether the workgroup +// settings override the client-side settings, and the data usage limit for the +// amount of bytes scanned per query, if it is specified. +type WorkGroupConfigurationUpdates struct { + + // Contains a user defined string in JSON format for a Spark-enabled workgroup. + AdditionalConfiguration *string + + // The upper limit (cutoff) for the amount of bytes a single query in a workgroup + // is allowed to scan. + BytesScannedCutoffPerQuery *int64 + + // Specifies the customer managed KMS key that is used to encrypt the user's data + // stores in Athena. 
When an Amazon Web Services managed key is used, this value is + // null. This setting does not apply to Athena SQL workgroups. + CustomerContentEncryptionConfiguration *CustomerContentEncryptionConfiguration + + // Enforces a minimal level of encryption for the workgroup for query and + // calculation results that are written to Amazon S3. When enabled, workgroup users + // can set encryption only to the minimum level set by the administrator or higher + // when they submit queries. This setting does not apply to Spark-enabled + // workgroups. + // + // The EnforceWorkGroupConfiguration setting takes precedence over the + // EnableMinimumEncryptionConfiguration flag. This means that if + // EnforceWorkGroupConfiguration is true, the EnableMinimumEncryptionConfiguration + // flag is ignored, and the workgroup configuration for encryption is used. + EnableMinimumEncryptionConfiguration *bool + + // If set to "true", the settings for the workgroup override client-side settings. + // If set to "false" client-side settings are used. For more information, see [Workgroup Settings Override Client-Side Settings]. + // + // [Workgroup Settings Override Client-Side Settings]: https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html + EnforceWorkGroupConfiguration *bool + + // The engine version requested when a workgroup is updated. After the update, all + // queries on the workgroup run on the requested engine version. If no value was + // previously set, the default is Auto. Queries on the + // AmazonAthenaPreviewFunctionality workgroup run on the preview engine regardless + // of this setting. + EngineVersion *EngineVersion + + // The ARN of the execution role used to access user resources for Spark sessions + // and Identity Center enabled workgroups. This property applies only to Spark + // enabled workgroups and Identity Center enabled workgroups. + ExecutionRole *string + + // Indicates whether this workgroup enables publishing metrics to Amazon + // CloudWatch. + PublishCloudWatchMetricsEnabled *bool + + // Specifies whether Amazon S3 access grants are enabled for query results. + QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration + + // Indicates that the data usage control limit per query is removed. WorkGroupConfiguration$BytesScannedCutoffPerQuery + RemoveBytesScannedCutoffPerQuery *bool + + // Removes content encryption configuration from an Apache Spark-enabled Athena + // workgroup. + RemoveCustomerContentEncryptionConfiguration *bool + + // If set to true , allows members assigned to a workgroup to specify Amazon S3 + // Requester Pays buckets in queries. If set to false , workgroup members cannot + // query data from Requester Pays buckets, and queries that retrieve data from + // Requester Pays buckets cause an error. The default is false . For more + // information about Requester Pays buckets, see [Requester Pays Buckets]in the Amazon Simple Storage + // Service Developer Guide. + // + // [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html + RequesterPaysEnabled *bool + + // The result configuration information about the queries in this workgroup that + // will be updated. Includes the updated results location and an updated option for + // encrypting query results. 
+ ResultConfigurationUpdates *ResultConfigurationUpdates + + noSmithyDocumentSerde +} + +// The summary information for the workgroup, which includes its name, state, +// description, and the date and time it was created. +type WorkGroupSummary struct { + + // The workgroup creation date and time. + CreationTime *time.Time + + // The workgroup description. + Description *string + + // The engine version setting for all queries on the workgroup. Queries on the + // AmazonAthenaPreviewFunctionality workgroup run on the preview engine regardless + // of this setting. + EngineVersion *EngineVersion + + // The ARN of the IAM Identity Center enabled application associated with the + // workgroup. + IdentityCenterApplicationArn *string + + // The name of the workgroup. + Name *string + + // The state of the workgroup. + State WorkGroupState + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/athena/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/validators.go new file mode 100644 index 00000000..4cc784b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/athena/validators.go @@ -0,0 +1,2706 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/athena/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpBatchGetNamedQuery struct { +} + +func (*validateOpBatchGetNamedQuery) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchGetNamedQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchGetNamedQueryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchGetNamedQueryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpBatchGetPreparedStatement struct { +} + +func (*validateOpBatchGetPreparedStatement) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchGetPreparedStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchGetPreparedStatementInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchGetPreparedStatementInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpBatchGetQueryExecution struct { +} + +func (*validateOpBatchGetQueryExecution) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpBatchGetQueryExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*BatchGetQueryExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpBatchGetQueryExecutionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type 
validateOpCancelCapacityReservation struct { +} + +func (*validateOpCancelCapacityReservation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCancelCapacityReservation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CancelCapacityReservationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCancelCapacityReservationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateCapacityReservation struct { +} + +func (*validateOpCreateCapacityReservation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateCapacityReservation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateCapacityReservationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateCapacityReservationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateDataCatalog struct { +} + +func (*validateOpCreateDataCatalog) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateDataCatalog) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateDataCatalogInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateDataCatalogInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateNamedQuery struct { +} + +func (*validateOpCreateNamedQuery) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateNamedQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateNamedQueryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateNamedQueryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateNotebook struct { +} + +func (*validateOpCreateNotebook) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateNotebook) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateNotebookInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateNotebookInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreatePreparedStatement struct { +} + +func (*validateOpCreatePreparedStatement) ID() string { + return "OperationInputValidation" +} + +func (m 
*validateOpCreatePreparedStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreatePreparedStatementInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreatePreparedStatementInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreatePresignedNotebookUrl struct { +} + +func (*validateOpCreatePresignedNotebookUrl) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreatePresignedNotebookUrl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreatePresignedNotebookUrlInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreatePresignedNotebookUrlInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateWorkGroup struct { +} + +func (*validateOpCreateWorkGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateWorkGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateWorkGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateWorkGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteCapacityReservation struct { +} + +func (*validateOpDeleteCapacityReservation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteCapacityReservation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteCapacityReservationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteCapacityReservationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteDataCatalog struct { +} + +func (*validateOpDeleteDataCatalog) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteDataCatalog) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteDataCatalogInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteDataCatalogInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteNamedQuery struct { +} + +func (*validateOpDeleteNamedQuery) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteNamedQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + 
out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteNamedQueryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteNamedQueryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteNotebook struct { +} + +func (*validateOpDeleteNotebook) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteNotebook) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteNotebookInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteNotebookInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeletePreparedStatement struct { +} + +func (*validateOpDeletePreparedStatement) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeletePreparedStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeletePreparedStatementInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeletePreparedStatementInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteWorkGroup struct { +} + +func (*validateOpDeleteWorkGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteWorkGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteWorkGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteWorkGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpExportNotebook struct { +} + +func (*validateOpExportNotebook) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpExportNotebook) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ExportNotebookInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpExportNotebookInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetCalculationExecutionCode struct { +} + +func (*validateOpGetCalculationExecutionCode) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetCalculationExecutionCode) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetCalculationExecutionCodeInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input 
parameters type %T", in.Parameters) + } + if err := validateOpGetCalculationExecutionCodeInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetCalculationExecution struct { +} + +func (*validateOpGetCalculationExecution) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetCalculationExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetCalculationExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetCalculationExecutionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetCalculationExecutionStatus struct { +} + +func (*validateOpGetCalculationExecutionStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetCalculationExecutionStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetCalculationExecutionStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetCalculationExecutionStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetCapacityAssignmentConfiguration struct { +} + +func (*validateOpGetCapacityAssignmentConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetCapacityAssignmentConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetCapacityAssignmentConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetCapacityAssignmentConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetCapacityReservation struct { +} + +func (*validateOpGetCapacityReservation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetCapacityReservation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetCapacityReservationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetCapacityReservationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetDatabase struct { +} + +func (*validateOpGetDatabase) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetDatabase) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetDatabaseInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + 
if err := validateOpGetDatabaseInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetDataCatalog struct { +} + +func (*validateOpGetDataCatalog) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetDataCatalog) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetDataCatalogInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetDataCatalogInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetNamedQuery struct { +} + +func (*validateOpGetNamedQuery) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetNamedQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetNamedQueryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetNamedQueryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetNotebookMetadata struct { +} + +func (*validateOpGetNotebookMetadata) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetNotebookMetadata) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetNotebookMetadataInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetNotebookMetadataInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetPreparedStatement struct { +} + +func (*validateOpGetPreparedStatement) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetPreparedStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetPreparedStatementInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetPreparedStatementInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetQueryExecution struct { +} + +func (*validateOpGetQueryExecution) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetQueryExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetQueryExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetQueryExecutionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetQueryResults struct { +} + +func (*validateOpGetQueryResults) ID() string { + return 
"OperationInputValidation" +} + +func (m *validateOpGetQueryResults) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetQueryResultsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetQueryResultsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetQueryRuntimeStatistics struct { +} + +func (*validateOpGetQueryRuntimeStatistics) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetQueryRuntimeStatistics) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetQueryRuntimeStatisticsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetQueryRuntimeStatisticsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetSession struct { +} + +func (*validateOpGetSession) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetSessionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetSessionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetSessionStatus struct { +} + +func (*validateOpGetSessionStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetSessionStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetSessionStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetSessionStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetTableMetadata struct { +} + +func (*validateOpGetTableMetadata) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetTableMetadata) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetTableMetadataInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetTableMetadataInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetWorkGroup struct { +} + +func (*validateOpGetWorkGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetWorkGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) 
{ + input, ok := in.Parameters.(*GetWorkGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetWorkGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpImportNotebook struct { +} + +func (*validateOpImportNotebook) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpImportNotebook) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ImportNotebookInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpImportNotebookInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListCalculationExecutions struct { +} + +func (*validateOpListCalculationExecutions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListCalculationExecutions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListCalculationExecutionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListCalculationExecutionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListDatabases struct { +} + +func (*validateOpListDatabases) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListDatabases) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListDatabasesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListDatabasesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListExecutors struct { +} + +func (*validateOpListExecutors) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListExecutors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListExecutorsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListExecutorsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListNotebookMetadata struct { +} + +func (*validateOpListNotebookMetadata) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListNotebookMetadata) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListNotebookMetadataInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListNotebookMetadataInput(input); err != nil { + return out, 
metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListNotebookSessions struct { +} + +func (*validateOpListNotebookSessions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListNotebookSessions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListNotebookSessionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListNotebookSessionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListPreparedStatements struct { +} + +func (*validateOpListPreparedStatements) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListPreparedStatements) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListPreparedStatementsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListPreparedStatementsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListSessions struct { +} + +func (*validateOpListSessions) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListSessions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListSessionsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListSessionsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTableMetadata struct { +} + +func (*validateOpListTableMetadata) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTableMetadata) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTableMetadataInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTableMetadataInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTagsForResource struct { +} + +func (*validateOpListTagsForResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsForResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsForResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutCapacityAssignmentConfiguration struct { +} + +func (*validateOpPutCapacityAssignmentConfiguration) ID() string { + return 
"OperationInputValidation" +} + +func (m *validateOpPutCapacityAssignmentConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutCapacityAssignmentConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutCapacityAssignmentConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartCalculationExecution struct { +} + +func (*validateOpStartCalculationExecution) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartCalculationExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartCalculationExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartCalculationExecutionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartQueryExecution struct { +} + +func (*validateOpStartQueryExecution) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartQueryExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartQueryExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartQueryExecutionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartSession struct { +} + +func (*validateOpStartSession) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartSessionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartSessionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStopCalculationExecution struct { +} + +func (*validateOpStopCalculationExecution) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStopCalculationExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StopCalculationExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStopCalculationExecutionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStopQueryExecution struct { +} + +func (*validateOpStopQueryExecution) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStopQueryExecution) HandleInitialize(ctx context.Context, in 
middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StopQueryExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStopQueryExecutionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTerminateSession struct { +} + +func (*validateOpTerminateSession) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTerminateSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TerminateSessionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTerminateSessionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateCapacityReservation struct { +} + +func (*validateOpUpdateCapacityReservation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateCapacityReservation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateCapacityReservationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateCapacityReservationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateDataCatalog struct { +} + +func (*validateOpUpdateDataCatalog) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateDataCatalog) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateDataCatalogInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateDataCatalogInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateNamedQuery struct { +} + +func (*validateOpUpdateNamedQuery) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateNamedQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateNamedQueryInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateNamedQueryInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateNotebook struct { +} + +func (*validateOpUpdateNotebook) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateNotebook) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateNotebookInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateNotebookInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateNotebookMetadata struct { +} + +func (*validateOpUpdateNotebookMetadata) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateNotebookMetadata) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateNotebookMetadataInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateNotebookMetadataInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdatePreparedStatement struct { +} + +func (*validateOpUpdatePreparedStatement) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdatePreparedStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdatePreparedStatementInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdatePreparedStatementInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateWorkGroup struct { +} + +func (*validateOpUpdateWorkGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateWorkGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateWorkGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateWorkGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) 
+}
+
+func addOpBatchGetNamedQueryValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpBatchGetNamedQuery{}, middleware.After)
+}
+
+func addOpBatchGetPreparedStatementValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpBatchGetPreparedStatement{}, middleware.After)
+}
+
+func addOpBatchGetQueryExecutionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpBatchGetQueryExecution{}, middleware.After)
+}
+
+func addOpCancelCapacityReservationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCancelCapacityReservation{}, middleware.After)
+}
+
+func addOpCreateCapacityReservationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateCapacityReservation{}, middleware.After)
+}
+
+func addOpCreateDataCatalogValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateDataCatalog{}, middleware.After)
+}
+
+func addOpCreateNamedQueryValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateNamedQuery{}, middleware.After)
+}
+
+func addOpCreateNotebookValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateNotebook{}, middleware.After)
+}
+
+func addOpCreatePreparedStatementValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreatePreparedStatement{}, middleware.After)
+}
+
+func addOpCreatePresignedNotebookUrlValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreatePresignedNotebookUrl{}, middleware.After)
+}
+
+func addOpCreateWorkGroupValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateWorkGroup{}, middleware.After)
+}
+
+func addOpDeleteCapacityReservationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteCapacityReservation{}, middleware.After)
+}
+
+func addOpDeleteDataCatalogValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteDataCatalog{}, middleware.After)
+}
+
+func addOpDeleteNamedQueryValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteNamedQuery{}, middleware.After)
+}
+
+func addOpDeleteNotebookValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteNotebook{}, middleware.After)
+}
+
+func addOpDeletePreparedStatementValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeletePreparedStatement{}, middleware.After)
+}
+
+func addOpDeleteWorkGroupValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteWorkGroup{}, middleware.After)
+}
+
+func addOpExportNotebookValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpExportNotebook{}, middleware.After)
+}
+
+func addOpGetCalculationExecutionCodeValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetCalculationExecutionCode{}, middleware.After)
+}
+
+func addOpGetCalculationExecutionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetCalculationExecution{}, middleware.After)
+}
+
+func addOpGetCalculationExecutionStatusValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetCalculationExecutionStatus{}, middleware.After)
+}
+
+func addOpGetCapacityAssignmentConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetCapacityAssignmentConfiguration{}, middleware.After)
+}
+
+func addOpGetCapacityReservationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetCapacityReservation{}, middleware.After)
+}
+
+func addOpGetDatabaseValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetDatabase{}, middleware.After)
+}
+
+func addOpGetDataCatalogValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetDataCatalog{}, middleware.After)
+}
+
+func addOpGetNamedQueryValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetNamedQuery{}, middleware.After)
+}
+
+func addOpGetNotebookMetadataValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetNotebookMetadata{}, middleware.After)
+}
+
+func addOpGetPreparedStatementValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetPreparedStatement{}, middleware.After)
+}
+
+func addOpGetQueryExecutionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetQueryExecution{}, middleware.After)
+}
+
+func addOpGetQueryResultsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetQueryResults{}, middleware.After)
+}
+
+func addOpGetQueryRuntimeStatisticsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetQueryRuntimeStatistics{}, middleware.After)
+}
+
+func addOpGetSessionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetSession{}, middleware.After)
+}
+
+func addOpGetSessionStatusValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetSessionStatus{}, middleware.After)
+}
+
+func addOpGetTableMetadataValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetTableMetadata{}, middleware.After)
+}
+
+func addOpGetWorkGroupValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetWorkGroup{}, middleware.After)
+}
+
+func addOpImportNotebookValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpImportNotebook{}, middleware.After)
+}
+
+func addOpListCalculationExecutionsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListCalculationExecutions{}, middleware.After)
+}
+
+func addOpListDatabasesValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListDatabases{}, middleware.After)
+}
+
+func addOpListExecutorsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListExecutors{}, middleware.After)
+}
+
+func addOpListNotebookMetadataValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListNotebookMetadata{}, middleware.After)
+}
+
+func addOpListNotebookSessionsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListNotebookSessions{}, middleware.After)
+}
+
+func addOpListPreparedStatementsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListPreparedStatements{}, middleware.After)
+}
+
+func addOpListSessionsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListSessions{}, middleware.After)
+}
+
+func addOpListTableMetadataValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListTableMetadata{}, middleware.After)
+}
+
+func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After)
+}
+
+func addOpPutCapacityAssignmentConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutCapacityAssignmentConfiguration{}, middleware.After)
+}
+
+func addOpStartCalculationExecutionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpStartCalculationExecution{}, middleware.After)
+}
+
+func addOpStartQueryExecutionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpStartQueryExecution{}, middleware.After)
+}
+
+func addOpStartSessionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpStartSession{}, middleware.After)
+}
+
+func addOpStopCalculationExecutionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpStopCalculationExecution{}, middleware.After)
+}
+
+func addOpStopQueryExecutionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpStopQueryExecution{}, middleware.After)
+}
+
+func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpTagResource{}, middleware.After)
+}
+
+func addOpTerminateSessionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpTerminateSession{}, middleware.After)
+}
+
+func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After)
+}
+
+func addOpUpdateCapacityReservationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUpdateCapacityReservation{}, middleware.After)
+}
+
+func addOpUpdateDataCatalogValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUpdateDataCatalog{}, middleware.After)
+}
+
+func addOpUpdateNamedQueryValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUpdateNamedQuery{}, middleware.After)
+}
+
+func addOpUpdateNotebookValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUpdateNotebook{}, middleware.After)
+}
+
+func addOpUpdateNotebookMetadataValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUpdateNotebookMetadata{}, middleware.After)
+}
+
+func addOpUpdatePreparedStatementValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUpdatePreparedStatement{}, middleware.After)
+}
+
+func addOpUpdateWorkGroupValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUpdateWorkGroup{}, middleware.After)
+}
+
+func validateAclConfiguration(v *types.AclConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AclConfiguration"}
+ if len(v.S3AclOption) == 0 {
+
invalidParams.Add(smithy.NewErrParamRequired("S3AclOption")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCustomerContentEncryptionConfiguration(v *types.CustomerContentEncryptionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CustomerContentEncryptionConfiguration"} + if v.KmsKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("KmsKey")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEncryptionConfiguration(v *types.EncryptionConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EncryptionConfiguration"} + if len(v.EncryptionOption) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EncryptionOption")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateEngineConfiguration(v *types.EngineConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EngineConfiguration"} + if v.MaxConcurrentDpus == nil { + invalidParams.Add(smithy.NewErrParamRequired("MaxConcurrentDpus")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateQueryResultsS3AccessGrantsConfiguration(v *types.QueryResultsS3AccessGrantsConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "QueryResultsS3AccessGrantsConfiguration"} + if v.EnableS3AccessGrants == nil { + invalidParams.Add(smithy.NewErrParamRequired("EnableS3AccessGrants")) + } + if len(v.AuthenticationType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("AuthenticationType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateResultConfiguration(v *types.ResultConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ResultConfiguration"} + if v.EncryptionConfiguration != nil { + if err := validateEncryptionConfiguration(v.EncryptionConfiguration); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.AclConfiguration != nil { + if err := validateAclConfiguration(v.AclConfiguration); err != nil { + invalidParams.AddNested("AclConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateResultConfigurationUpdates(v *types.ResultConfigurationUpdates) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ResultConfigurationUpdates"} + if v.EncryptionConfiguration != nil { + if err := validateEncryptionConfiguration(v.EncryptionConfiguration); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.AclConfiguration != nil { + if err := validateAclConfiguration(v.AclConfiguration); err != nil { + invalidParams.AddNested("AclConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateResultReuseByAgeConfiguration(v *types.ResultReuseByAgeConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ResultReuseByAgeConfiguration"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } 
+} + +func validateResultReuseConfiguration(v *types.ResultReuseConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ResultReuseConfiguration"} + if v.ResultReuseByAgeConfiguration != nil { + if err := validateResultReuseByAgeConfiguration(v.ResultReuseByAgeConfiguration); err != nil { + invalidParams.AddNested("ResultReuseByAgeConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWorkGroupConfiguration(v *types.WorkGroupConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WorkGroupConfiguration"} + if v.ResultConfiguration != nil { + if err := validateResultConfiguration(v.ResultConfiguration); err != nil { + invalidParams.AddNested("ResultConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.CustomerContentEncryptionConfiguration != nil { + if err := validateCustomerContentEncryptionConfiguration(v.CustomerContentEncryptionConfiguration); err != nil { + invalidParams.AddNested("CustomerContentEncryptionConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.QueryResultsS3AccessGrantsConfiguration != nil { + if err := validateQueryResultsS3AccessGrantsConfiguration(v.QueryResultsS3AccessGrantsConfiguration); err != nil { + invalidParams.AddNested("QueryResultsS3AccessGrantsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWorkGroupConfigurationUpdates(v *types.WorkGroupConfigurationUpdates) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WorkGroupConfigurationUpdates"} + if v.ResultConfigurationUpdates != nil { + if err := validateResultConfigurationUpdates(v.ResultConfigurationUpdates); err != nil { + invalidParams.AddNested("ResultConfigurationUpdates", err.(smithy.InvalidParamsError)) + } + } + if v.CustomerContentEncryptionConfiguration != nil { + if err := validateCustomerContentEncryptionConfiguration(v.CustomerContentEncryptionConfiguration); err != nil { + invalidParams.AddNested("CustomerContentEncryptionConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.QueryResultsS3AccessGrantsConfiguration != nil { + if err := validateQueryResultsS3AccessGrantsConfiguration(v.QueryResultsS3AccessGrantsConfiguration); err != nil { + invalidParams.AddNested("QueryResultsS3AccessGrantsConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchGetNamedQueryInput(v *BatchGetNamedQueryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetNamedQueryInput"} + if v.NamedQueryIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("NamedQueryIds")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpBatchGetPreparedStatementInput(v *BatchGetPreparedStatementInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetPreparedStatementInput"} + if v.PreparedStatementNames == nil { + invalidParams.Add(smithy.NewErrParamRequired("PreparedStatementNames")) + } + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateOpBatchGetQueryExecutionInput(v *BatchGetQueryExecutionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "BatchGetQueryExecutionInput"} + if v.QueryExecutionIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryExecutionIds")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCancelCapacityReservationInput(v *CancelCapacityReservationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CancelCapacityReservationInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateCapacityReservationInput(v *CreateCapacityReservationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateCapacityReservationInput"} + if v.TargetDpus == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetDpus")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateDataCatalogInput(v *CreateDataCatalogInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateDataCatalogInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateNamedQueryInput(v *CreateNamedQueryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateNamedQueryInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Database == nil { + invalidParams.Add(smithy.NewErrParamRequired("Database")) + } + if v.QueryString == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryString")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateNotebookInput(v *CreateNotebookInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateNotebookInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreatePreparedStatementInput(v *CreatePreparedStatementInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreatePreparedStatementInput"} + if v.StatementName == nil { + invalidParams.Add(smithy.NewErrParamRequired("StatementName")) + } + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if v.QueryStatement == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryStatement")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreatePresignedNotebookUrlInput(v *CreatePresignedNotebookUrlInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreatePresignedNotebookUrlInput"} + if v.SessionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("SessionId")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateWorkGroupInput(v *CreateWorkGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateWorkGroupInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Configuration != nil { + if err := validateWorkGroupConfiguration(v.Configuration); err != nil { + invalidParams.AddNested("Configuration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteCapacityReservationInput(v *DeleteCapacityReservationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteCapacityReservationInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteDataCatalogInput(v *DeleteDataCatalogInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteDataCatalogInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteNamedQueryInput(v *DeleteNamedQueryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteNamedQueryInput"} + if v.NamedQueryId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NamedQueryId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteNotebookInput(v *DeleteNotebookInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteNotebookInput"} + if v.NotebookId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotebookId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeletePreparedStatementInput(v *DeletePreparedStatementInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeletePreparedStatementInput"} + if v.StatementName == nil { + invalidParams.Add(smithy.NewErrParamRequired("StatementName")) + } + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteWorkGroupInput(v *DeleteWorkGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteWorkGroupInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpExportNotebookInput(v *ExportNotebookInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ExportNotebookInput"} + if v.NotebookId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotebookId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetCalculationExecutionCodeInput(v *GetCalculationExecutionCodeInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetCalculationExecutionCodeInput"} + if v.CalculationExecutionId == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("CalculationExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetCalculationExecutionInput(v *GetCalculationExecutionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetCalculationExecutionInput"} + if v.CalculationExecutionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CalculationExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetCalculationExecutionStatusInput(v *GetCalculationExecutionStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetCalculationExecutionStatusInput"} + if v.CalculationExecutionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CalculationExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetCapacityAssignmentConfigurationInput(v *GetCapacityAssignmentConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetCapacityAssignmentConfigurationInput"} + if v.CapacityReservationName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CapacityReservationName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetCapacityReservationInput(v *GetCapacityReservationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetCapacityReservationInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetDatabaseInput(v *GetDatabaseInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetDatabaseInput"} + if v.CatalogName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CatalogName")) + } + if v.DatabaseName == nil { + invalidParams.Add(smithy.NewErrParamRequired("DatabaseName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetDataCatalogInput(v *GetDataCatalogInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetDataCatalogInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetNamedQueryInput(v *GetNamedQueryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetNamedQueryInput"} + if v.NamedQueryId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NamedQueryId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetNotebookMetadataInput(v *GetNotebookMetadataInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetNotebookMetadataInput"} + if v.NotebookId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotebookId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetPreparedStatementInput(v *GetPreparedStatementInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetPreparedStatementInput"} + if 
v.StatementName == nil { + invalidParams.Add(smithy.NewErrParamRequired("StatementName")) + } + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetQueryExecutionInput(v *GetQueryExecutionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetQueryExecutionInput"} + if v.QueryExecutionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetQueryResultsInput(v *GetQueryResultsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetQueryResultsInput"} + if v.QueryExecutionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetQueryRuntimeStatisticsInput(v *GetQueryRuntimeStatisticsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetQueryRuntimeStatisticsInput"} + if v.QueryExecutionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetSessionInput(v *GetSessionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetSessionInput"} + if v.SessionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("SessionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetSessionStatusInput(v *GetSessionStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetSessionStatusInput"} + if v.SessionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("SessionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetTableMetadataInput(v *GetTableMetadataInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetTableMetadataInput"} + if v.CatalogName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CatalogName")) + } + if v.DatabaseName == nil { + invalidParams.Add(smithy.NewErrParamRequired("DatabaseName")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetWorkGroupInput(v *GetWorkGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetWorkGroupInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpImportNotebookInput(v *ImportNotebookInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ImportNotebookInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } 
+} + +func validateOpListCalculationExecutionsInput(v *ListCalculationExecutionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListCalculationExecutionsInput"} + if v.SessionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("SessionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListDatabasesInput(v *ListDatabasesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListDatabasesInput"} + if v.CatalogName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CatalogName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListExecutorsInput(v *ListExecutorsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListExecutorsInput"} + if v.SessionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("SessionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListNotebookMetadataInput(v *ListNotebookMetadataInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListNotebookMetadataInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListNotebookSessionsInput(v *ListNotebookSessionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListNotebookSessionsInput"} + if v.NotebookId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotebookId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListPreparedStatementsInput(v *ListPreparedStatementsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListPreparedStatementsInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListSessionsInput(v *ListSessionsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListSessionsInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTableMetadataInput(v *ListTableMetadataInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTableMetadataInput"} + if v.CatalogName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CatalogName")) + } + if v.DatabaseName == nil { + invalidParams.Add(smithy.NewErrParamRequired("DatabaseName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} + if v.ResourceARN == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceARN")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutCapacityAssignmentConfigurationInput(v *PutCapacityAssignmentConfigurationInput) error { + if v == nil { + 
return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutCapacityAssignmentConfigurationInput"} + if v.CapacityReservationName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CapacityReservationName")) + } + if v.CapacityAssignments == nil { + invalidParams.Add(smithy.NewErrParamRequired("CapacityAssignments")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartCalculationExecutionInput(v *StartCalculationExecutionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartCalculationExecutionInput"} + if v.SessionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("SessionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartQueryExecutionInput(v *StartQueryExecutionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartQueryExecutionInput"} + if v.QueryString == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryString")) + } + if v.ResultConfiguration != nil { + if err := validateResultConfiguration(v.ResultConfiguration); err != nil { + invalidParams.AddNested("ResultConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.ResultReuseConfiguration != nil { + if err := validateResultReuseConfiguration(v.ResultReuseConfiguration); err != nil { + invalidParams.AddNested("ResultReuseConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartSessionInput(v *StartSessionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartSessionInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if v.EngineConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("EngineConfiguration")) + } else if v.EngineConfiguration != nil { + if err := validateEngineConfiguration(v.EngineConfiguration); err != nil { + invalidParams.AddNested("EngineConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStopCalculationExecutionInput(v *StopCalculationExecutionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StopCalculationExecutionInput"} + if v.CalculationExecutionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CalculationExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStopQueryExecutionInput(v *StopQueryExecutionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StopQueryExecutionInput"} + if v.QueryExecutionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryExecutionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceARN == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceARN")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTerminateSessionInput(v 
*TerminateSessionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TerminateSessionInput"} + if v.SessionId == nil { + invalidParams.Add(smithy.NewErrParamRequired("SessionId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceARN == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceARN")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateCapacityReservationInput(v *UpdateCapacityReservationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateCapacityReservationInput"} + if v.TargetDpus == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetDpus")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateDataCatalogInput(v *UpdateDataCatalogInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateDataCatalogInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateNamedQueryInput(v *UpdateNamedQueryInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateNamedQueryInput"} + if v.NamedQueryId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NamedQueryId")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.QueryString == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryString")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateNotebookInput(v *UpdateNotebookInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateNotebookInput"} + if v.NotebookId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotebookId")) + } + if v.Payload == nil { + invalidParams.Add(smithy.NewErrParamRequired("Payload")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateNotebookMetadataInput(v *UpdateNotebookMetadataInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateNotebookMetadataInput"} + if v.NotebookId == nil { + invalidParams.Add(smithy.NewErrParamRequired("NotebookId")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdatePreparedStatementInput(v *UpdatePreparedStatementInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdatePreparedStatementInput"} + if v.StatementName == nil { + invalidParams.Add(smithy.NewErrParamRequired("StatementName")) + } + if v.WorkGroup == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if v.QueryStatement == nil { + invalidParams.Add(smithy.NewErrParamRequired("QueryStatement")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateWorkGroupInput(v *UpdateWorkGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateWorkGroupInput"} + if v.WorkGroup == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkGroup")) + } + if v.ConfigurationUpdates != nil { + if err := validateWorkGroupConfigurationUpdates(v.ConfigurationUpdates); err != nil { + invalidParams.AddNested("ConfigurationUpdates", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index c3525fd2..c81265a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,43 @@ +# v1.12.3 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 + +# v1.12.2 (2025-01-24) + +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.12.1 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. + +# v1.12.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. + +# v1.11.5 (2024-09-20) + +* No change notes available for this release. + +# v1.11.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + +# v1.11.3 (2024-06-28) + +* No change notes available for this release. + +# v1.11.2 (2024-03-29) + +* No change notes available for this release. + +# v1.11.1 (2024-02-21) + +* No change notes available for this release. + +# v1.11.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. + # v1.10.4 (2023-12-07) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index cc638400..d83e533e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.10.4" +const goModuleVersion = "1.12.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md index 8f974036..af4dcdc1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -1,3 +1,176 @@ +# v1.7.0 (2025-03-11) + +* **Feature**: Add extra check during output checksum validation so the validation skip warning would not be logged if object is not fetched from s3 + +# v1.6.2 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2025-02-10) + +* **Feature**: Support CRC64NVME flex checksums. 
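To make the CRC64NVME support noted in the entry above concrete: the algorithms.go hunk later in this diff registers the algorithm by building a crc64 table from an inverted NVME polynomial. The following is a minimal, self-contained sketch (not part of the vendored change) that reproduces that digest with the standard library; the only assumption beyond what the diff shows is that flex checksums travel base64-encoded in x-amz-checksum-* headers.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc64"
)

// Inverted NVME polynomial, copied from the algorithms.go hunk in this diff.
const crc64NVME = 0x9a6c_9329_ac4b_c9b5

func main() {
	// Build the same hasher the vendored checksum middleware constructs for
	// AlgorithmCRC64NVME: crc64.New over a table made from the NVME polynomial.
	h := crc64.New(crc64.MakeTable(crc64NVME))
	h.Write([]byte("example payload"))

	// Header name shown for illustration only; the SDK derives it from the
	// algorithm name at runtime.
	fmt.Println("x-amz-checksum-crc64nvme:", base64.StdEncoding.EncodeToString(h.Sum(nil)))
}
```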
+ +# v1.5.6 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.5 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.4 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.3 (2025-01-24) + +* **Bug Fix**: Enable request checksum validation mode by default +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.5.2 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.5.1 (2025-01-16) + +* **Bug Fix**: Fix nil dereference panic for operations that require checksums, but do not have an input setting for which algorithm to use. + +# v1.5.0 (2025-01-15) + +* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions + # v1.2.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go index a17041c3..dab97fb2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go @@ -9,6 +9,7 @@ import ( "fmt" "hash" "hash/crc32" + "hash/crc64" "io" "strings" "sync" @@ -30,13 +31,20 @@ const ( // AlgorithmSHA256 represents SHA256 hash algorithm AlgorithmSHA256 Algorithm = "SHA256" + + // AlgorithmCRC64NVME represents CRC64NVME hash algorithm + AlgorithmCRC64NVME Algorithm = "CRC64NVME" ) +// inverted NVME polynomial as required by crc64.MakeTable +const crc64NVME = 0x9a6c_9329_ac4b_c9b5 + var supportedAlgorithms = []Algorithm{ AlgorithmCRC32C, AlgorithmCRC32, AlgorithmSHA1, AlgorithmSHA256, + AlgorithmCRC64NVME, } func (a Algorithm) String() string { return string(a) } @@ -89,6 +97,8 @@ func NewAlgorithmHash(v Algorithm) (hash.Hash, error) { return crc32.NewIEEE(), nil case AlgorithmCRC32C: return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil + case AlgorithmCRC64NVME: + return crc64.New(crc64.MakeTable(crc64NVME)), nil default: return nil, fmt.Errorf("unknown checksum algorithm, %v", v) } @@ -106,6 +116,8 @@ func AlgorithmChecksumLength(v Algorithm) (int, error) { return crc32.Size, nil case AlgorithmCRC32C: return crc32.Size, nil + case AlgorithmCRC64NVME: + return crc64.Size, nil default: return 0, fmt.Errorf("unknown checksum algorithm, %v", v) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go index a88534d2..148f77de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go @@ -3,4 +3,4 @@ package checksum // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.2.10" +const goModuleVersion = "1.7.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go index 1b727acb..274d649f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go @@ -1,6 +1,7 @@ package checksum import ( + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/smithy-go/middleware" ) @@ -14,11 +15,16 @@ type InputMiddlewareOptions struct { // and true, or false if no algorithm is specified. GetAlgorithm func(interface{}) (string, bool) - // Forces the middleware to compute the input payload's checksum. The - // request will fail if the algorithm is not specified or unable to compute - // the checksum. + // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. + // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, + // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated RequireChecksum bool + // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. 
If RequireChecksum is + // set to true, checksum will be calculated and this field will be ignored, otherwise + // RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + // Enables support for wrapping the serialized input payload with a // content-encoding: aws-check wrapper, and including a trailer for the // algorithm's checksum value. @@ -46,33 +52,16 @@ type InputMiddlewareOptions struct { // AddInputMiddleware adds the middleware for performing checksum computing // of request payloads, and checksum validation of response payloads. +// +// Deprecated: This internal-only runtime API is frozen. Do not call or modify +// it in new code. Checksum-enabled service operations now generate this +// middleware setup code inline per #2507. func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) { - // TODO ensure this works correctly with presigned URLs - - // Middleware stack: - // * (OK)(Initialize) --none-- - // * (OK)(Serialize) EndpointResolver - // * (OK)(Build) ComputeContentLength - // * (AD)(Build) Header ComputeInputPayloadChecksum - // * SIGNED Payload - If HTTP && not support trailing checksum - // * UNSIGNED Payload - If HTTPS && not support trailing checksum - // * (RM)(Build) ContentChecksum - OK to remove - // * (OK)(Build) ComputePayloadHash - // * v4.dynamicPayloadSigningMiddleware - // * v4.computePayloadSHA256 - // * v4.unsignedPayload - // (OK)(Build) Set computedPayloadHash header - // * (OK)(Finalize) Retry - // * (AD)(Finalize) Trailer ComputeInputPayloadChecksum, - // * Requires HTTPS && support trailing checksum - // * UNSIGNED Payload - // * Finalize run if HTTPS && support trailing checksum - // * (OK)(Finalize) Signing - // * (OK)(Deserialize) --none-- - // Initial checksum configuration look up middleware - err = stack.Initialize.Add(&setupInputContext{ - GetAlgorithm: options.GetAlgorithm, + err = stack.Initialize.Add(&SetupInputContext{ + GetAlgorithm: options.GetAlgorithm, + RequireChecksum: options.RequireChecksum, + RequestChecksumCalculation: options.RequestChecksumCalculation, }, middleware.Before) if err != nil { return err @@ -80,8 +69,7 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) stack.Build.Remove("ContentChecksum") - inputChecksum := &computeInputPayloadChecksum{ - RequireChecksum: options.RequireChecksum, + inputChecksum := &ComputeInputPayloadChecksum{ EnableTrailingChecksum: options.EnableTrailingChecksum, EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, @@ -92,9 +80,8 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) // If trailing checksum is not supported no need for finalize handler to be added. if options.EnableTrailingChecksum { - trailerMiddleware := &addInputChecksumTrailer{ + trailerMiddleware := &AddInputChecksumTrailer{ EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, - RequireChecksum: inputChecksum.RequireChecksum, EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, } @@ -109,10 +96,10 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) // RemoveInputMiddleware Removes the compute input payload checksum middleware // handlers from the stack. 
func RemoveInputMiddleware(stack *middleware.Stack) { - id := (*setupInputContext)(nil).ID() + id := (*SetupInputContext)(nil).ID() stack.Initialize.Remove(id) - id = (*computeInputPayloadChecksum)(nil).ID() + id = (*ComputeInputPayloadChecksum)(nil).ID() stack.Finalize.Remove(id) } @@ -126,6 +113,12 @@ type OutputMiddlewareOptions struct { // mode and true, or false if no mode is specified. GetValidationMode func(interface{}) (string, bool) + // SetValidationMode is a function to set the checksum validation mode of input parameters + SetValidationMode func(interface{}, string) + + // ResponseChecksumValidation is the user config to opt-in/out response checksum validation + ResponseChecksumValidation aws.ResponseChecksumValidation + // The set of checksum algorithms that should be used for response payload // checksum validation. The algorithm(s) used will be a union of the // output's returned algorithms and this set. @@ -134,7 +127,7 @@ type OutputMiddlewareOptions struct { ValidationAlgorithms []string // If set the middleware will ignore output multipart checksums. Otherwise - // an checksum format error will be returned by the middleware. + // a checksum format error will be returned by the middleware. IgnoreMultipartValidation bool // When set the middleware will log when output does not have checksum or @@ -150,7 +143,9 @@ type OutputMiddlewareOptions struct { // checksum. func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error { err := stack.Initialize.Add(&setupOutputContext{ - GetValidationMode: options.GetValidationMode, + GetValidationMode: options.GetValidationMode, + SetValidationMode: options.SetValidationMode, + ResponseChecksumValidation: options.ResponseChecksumValidation, }, middleware.Before) if err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go new file mode 100644 index 00000000..861a4429 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go @@ -0,0 +1,90 @@ +package checksum + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +var supportedChecksumFeatures = map[Algorithm]awsmiddleware.UserAgentFeature{ + AlgorithmCRC32: awsmiddleware.UserAgentFeatureRequestChecksumCRC32, + AlgorithmCRC32C: awsmiddleware.UserAgentFeatureRequestChecksumCRC32C, + AlgorithmSHA1: awsmiddleware.UserAgentFeatureRequestChecksumSHA1, + AlgorithmSHA256: awsmiddleware.UserAgentFeatureRequestChecksumSHA256, + AlgorithmCRC64NVME: awsmiddleware.UserAgentFeatureRequestChecksumCRC64, +} + +// RequestChecksumMetricsTracking is the middleware to track operation request's checksum usage +type RequestChecksumMetricsTracking struct { + RequestChecksumCalculation aws.RequestChecksumCalculation + UserAgent *awsmiddleware.RequestUserAgent +} + +// ID provides the middleware identifier +func (m *RequestChecksumMetricsTracking) ID() string { + return "AWSChecksum:RequestMetricsTracking" +} + +// HandleBuild checks request checksum config and checksum value sent +// and sends corresponding feature id to user agent +func (m *RequestChecksumMetricsTracking) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, 
+) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + switch m.RequestChecksumCalculation { + case aws.RequestChecksumCalculationWhenSupported: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenSupported) + case aws.RequestChecksumCalculationWhenRequired: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenRequired) + } + + for algo, feat := range supportedChecksumFeatures { + checksumHeader := AlgorithmHTTPHeader(algo) + if checksum := req.Header.Get(checksumHeader); checksum != "" { + m.UserAgent.AddUserAgentFeature(feat) + } + } + + return next.HandleBuild(ctx, in) +} + +// ResponseChecksumMetricsTracking is the middleware to track operation response's checksum usage +type ResponseChecksumMetricsTracking struct { + ResponseChecksumValidation aws.ResponseChecksumValidation + UserAgent *awsmiddleware.RequestUserAgent +} + +// ID provides the middleware identifier +func (m *ResponseChecksumMetricsTracking) ID() string { + return "AWSChecksum:ResponseMetricsTracking" +} + +// HandleBuild checks the response checksum config and sends corresponding feature id to user agent +func (m *ResponseChecksumMetricsTracking) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + switch m.ResponseChecksumValidation { + case aws.ResponseChecksumValidationWhenSupported: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenSupported) + case aws.ResponseChecksumValidationWhenRequired: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenRequired) + } + + return next.HandleBuild(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go index 7ffca33f..ee8ff545 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go @@ -7,6 +7,7 @@ import ( "hash" "io" "strconv" + "strings" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" @@ -16,7 +17,6 @@ import ( ) const ( - contentMD5Header = "Content-Md5" streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" ) @@ -39,8 +39,8 @@ func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) { m.Set(computedInputChecksumsKey{}, vs) } -// computeInputPayloadChecksum middleware computes payload checksum -type computeInputPayloadChecksum struct { +// ComputeInputPayloadChecksum middleware computes payload checksum +type ComputeInputPayloadChecksum struct { // Enables support for wrapping the serialized input payload with a // content-encoding: aws-check wrapper, and including a trailer for the // algorithm's checksum value. @@ -49,13 +49,6 @@ type computeInputPayloadChecksum struct { // the Algorithm's header is already set on the request. 
EnableTrailingChecksum bool - // States that a checksum is required to be included for the operation. If - // Input does not specify a checksum, fallback to built in MD5 checksum is - // used. - // - // Replaces smithy-go's ContentChecksum middleware. - RequireChecksum bool - // Enables support for computing the SHA256 checksum of input payloads // along with the algorithm specified checksum. Prevents downstream // middleware handlers (computePayloadSHA256) re-reading the payload. @@ -78,7 +71,7 @@ type computeInputPayloadChecksum struct { type useTrailer struct{} // ID provides the middleware's identifier. -func (m *computeInputPayloadChecksum) ID() string { +func (m *ComputeInputPayloadChecksum) ID() string { return "AWSChecksum:ComputeInputPayloadChecksum" } @@ -98,18 +91,27 @@ func (e computeInputHeaderChecksumError) Error() string { } func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err } -// HandleBuild handles computing the payload's checksum, in the following cases: +// HandleFinalize handles computing the payload's checksum, in the following cases: // - Is HTTP, not HTTPS // - RequireChecksum is true, and no checksums were specified via the Input // - Trailing checksums are not supported // // The build handler must be inserted in the stack before ContentPayloadHash // and after ComputeContentLength. -func (m *computeInputPayloadChecksum) HandleFinalize( +func (m *ComputeInputPayloadChecksum) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + var checksum string + algorithm, ok, err := getInputAlgorithm(ctx) + if err != nil { + return out, metadata, err + } + if !ok { + return next.HandleFinalize(ctx, in) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, computeInputHeaderChecksumError{ @@ -117,8 +119,6 @@ func (m *computeInputPayloadChecksum) HandleFinalize( } } - var algorithm Algorithm - var checksum string defer func() { if algorithm == "" || checksum == "" || err != nil { return @@ -130,29 +130,14 @@ func (m *computeInputPayloadChecksum) HandleFinalize( }) }() - // If no algorithm was specified, and the operation requires a checksum, - // fallback to the legacy content MD5 checksum. - algorithm, ok, err = getInputAlgorithm(ctx) - if err != nil { - return out, metadata, err - } else if !ok { - if m.RequireChecksum { - checksum, err = setMD5Checksum(ctx, req) - if err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to compute stream's MD5 checksum", - Err: err, - } - } - algorithm = Algorithm("MD5") + // If any checksum header is already set nothing to do. + for header := range req.Header { + h := strings.ToUpper(header) + if strings.HasPrefix(h, "X-AMZ-CHECKSUM-") { + algorithm = Algorithm(strings.TrimPrefix(h, "X-AMZ-CHECKSUM-")) + checksum = req.Header.Get(header) + return next.HandleFinalize(ctx, in) } - return next.HandleFinalize(ctx, in) - } - - // If the checksum header is already set nothing to do. 
- checksumHeader := AlgorithmHTTPHeader(algorithm) - if checksum = req.Header.Get(checksumHeader); checksum != "" { - return next.HandleFinalize(ctx, in) } computePayloadHash := m.EnableComputePayloadHash @@ -217,6 +202,7 @@ func (m *computeInputPayloadChecksum) HandleFinalize( } } + checksumHeader := AlgorithmHTTPHeader(algorithm) req.Header.Set(checksumHeader, checksum) if computePayloadHash { @@ -242,28 +228,37 @@ func (e computeInputTrailingChecksumError) Error() string { } func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err } -// addInputChecksumTrailer +// AddInputChecksumTrailer adds HTTP checksum when // - Is HTTPS, not HTTP // - A checksum was specified via the Input // - Trailing checksums are supported. -type addInputChecksumTrailer struct { +type AddInputChecksumTrailer struct { EnableTrailingChecksum bool - RequireChecksum bool EnableComputePayloadHash bool EnableDecodedContentLengthHeader bool } // ID identifies this middleware. -func (*addInputChecksumTrailer) ID() string { +func (*AddInputChecksumTrailer) ID() string { return "addInputChecksumTrailer" } // HandleFinalize wraps the request body to write the trailing checksum. -func (m *addInputChecksumTrailer) HandleFinalize( +func (m *AddInputChecksumTrailer) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + algorithm, ok, err := getInputAlgorithm(ctx) + if err != nil { + return out, metadata, computeInputTrailingChecksumError{ + Msg: "failed to get algorithm", + Err: err, + } + } else if !ok { + return next.HandleFinalize(ctx, in) + } + if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled { return next.HandleFinalize(ctx, in) } @@ -281,26 +276,13 @@ func (m *addInputChecksumTrailer) HandleFinalize( } } - // If no algorithm was specified, there is nothing to do. - algorithm, ok, err := getInputAlgorithm(ctx) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to get algorithm", - Err: err, - } - } else if !ok { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "no algorithm specified", + // If any checksum header is already set nothing to do. + for header := range req.Header { + if strings.HasPrefix(strings.ToLower(header), "x-amz-checksum-") { + return next.HandleFinalize(ctx, in) } } - // If the checksum header is already set before finalize could run, there - // is nothing to do. - checksumHeader := AlgorithmHTTPHeader(algorithm) - if req.Header.Get(checksumHeader) != "" { - return next.HandleFinalize(ctx, in) - } - stream := req.GetStream() streamLength, err := getRequestStreamLength(req) if err != nil { @@ -444,39 +426,3 @@ func getRequestStreamLength(req *smithyhttp.Request) (int64, error) { return -1, nil } - -// setMD5Checksum computes the MD5 of the request payload and sets it to the -// Content-MD5 header. Returning the MD5 base64 encoded string or error. -// -// If the MD5 is already set as the Content-MD5 header, that value will be -// returned, and nothing else will be done. -// -// If the payload is empty, no MD5 will be computed. No error will be returned. -// Empty payloads do not have an MD5 value. -// -// Replaces the smithy-go middleware for httpChecksum trait. 
-func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) { - if v := req.Header.Get(contentMD5Header); len(v) != 0 { - return v, nil - } - stream := req.GetStream() - if stream == nil { - return "", nil - } - - if !req.IsStreamSeekable() { - return "", fmt.Errorf( - "unseekable stream is not supported for computing md5 checksum") - } - - v, err := computeMD5Checksum(stream) - if err != nil { - return "", err - } - if err := req.RewindStream(); err != nil { - return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err) - } - // set the 'Content-MD5' header - req.Header.Set(contentMD5Header, string(v)) - return string(v), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go index 3db73afe..3347e88c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go @@ -3,43 +3,62 @@ package checksum import ( "context" + "github.com/aws/aws-sdk-go-v2/aws" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/smithy-go/middleware" ) -// setupChecksumContext is the initial middleware that looks up the input +const ( + checksumValidationModeEnabled = "ENABLED" +) + +// SetupInputContext is the initial middleware that looks up the input // used to configure checksum behavior. This middleware must be executed before // input validation step or any other checksum middleware. -type setupInputContext struct { +type SetupInputContext struct { // GetAlgorithm is a function to get the checksum algorithm of the // input payload from the input parameters. // // Given the input parameter value, the function must return the algorithm // and true, or false if no algorithm is specified. GetAlgorithm func(interface{}) (string, bool) + + // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. + // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, + // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequireChecksum bool + + // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is + // set to true, checksum will be calculated and this field will be ignored, otherwise + // RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation } // ID for the middleware -func (m *setupInputContext) ID() string { +func (m *SetupInputContext) ID() string { return "AWSChecksum:SetupInputContext" } // HandleInitialize initialization middleware that setups up the checksum // context based on the input parameters provided in the stack. -func (m *setupInputContext) HandleInitialize( +func (m *SetupInputContext) HandleInitialize( ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, ) ( out middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { - // Check if validation algorithm is specified. 
+ // nil check here is for operations that require checksum but do not have input algorithm setting if m.GetAlgorithm != nil { - // check is input resource has a checksum algorithm - algorithm, ok := m.GetAlgorithm(in.Parameters) - if ok && len(algorithm) != 0 { + if algorithm, ok := m.GetAlgorithm(in.Parameters); ok { ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm) + return next.HandleInitialize(ctx, in) } } + if m.RequireChecksum || m.RequestChecksumCalculation == aws.RequestChecksumCalculationWhenSupported { + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) + } + return next.HandleInitialize(ctx, in) } @@ -50,6 +69,12 @@ type setupOutputContext struct { // Given the input parameter value, the function must return the validation // mode and true, or false if no mode is specified. GetValidationMode func(interface{}) (string, bool) + + // SetValidationMode is a function to set the checksum validation mode of input parameters + SetValidationMode func(interface{}, string) + + // ResponseChecksumValidation states user config to opt-in/out checksum validation + ResponseChecksumValidation aws.ResponseChecksumValidation } // ID for the middleware @@ -64,13 +89,12 @@ func (m *setupOutputContext) HandleInitialize( ) ( out middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { - // Check if validation mode is specified. - if m.GetValidationMode != nil { - // check is input resource has a checksum algorithm - mode, ok := m.GetValidationMode(in.Parameters) - if ok && len(mode) != 0 { - ctx = setContextOutputValidationMode(ctx, mode) - } + + mode, _ := m.GetValidationMode(in.Parameters) + + if m.ResponseChecksumValidation == aws.ResponseChecksumValidationWhenSupported || mode == checksumValidationModeEnabled { + m.SetValidationMode(in.Parameters, checksumValidationModeEnabled) + ctx = setContextOutputValidationMode(ctx, checksumValidationModeEnabled) } return next.HandleInitialize(ctx, in) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go index 9fde12d8..6b56f8c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go @@ -3,6 +3,7 @@ package checksum import ( "context" "fmt" + "net/http" "strings" "github.com/aws/smithy-go" @@ -55,7 +56,7 @@ func (m *validateOutputPayloadChecksum) ID() string { } // HandleDeserialize is a Deserialize middleware that wraps the HTTP response -// body with an io.ReadCloser that will validate the its checksum. +// body with an io.ReadCloser that will validate its checksum. func (m *validateOutputPayloadChecksum) HandleDeserialize( ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, ) ( @@ -66,8 +67,7 @@ func (m *validateOutputPayloadChecksum) HandleDeserialize( return out, metadata, err } - // If there is no validation mode specified nothing is supported. - if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" { + if mode := getContextOutputValidationMode(ctx); mode != checksumValidationModeEnabled { return out, metadata, err } @@ -90,13 +90,11 @@ func (m *validateOutputPayloadChecksum) HandleDeserialize( algorithmToUse = algorithm } - // TODO this must validate the validation mode is set to enabled. 
- logger := middleware.GetLogger(ctx) // Skip validation if no checksum algorithm or checksum is available. if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 { - if m.LogValidationSkipped { + if response.StatusCode != 404 && response.Body != http.NoBody && m.LogValidationSkipped { // TODO this probably should have more information about the // operation output that won't be validated. logger.Logf(logging.Warn, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index a65890b5..2b5ceb4b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,158 @@ +# v1.12.15 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.12.9 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.20 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.19 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.18 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.7 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.6 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.5 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.4 (2024-03-05) + +* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility. + +# v1.11.3 (2024-03-04) + +* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API. + +# v1.11.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.10.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go index cc919701..5d5286f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go @@ -27,13 +27,21 @@ func GetIsPresigning(ctx context.Context) bool { type isPresigningKey struct{} -// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that +// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that // will update the stack's context to be flagged as being invoked for the // purpose of presigning. -func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { +func AddAsIsPresigningMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) } +// AddAsIsPresigingMiddleware is an alias for backwards compatibility. +// +// Deprecated: This API was released with a typo. Use +// [AddAsIsPresigningMiddleware] instead. 
+func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { + return AddAsIsPresigningMiddleware(stack) +} + type asIsPresigningMiddleware struct{} func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 073e8866..a165a100 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.10.10" +const goModuleVersion = "1.12.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md index c4df2176..ff2188db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md @@ -1,3 +1,149 @@ +# v1.18.15 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.12 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.18.9 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2024-03-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go index 986affe1..a0129140 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go @@ -3,4 +3,4 @@ package s3shared // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.10" +const goModuleVersion = "1.18.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go index f52f2f11..7251588b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go @@ -5,6 +5,7 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -30,6 +31,8 @@ func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware ) { out, metadata, err = next.HandleDeserialize(ctx, in) + span, _ := tracing.GetSpan(ctx) + resp, ok := out.RawResponse.(*smithyhttp.Response) if !ok { // No raw response to wrap with. @@ -40,12 +43,14 @@ func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 { // set reqID on metadata for successful responses. 
awsmiddleware.SetRequestIDMetadata(&metadata, v) + span.SetProperty("aws.request_id", v) } // look up host-id if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 { // set reqID on metadata for successful responses. SetHostIDMetadata(&metadata, v) + span.SetProperty("aws.extended_request_id", v) } return out, metadata, err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md index 89a5a0d5..f2be7dd9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -1,3 +1,362 @@ +# v1.79.2 (2025-04-10) + +* No change notes available for this release. + +# v1.79.1 (2025-04-03) + +* No change notes available for this release. + +# v1.79.0 (2025-03-31) + +* **Feature**: Amazon S3 adds support for S3 Access Points for directory buckets in AWS Dedicated Local Zones + +# v1.78.2 (2025-03-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.78.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.78.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.77.1 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.77.0 (2025-02-14) + +* **Feature**: Added support for Content-Range header in HeadObject response. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.76.1 (2025-02-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.76.0 (2025-02-06) + +* **Feature**: Updated list of the valid AWS Region values for the LocationConstraint parameter for general purpose buckets. + +# v1.75.4 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.3 (2025-02-04) + +* No change notes available for this release. + +# v1.75.2 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.1 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.0 (2025-01-29) + +* **Feature**: Change the type of MpuObjectSize in CompleteMultipartUploadRequest from int to long. + +# v1.74.1 (2025-01-24) + +* **Bug Fix**: Enable request checksum validation mode by default +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.74.0 (2025-01-22) + +* **Feature**: Add a client config option to disable logging when output checksum validation is skipped due to an unsupported algorithm. + +# v1.73.2 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.73.1 (2025-01-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.73.0 (2025-01-15) + +* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. 
The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. +* **Feature**: This change enhances integrity protections for new SDK requests to S3. S3 SDKs now support the CRC64NVME checksum algorithm, full object checksums for multipart S3 objects, and new default integrity protections for S3 requests. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.72.3 (2025-01-14) + +* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information. + +# v1.72.2 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.72.1 (2025-01-08) + +* No change notes available for this release. + +# v1.72.0 (2025-01-03) + +* **Feature**: This change is only for updating the model regexp of CopySource which is not for validation but only for documentation and user guide change. + +# v1.71.1 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.71.0 (2024-12-03.2) + +* **Feature**: Amazon S3 Metadata stores object metadata in read-only, fully managed Apache Iceberg metadata tables that you can query. You can create metadata table configurations for S3 general purpose buckets. + +# v1.70.0 (2024-12-02) + +* **Feature**: Amazon S3 introduces support for AWS Dedicated Local Zones +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.69.0 (2024-11-25) + +* **Feature**: Amazon Simple Storage Service / Features: Add support for ETag based conditional writes in PutObject and CompleteMultiPartUpload APIs to prevent unintended object modifications. + +# v1.68.0 (2024-11-21) + +* **Feature**: Add support for conditional deletes for the S3 DeleteObject and DeleteObjects APIs. Add support for write offset bytes option used to append to objects with the S3 PutObject API. + +# v1.67.1 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.67.0 (2024-11-14) + +* **Feature**: This release updates the ListBuckets API Reference documentation in support of the new 10,000 general purpose bucket default quota on all AWS accounts. To increase your bucket quota from 10,000 to up to 1 million buckets, simply request a quota increase via Service Quotas. + +# v1.66.3 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.66.2 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.66.1 (2024-10-25) + +* **Bug Fix**: Update presign post URL resolution to use the exact result from EndpointResolverV2 + +# v1.66.0 (2024-10-16) + +* **Feature**: Add support for the new optional bucket-region and prefix query parameters in the ListBuckets API. For ListBuckets requests that express pagination, Amazon S3 will now return both the bucket names and associated AWS regions in the response. 
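
For the checksum behavior described in the v1.73.0 and v1.74.x entries above, the client options added elsewhere in this update (`RequestChecksumCalculation` and `ResponseChecksumValidation` on `s3.Options`) select between the `when_supported` and `when_required` modes in code; the same choice can also come from shared config (`request_checksum_calculation`, `response_checksum_validation`) or the `AWS_REQUEST_CHECKSUM_CALCULATION` / `AWS_RESPONSE_CHECKSUM_VALIDATION` environment variables. The snippet below is a minimal sketch of the in-code form only: the bucket and key are placeholders, and `aws.ResponseChecksumValidationWhenRequired` is assumed to mirror the `aws.RequestChecksumCalculationWhenRequired` constant that appears later in this diff.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Shared config and environment variables can drive the same settings;
	// values set here take effect per client.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Opt out of the new "calculate when supported" default and only compute
	// request checksums (and validate response checksums) when the operation
	// requires them.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
		o.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired
	})

	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket name
		Key:    aws.String("example-key"),    // placeholder object key
		// Body omitted; a nil body uploads an empty object.
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Leaving both options unset keeps the defaults introduced by these releases: request checksums (CRC32) are calculated when the operation supports them, and response checksums are validated when supported.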
+ +# v1.65.3 (2024-10-11) + +* **Bug Fix**: **BREAKING CHANGE**: S3 ReplicationRuleFilter and LifecycleRuleFilter shapes are being changed from union to structure types + +# v1.65.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.65.1 (2024-10-07) + +* **Bug Fix**: **CHANGE IN BEHAVIOR**: Allow serialization of headers with empty string for prefix headers. We are deploying this fix because the behavior is actively preventing users from transmitting keys with empty values to the service. If you were setting metadata keys with empty values before this change, they will now actually be sent to the service. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.65.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.64.1 (2024-10-03) + +* No change notes available for this release. + +# v1.64.0 (2024-10-02) + +* **Feature**: This release introduces a header representing the minimum object size limit for Lifecycle transitions. + +# v1.63.3 (2024-09-27) + +* No change notes available for this release. + +# v1.63.2 (2024-09-25) + +* No change notes available for this release. + +# v1.63.1 (2024-09-23) + +* No change notes available for this release. + +# v1.63.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.62.0 (2024-09-18) + +* **Feature**: Added SSE-KMS support for directory buckets. + +# v1.61.3 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.61.2 (2024-09-04) + +* No change notes available for this release. + +# v1.61.1 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.61.0 (2024-08-28) + +* **Feature**: Add presignPost for s3 PutObject + +# v1.60.1 (2024-08-22) + +* No change notes available for this release. + +# v1.60.0 (2024-08-20) + +* **Feature**: Amazon Simple Storage Service / Features : Add support for conditional writes for PutObject and CompleteMultipartUpload APIs. + +# v1.59.0 (2024-08-15) + +* **Feature**: Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API. +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.3 (2024-08-02) + +* **Bug Fix**: Add assurance tests for auth scheme selection logic. + +# v1.58.2 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.1 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.0 (2024-07-02) + +* **Feature**: Added response overrides to Head Object requests. + +# v1.57.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.57.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.56.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.56.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.2 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.1 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.0 (2024-06-05) + +* **Feature**: Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. These changes will not change any of the existing S3 API functionality. +* **Bug Fix**: Add S3-specific smithy protocol tests. + +# v1.54.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.3 (2024-05-23) + +* **Bug Fix**: Prevent parsing failures for nonstandard `Expires` values in responses. If the SDK cannot parse the value set in the response header for this field it will now be returned as `nil`. A new field, `ExpiresString`, has been added that will retain the unparsed value from the response (regardless of whether it came back in a format recognized by the SDK). + +# v1.54.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.0 (2024-05-14) + +* **Feature**: Updated a few x-id in the http uri traits + +# v1.53.2 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.53.1 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.0 (2024-03-18) + +* **Feature**: Fix two issues with response root node names. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.52.1 (2024-03-15) + +* **Documentation**: Documentation updates for Amazon S3. + +# v1.52.0 (2024-03-13) + +* **Feature**: This release makes the default option for S3 on Outposts request signing to use the SigV4A algorithm when using AWS Common Runtime (CRT). + +# v1.51.4 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.3 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.2 (2024-03-04) + +* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.50.3 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.2 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.50.1 (2024-02-19) + +* **Bug Fix**: Prevent potential panic caused by invalid comparison of credentials. + +# v1.50.0 (2024-02-16) + +* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters. 
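
The "Add tracing and metrics support to service clients" (v1.63.0) and "Add support for HTTP client metrics" (v1.65.0) entries above correspond to the `TracerProvider` and `MeterProvider` client options wired up in `api_client.go` later in this diff, which default to smithy-go's no-op implementations. The sketch below shows one way a caller might supply its own providers; the helper name is illustrative, and the no-op values passed in `main` stand in for real adapters (for example, OpenTelemetry-backed implementations of the smithy-go interfaces).

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/metrics"
	"github.com/aws/smithy-go/tracing"
)

// newInstrumentedS3 builds an S3 client with caller-supplied smithy-go
// providers. Nil providers are left alone so the client falls back to the
// no-op defaults, matching resolveTracerProvider/resolveMeterProvider in
// this update.
func newInstrumentedS3(ctx context.Context, tp tracing.TracerProvider, mp metrics.MeterProvider) (*s3.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		if tp != nil {
			o.TracerProvider = tp // e.g. an OpenTelemetry adapter
		}
		if mp != nil {
			o.MeterProvider = mp
		}
	}), nil
}

func main() {
	// Explicit no-op providers behave the same as the defaults; real
	// deployments would pass adapters for their telemetry backend instead.
	client, err := newInstrumentedS3(context.Background(), &tracing.NopTracerProvider{}, metrics.NopMeterProvider{})
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```

Because the defaults resolve to no-op providers, callers that never set these options see no behavioral change from this upgrade.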
+ +# v1.49.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.48.1 (2024-01-24) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go index db35814d..40035a43 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -14,6 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" "github.com/aws/aws-sdk-go-v2/internal/v4a" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -22,22 +24,156 @@ import ( s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) const ServiceID = "S3" const ServiceAPIVersion = "2006-03-01" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) 
+ + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/s3") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/s3") +} + // Client provides the API client to make operations call for Amazon Simple // Storage Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. 
Provide @@ -60,6 +196,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4a(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -82,6 +222,8 @@ func New(options Options, optFns ...func(*Options)) *Client { finalizeExpressCredentials(&options, client) + initializeTimeOffsetResolver(client) + return client } @@ -94,8 +236,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -125,15 +274,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -171,7 +361,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", 
middleware.After); err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -259,15 +449,17 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + RequestChecksumCalculation: cfg.RequestChecksumCalculation, + ResponseChecksumValidation: cfg.ResponseChecksumValidation, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -394,17 +586,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { } func addClientUserAgent(stack *middleware.Stack, options Options) error { - if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion)(stack); err != nil { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { return err } + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion) if len(options.AppID) > 0 { - return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) } return nil } +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -424,12 +636,97 @@ func newDefaultV4Signer(o Options) *v4.Signer { }) } -func addRetryMiddlewares(stack *middleware.Stack, o Options) error { - mo := retry.AddRetryMiddlewaresOptions{ - Retryer: o.Retryer, - LogRetryAttempts: o.ClientLogMode.IsRetries(), +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" 
+} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") + }) + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { + return err } - return retry.AddRetryMiddlewares(stack, mo) + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil } // resolves UseARNRegion S3 configuration @@ -492,6 +789,18 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + type httpSignerV4a interface { SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, service string, regionSet []string, signingTime time.Time, @@ -512,6 +821,99 @@ func newDefaultV4aSigner(o Options) *v4a.Signer { }) } +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func addRequestChecksumMetricsTracking(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Build.Insert(&internalChecksum.RequestChecksumMetricsTracking{ + RequestChecksumCalculation: options.RequestChecksumCalculation, + UserAgent: ua, + }, "UserAgent", middleware.Before) +} + +func addResponseChecksumMetricsTracking(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Build.Insert(&internalChecksum.ResponseChecksumMetricsTracking{ + ResponseChecksumValidation: options.ResponseChecksumValidation, + UserAgent: ua, + }, "UserAgent", middleware.Before) +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addMetadataRetrieverMiddleware(stack *middleware.Stack) error { return s3shared.AddMetadataRetrieverMiddleware(stack) } @@ -520,6 +922,10 @@ func add100Continue(stack *middleware.Stack, options Options) error { return s3shared.Add100Continue(stack, options.ContinueHeaderThresholdBytes) } +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + // ComputedInputChecksumsMetadata provides information about the algorithms used // to compute the checksum(s) of the input payload. 
type ComputedInputChecksumsMetadata struct { @@ -541,6 +947,41 @@ func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChec } +func addInputChecksumMiddleware(stack *middleware.Stack, options internalChecksum.InputMiddlewareOptions) (err error) { + err = stack.Initialize.Add(&internalChecksum.SetupInputContext{ + GetAlgorithm: options.GetAlgorithm, + RequireChecksum: options.RequireChecksum, + RequestChecksumCalculation: options.RequestChecksumCalculation, + }, middleware.Before) + if err != nil { + return err + } + + stack.Build.Remove("ContentChecksum") + + inputChecksum := &internalChecksum.ComputeInputPayloadChecksum{ + EnableTrailingChecksum: options.EnableTrailingChecksum, + EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, + EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, + } + if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil { + return err + } + + if options.EnableTrailingChecksum { + trailerMiddleware := &internalChecksum.AddInputChecksumTrailer{ + EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, + EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, + EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, + } + if err := stack.Finalize.Insert(trailerMiddleware, inputChecksum.ID(), middleware.After); err != nil { + return err + } + } + + return nil +} + // ChecksumValidationMetadata contains metadata such as the checksum algorithm // used for data integrity validation. type ChecksumValidationMetadata struct { @@ -792,13 +1233,17 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op if err != nil { return err } - err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + err = presignedurlcust.AddAsIsPresigningMiddleware(stack) if err != nil { return err } return nil } +func withNoDefaultChecksumAPIOption(options *Options) { + options.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired +} + func addRequestResponseLogging(stack *middleware.Stack, o Options) error { return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ LogRequest: o.ClientLogMode.IsRequest(), @@ -836,3 +1281,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, 
middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go index d1e7dcea..1972abe2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" + "time" ) // This operation aborts a multipart upload. After a multipart upload is aborted, @@ -18,22 +19,35 @@ import ( // by any previously uploaded parts will be freed. However, if any part uploads are // currently in progress, those part uploads might or might not succeed. As a // result, it might be necessary to abort a given multipart upload multiple times -// in order to completely free all storage consumed by all parts. To verify that -// all parts have been removed and prevent getting charged for the part storage, -// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// API operation and ensure that the parts list is empty. Directory buckets - For -// directory buckets, you must make requests for this API operation to the Zonal -// endpoint. These endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed and prevent getting charged for the +// part storage, you should call the [ListParts]API operation and ensure that the parts list +// is empty. +// +// - Directory buckets - If multipart uploads in a directory bucket are in +// progress, you can't delete the bucket until all the in-progress multipart +// uploads are aborted or completed. To delete these in-progress multipart uploads, +// use the ListMultipartUploads operation to list the in-progress multipart +// uploads in the bucket and use the AbortMultipartUpload operation to abort all +// the in-progress multipart uploads. 
+// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -41,17 +55,32 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to AbortMultipartUpload : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are -// related to AbortMultipartUpload : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// [ListParts] +// +// [ListMultipartUploads] +// +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { if params == nil { params = &AbortMultipartUploadInput{} @@ -69,31 +98,40 @@ func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipar type AbortMultipartUploadInput struct { - // The bucket name to which the upload was taking place. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. 
S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name to which the upload was taking place. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -113,20 +151,33 @@ type AbortMultipartUploadInput struct { // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // If present, this header aborts an in progress multipart upload only if it was + // initiated on the provided timestamp. If the initiated timestamp of the multipart + // upload does not match the provided value, the operation returns a 412 + // Precondition Failed error. 
If the initiated timestamp matches or if the + // multipart upload doesn’t exist, the operation returns a 204 Success (No Content) + // response. + // + // This functionality is only supported for directory buckets. + IfMatchInitiatedTime *time.Time + // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -135,7 +186,9 @@ func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { type AbortMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
@@ -166,25 +219,28 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -202,6 +258,18 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -211,7 +279,7 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil { @@ -235,6 +303,18 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go index b9f094f1..f209edd7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -13,88 +13,138 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Completes a multipart upload by assembling previously uploaded parts. You first -// initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. 
After successfully uploading all relevant parts of an upload, you -// call this CompleteMultipartUpload operation to complete the upload. Upon -// receiving this request, Amazon S3 concatenates all the parts in ascending order -// by part number to create a new object. In the CompleteMultipartUpload request, -// you must provide the parts list and ensure that the parts list is complete. The -// CompleteMultipartUpload API operation concatenates the parts that you provide in -// the list. For each part in the list, you must provide the PartNumber value and -// the ETag value that are returned after that part was uploaded. The processing -// of a CompleteMultipartUpload request could take several minutes to finalize. -// After Amazon S3 begins processing the request, it sends an HTTP response header -// that specifies a 200 OK response. While processing is in progress, Amazon S3 -// periodically sends white space characters to keep the connection from timing -// out. A request could fail after the initial 200 OK response has been sent. This -// means that a 200 OK response can contain either a success or an error. The -// error response might be embedded in the 200 OK response. If you call this API -// operation directly, make sure to design your application to parse the contents -// of the response and handle it appropriately. If you use Amazon Web Services -// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply -// error handling per your configuration settings (including automatically retrying -// the request as appropriate). If the condition persists, the SDKs throw an -// exception (or, for the SDKs that don't use exceptions, they return an error). +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the [UploadPart] +// operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of +// an upload, you call this CompleteMultipartUpload operation to complete the +// upload. Upon receiving this request, Amazon S3 concatenates all the parts in +// ascending order by part number to create a new object. In the +// CompleteMultipartUpload request, you must provide the parts list and ensure that +// the parts list is complete. The CompleteMultipartUpload API operation +// concatenates the parts that you provide in the list. For each part in the list, +// you must provide the PartNumber value and the ETag value that are returned +// after that part was uploaded. +// +// The processing of a CompleteMultipartUpload request could take several minutes +// to finalize. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. A request could fail after the initial 200 OK +// response has been sent. This means that a 200 OK response can contain either a +// success or an error. The error response might be embedded in the 200 OK +// response. If you call this API operation directly, make sure to design your +// application to parse the contents of the response and handle it appropriately. +// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect +// the embedded error and apply error handling per your configuration settings +// (including automatically retrying the request as appropriate). 
If the condition +// persists, the SDKs throw an exception (or, for the SDKs that don't use +// exceptions, they return an error). +// // Note that if CompleteMultipartUpload fails, applications should be prepared to -// retry the failed requests. For more information, see Amazon S3 Error Best -// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) -// . You can't use Content-Type: application/x-www-form-urlencoded for the +// retry any failed requests (including 500 error responses). For more information, +// see [Amazon S3 Error Best Practices]. +// +// You can't use Content-Type: application/x-www-form-urlencoded for the // CompleteMultipartUpload requests. Also, if you don't provide a Content-Type -// header, CompleteMultipartUpload can still return a 200 OK response. For more -// information about multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// header, CompleteMultipartUpload can still return a 200 OK response. +// +// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. 
For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// +// If you provide an [additional checksum value]in your MultipartUpload requests and the object is encrypted +// +// with Key Management Service, you must have permission to use the kms:Decrypt +// action for the CompleteMultipartUpload request to succeed. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Special errors +// // - Error Code: EntityTooSmall +// // - Description: Your proposed upload is smaller than the minimum allowed // object size. Each part must be at least 5 MB in size, except the last part. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: InvalidPart +// // - Description: One or more of the specified parts could not be found. The // part might not have been uploaded, or the specified ETag might not have matched // the uploaded part's ETag. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: InvalidPartOrder +// // - Description: The list of parts was not in ascending order. The parts list // must be specified in order by part number. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CompleteMultipartUpload : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// The following operations are related to CompleteMultipartUpload : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [additional checksum value]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { if params == nil { params = &CompleteMultipartUploadInput{} @@ -112,31 +162,40 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu type CompleteMultipartUploadInput struct { - // Name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -153,37 +212,98 @@ type CompleteMultipartUploadInput struct { // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. 
For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string + // This header specifies the checksum type of the object, which determines how + // part-level checksums are combined to create an object-level checksum for + // multipart objects. You can use this header as a data integrity check to verify + // that the checksum type that is received is the same checksum that was specified. + // If the checksum type doesn’t match the checksum type that was specified for the + // object during the CreateMultipartUpload request, it’ll result in a BadDigest + // error. For more information, see Checking object integrity in the Amazon S3 User + // Guide. 
+ ChecksumType types.ChecksumType + // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // Uploads the object only if the ETag (entity tag) value provided during the + // WRITE operation matches the ETag of the object in S3. If the ETag values do not + // match, the operation returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure you should fetch the + // object's ETag, re-initiate the multipart upload with CreateMultipartUpload , and + // re-upload each part. + // + // Expects the ETag value as a string. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfMatch *string + + // Uploads the object only if the object key name does not already exist in the + // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure you should re-initiate the + // multipart upload with CreateMultipartUpload and re-upload each part. + // + // Expects the '*' (asterisk) character. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfNoneMatch *string + + // The expected total object size of the multipart upload request. If there’s a + // mismatch between the specified object size value and the actual object size + // value, it results in an HTTP 400 InvalidRequest error. + MpuObjectSize *int64 + // The container for the multipart upload request information. MultipartUpload *types.CompletedMultipartUpload @@ -191,38 +311,47 @@ type CompleteMultipartUploadInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is required only when the object was created using a checksum // algorithm or if your bucket policy requires the use of SSE-C. 
For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -231,63 +360,90 @@ func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. Access points are not - // supported by directory buckets. + // the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. 
+ // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // as a data integrity check to verify that the checksum type that is received is + // the same checksum type that was specified during the CreateMultipartUpload + // request. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Entity tag that identifies the newly created object's data. Objects with // different object data will have different entity tags. The entity tag is an // opaque string. The entity tag may or may not be an MD5 digest of the object // data. If the entity tag is not an MD5 digest of the object data, it will contain // one or more nonhexadecimal characters and/or will consist of less than 32 or // more than 32 hexadecimal digits. For more information about how the entity tag - // is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. 
+ // is calculated, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ETag *string // If the object expiration is configured, this will contain the expiration date ( // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. + // // This functionality is not supported for directory buckets. Expiration *string @@ -298,21 +454,22 @@ type CompleteMultipartUploadOutput struct { Location *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Version ID of the newly created object, in case the bucket has versioning - // turned on. This functionality is not supported for directory buckets. + // turned on. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -343,25 +500,28 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -379,6 +539,18 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ 
-388,7 +560,7 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil { @@ -415,6 +587,18 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go index deb21cee..a4c3f2de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -15,97 +15,155 @@ import ( "time" ) -// Creates a copy of an object that is already stored in Amazon S3. You can store -// individual objects of up to 5 TB in Amazon S3. You create a copy of your object -// up to 5 GB in size in a single atomic action using this API. However, to copy an -// object greater than 5 GB, you must use the multipart upload Upload Part - Copy -// (UploadPartCopy) API. For more information, see Copy Object Using the REST -// Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html) -// . You can copy individual objects between general purpose buckets, between +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a copy +// of your object up to 5 GB in size in a single atomic action using this API. +// However, to copy an object greater than 5 GB, you must use the multipart upload +// Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API]. +// +// You can copy individual objects between general purpose buckets, between // directory buckets, and between general purpose buckets and directory buckets. -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Both the Region that you want to copy the object -// from and the Region that you want to copy the object to must be enabled for your -// account. Amazon S3 transfer acceleration does not support cross-Region copies. -// If you request a cross-Region copy using a transfer acceleration endpoint, you -// get a 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . 
Authentication and authorization All CopyObject requests must be -// authenticated and signed by using IAM credentials (access key ID and secret -// access key for the IAM identities). All headers with the x-amz- prefix, -// including x-amz-copy-source , must be signed. For more information, see REST -// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory buckets - You must use the IAM credentials to authenticate and +// +// - Amazon S3 supports copy operations using Multi-Region Access Points only as +// a destination when using the Multi-Region Access Point ARN. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// - VPC endpoints don't support cross-Region requests (including copies). If +// you're using VPC endpoints, your source and destination buckets should be in the +// same Amazon Web Services Region as your VPC endpoint. +// +// Both the Region that you want to copy the object from and the Region that you +// want to copy the object to must be enabled for your account. For more +// information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon +// Web Services Account Management Guide. +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If you +// request a cross-Region copy using a transfer acceleration endpoint, you get a +// 400 Bad Request error. For more information, see [Transfer Acceleration]. +// +// Authentication and authorization All CopyObject requests must be authenticated +// and signed by using IAM credentials (access key ID and secret access key for the +// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source +// , must be signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use the IAM credentials to authenticate and // authorize your access to the CopyObject API operation, instead of using the -// temporary security credentials through the CreateSession API operation. Amazon -// Web Services CLI or SDKs handles authentication and authorization on your -// behalf. Permissions You must have read access to the source object and write -// access to the destination bucket. +// temporary security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// +// Permissions You must have read access to the source object and write access to +// the destination bucket. +// // - General purpose bucket permissions - You must have permissions in an IAM // policy based on the source and destination bucket types in a CopyObject // operation. +// // - If the source object is in a general purpose bucket, you must have // s3:GetObject permission to read the source object that is being copied. 
+// // - If the destination bucket is a general purpose bucket, you must have -// s3:PubObject permission to write the object copy to the destination bucket. +// s3:PutObject permission to write the object copy to the destination bucket. +// // - Directory bucket permissions - You must have permissions in a bucket policy // or an IAM identity-based policy based on the source and destination bucket types // in a CopyObject operation. +// // - If the source object that you want to copy is in a directory bucket, you // must have the s3express:CreateSession permission in the Action element of a // policy to read the object. By default, the session is in the ReadWrite mode. // If you want to restrict the access, you can explicitly set the // s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// // - If the copy destination is a directory bucket, you must have the // s3express:CreateSession permission in the Action element of a policy to write // the object to the destination. The s3express:SessionMode condition key can't -// be set to ReadOnly on the copy destination bucket. For example policies, see -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// be set to ReadOnly on the copy destination bucket. +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. +// +// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. // // Response and special errors When the request is an HTTP 1.1 request, the // response is chunk encoded. When the request is not an HTTP 1.1 request, the // response would not contain the Content-Length . You always need to read the -// entire response body to check if the copy succeeds. to keep the connection alive -// while we copy the data. +// entire response body to check if the copy succeeds. +// // - If the copy is successful, you receive a response with information about // the copied object. +// // - A copy request might return an error when Amazon S3 receives the copy // request or while Amazon S3 is copying the files. A 200 OK response can contain // either a success or an error. +// // - If the error occurs before the copy action starts, you receive a standard // Amazon S3 error. +// // - If the error occurs during the copy operation, the error response is // embedded in the 200 OK response. For example, in a cross-region copy, you may -// encounter throttling and receive a 200 OK response. For more information, see -// Resolve the Error 200 response when copying objects to Amazon S3 . The 200 OK -// status code means the copy was accepted, but it doesn't mean the copy is -// complete. Another example is when you disconnect from Amazon S3 before the copy -// is complete, Amazon S3 might cancel the copy and you may receive a 200 OK -// response. You must stay connected to Amazon S3 until the entire response is -// successfully received and processed. 
If you call this API operation directly, -// make sure to design your application to parse the content of the response and -// handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this -// condition. The SDKs detect the embedded error and apply error handling per your -// configuration settings (including automatically retrying the request as -// appropriate). If the condition persists, the SDKs throw an exception (or, for -// the SDKs that don't use exceptions, they return an error). +// encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3] +// . The 200 OK status code means the copy was accepted, but it doesn't mean the +// copy is complete. Another example is when you disconnect from Amazon S3 before +// the copy is complete, Amazon S3 might cancel the copy and you may receive a +// 200 OK response. You must stay connected to Amazon S3 until the entire +// response is successfully received and processed. +// +// If you call this API operation directly, make sure to design your application +// +// to parse the content of the response and handle it appropriately. If you use +// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the +// embedded error and apply error handling per your configuration settings +// (including automatically retrying the request as appropriate). If the condition +// persists, the SDKs throw an exception (or, for the SDKs that don't use +// exceptions, they return an error). // // Charge The copy request charge is based on the storage class and Region that // you specify for the destination object. The request can also result in a data // retrieval charge for the source if the source storage class bills for data -// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/) -// . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CopyObject : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// retrieval. If the copy source is in a different region, the data transfer is +// billed to the copy source account. For pricing information, see [Amazon S3 pricing]. +// +// HTTP Host header syntax +// +// - Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// - Amazon S3 on Outposts - When you use this action with S3 on Outposts +// through the REST API, you must direct requests to the S3 on Outposts hostname. +// The S3 on Outposts hostname takes the form +// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The +// hostname isn't required when you use the Amazon Web Services CLI or SDKs. 
+// +// The following operations are related to CopyObject : +// +// [PutObject] +// +// [GetObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror +// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { if params == nil { params = &CopyObjectInput{} @@ -123,31 +181,52 @@ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns type CopyObjectInput struct { - // The name of the destination bucket. Directory buckets - When you use this - // operation with a directory bucket, you must use virtual-hosted-style requests in - // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style - // requests are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. 
S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the destination bucket. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Copying objects across different Amazon Web Services Regions isn't supported + // when the source or destination bucket is in Amazon Web Services Local Zones. The + // source and destination buckets must have the same parent Amazon Web Services + // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code + // InvalidRequest . + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must use the + // Outpost bucket access point ARN or the access point alias for the destination + // bucket. + // + // You can only copy objects within the same Outpost bucket. It's not supported to + // copy objects across different Amazon Web Services Outposts, between buckets on + // the same Outposts, or between Outposts buckets and any other bucket types. For + // more information about S3 on Outposts, see [What is S3 on Outposts?]in the S3 on Outposts guide. When + // you use this action with S3 on Outposts through the REST API, you must direct + // requests to the S3 on Outposts hostname, in the format + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The + // hostname isn't required when you use the Amazon Web Services CLI or SDKs. 
+ // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -155,10 +234,11 @@ type CopyObjectInput struct { // Specifies the source object for the copy operation. The source object can be up // to 5 GB. If the source object is an object that was uploaded by using a // multipart upload, the object copy will be a single part object after the source - // object is copied to the destination bucket. You specify the value of the copy - // source in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) - // : + // object is copied to the destination bucket. + // + // You specify the value of the copy source in one of two formats, depending on + // whether you want to access the source object through an [access point]: + // // - For objects not accessed through an access point, specify the name of the // source bucket and the key of the source object, separated by a slash (/). For // example, to copy the object reports/january.pdf from the general purpose @@ -167,6 +247,7 @@ type CopyObjectInput struct { // bucket awsexamplebucket--use1-az5--x-s3 , use // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be // URL-encoded. + // // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . For example, to copy the object @@ -174,15 +255,20 @@ type CopyObjectInput struct { // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf // . The value must be URL encoded. + // // - Amazon S3 supports copy operations using Access points only when the source // and destination buckets are in the same Amazon Web Services Region. - // - Access points are not supported by directory buckets. Alternatively, for - // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as - // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, - // to copy the object reports/january.pdf through outpost my-outpost owned by - // account 123456789012 in Region us-west-2 , use the URL encoding of + // + // - Access points are not supported by directory buckets. + // + // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + // ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 + // in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. + // // If your source bucket versioning is enabled, the x-amz-copy-source header by // default identifies the current version of an object to copy. If the current // version is a delete marker, Amazon S3 behaves as if the object was deleted. 
To @@ -190,14 +276,21 @@ type CopyObjectInput struct { // append ?versionId= to the value (for example, // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 // ). If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. If you enable versioning on the destination bucket, Amazon S3 - // generates a unique version ID for the copied object. This version ID is - // different from the version ID of the source object. Amazon S3 returns the - // version ID of the copied object in the x-amz-version-id response header in the - // response. If you do not enable versioning or suspend it on the destination - // bucket, the version ID that Amazon S3 generates in the x-amz-version-id - // response header is always null. Directory buckets - S3 Versioning isn't enabled - // and supported for directory buckets. + // source object. + // + // If you enable versioning on the destination bucket, Amazon S3 generates a + // unique version ID for the copied object. This version ID is different from the + // version ID of the source object. Amazon S3 returns the version ID of the copied + // object in the x-amz-version-id response header in the response. + // + // If you do not enable versioning or suspend it on the destination bucket, the + // version ID that Amazon S3 generates in the x-amz-version-id response header is + // always null. + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // + // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html // // This member is required. CopySource *string @@ -207,51 +300,71 @@ type CopyObjectInput struct { // This member is required. Key *string - // The canned access control list (ACL) to apply to the object. When you copy an - // object, the ACL metadata is not preserved and is set to private by default. - // Only the owner has full access control. To override the default ACL setting, - // specify a new ACL when you generate a copy request. For more information, see - // Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) - // . If the destination bucket that you're copying objects to uses the bucket owner + // The canned access control list (ACL) to apply to the object. + // + // When you copy an object, the ACL metadata is not preserved and is set to private + // by default. Only the owner has full access control. To override the default ACL + // setting, specify a new ACL when you generate a copy request. For more + // information, see [Using ACLs]. + // + // If the destination bucket that you're copying objects to uses the bucket owner // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect // permissions. Buckets that use this setting only accept PUT requests that don't // specify an ACL or PUT requests that specify bucket owner full control ACLs, // such as the bucket-owner-full-control canned ACL or an equivalent form of this - // ACL expressed in the XML format. For more information, see Controlling - // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. + // ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 + // User Guide. 
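// ---- Illustrative sketch (editor's addition; not part of the vendored AWS SDK source) ----
// One plausible way to call CopyObject with the CopySource format documented above.
// Bucket and key names are placeholders; the snippet assumes an *s3.Client built
// elsewhere (e.g. via config.LoadDefaultConfig) and imports of "context", "log",
// "github.com/aws/aws-sdk-go-v2/aws" and "github.com/aws/aws-sdk-go-v2/service/s3".
func copyReportSketch(ctx context.Context, client *s3.Client) error {
	// CopySource is "<source-bucket>/<source-key>"; keys with special characters
	// must be URL-encoded, and "?versionId=<id>" can be appended to copy a
	// specific version instead of the latest one.
	out, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("amzn-s3-demo-destination-bucket"), // placeholder
		Key:        aws.String("reports/january-copy.pdf"),        // placeholder
		CopySource: aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
	})
	if err != nil {
		return err
	}
	// The new object's ETag and last-modified time come back in CopyObjectResult.
	log.Printf("copied object, etag=%s", aws.ToString(out.CopyObjectResult.ETag))
	return nil
}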
+ // // - If your destination bucket uses the bucket owner enforced setting for // Object Ownership, all objects written to the bucket by any account will be owned // by the bucket owner. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. + // // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t - // affect bucket-level settings for S3 Bucket Key. For more information, see - // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. This functionality is not supported when the - // destination bucket is a directory bucket. + // affect bucket-level settings for S3 Bucket Key. + // + // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide. + // + // Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS + // encrypted objects from general purpose buckets to directory buckets, from + // directory buckets to general purpose buckets, or between directory buckets, + // through [CopyObject]. In this case, Amazon S3 makes a call to KMS every time a copy request + // is made for a KMS-encrypted object. + // + // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html BucketKeyEnabled *bool // Specifies the caching behavior along the request/reply chain. CacheControl *string // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. When you copy an object, if the source object has a - // checksum, that checksum value will be copied to the new object by default. If - // the CopyObject request does not include this x-amz-checksum-algorithm header, - // the checksum algorithm will be copied from the source object to the destination - // object (if it's present on the source object). You can optionally specify a - // different checksum algorithm to use with the x-amz-checksum-algorithm header. - // Unrecognized or unsupported values will respond with the HTTP status code 400 - // Bad Request . For directory buckets, when you use Amazon Web Services SDKs, - // CRC32 is the default checksum algorithm that's used for performance. + // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // When you copy an object, if the source object has a checksum, that checksum + // value will be copied to the new object by default. 
If the CopyObject request + // does not include this x-amz-checksum-algorithm header, the checksum algorithm + // will be copied from the source object to the destination object (if it's present + // on the source object). You can optionally specify a different checksum algorithm + // to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported + // values will respond with the HTTP status code 400 Bad Request . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Specifies presentational information for the object. Indicates whether an @@ -261,8 +374,10 @@ type CopyObjectInput struct { // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For directory buckets, only the aws-chunked value is - // supported in this header field. + // Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string // The language the content is in. @@ -271,62 +386,85 @@ type CopyObjectInput struct { // A standard MIME type that describes the format of the object data. ContentType *string - // Copies the object if its entity tag (ETag) matches the specified tag. If both - // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since // headers are present in the request and evaluate as follows, Amazon S3 returns // 200 OK and copies the data: + // // - x-amz-copy-source-if-match condition evaluates to true + // // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. If both the - // x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers - // are present in the request and evaluate as follows, Amazon S3 returns the 412 - // Precondition Failed response code: + // Copies the object if it has been modified since the specified time. + // + // If both the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request and + // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + // code: + // // - x-amz-copy-source-if-none-match condition evaluates to false + // // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. If both the x-amz-copy-source-if-none-match and + // Copies the object if its entity tag (ETag) is different than the specified ETag. 
+ // + // If both the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request and // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response // code: + // // - x-amz-copy-source-if-none-match condition evaluates to false + // // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. If both - // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // Copies the object if it hasn't been modified since the specified time. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since // headers are present in the request and evaluate as follows, Amazon S3 returns // 200 OK and copies the data: + // // - x-amz-copy-source-if-match condition evaluates to true + // // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfUnmodifiedSince *time.Time // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C, - // you must provide the necessary encryption information in your request so that - // Amazon S3 can decrypt the object for copying. This functionality is not - // supported when the source object is in a directory bucket. + // AES256 ). + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be the same - // one that was used when the source object was created. If the source object for - // the copy is stored in Amazon S3 using SSE-C, you must provide the necessary - // encryption information in your request so that Amazon S3 can decrypt the object - // for copying. This functionality is not supported when the source object is in a - // directory bucket. + // one that was used when the source object was created. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. If the source object for the copy - // is stored in Amazon S3 using SSE-C, you must provide the necessary encryption - // information in your request so that Amazon S3 can decrypt the object for - // copying. This functionality is not supported when the source object is in a - // directory bucket. + // encryption key was transmitted without error. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. 
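// ---- Illustrative sketch (editor's addition; not part of the vendored AWS SDK source) ----
// A sketch of the conditional-copy headers documented above: copy the source only
// if its ETag differs from one we already hold. Placeholder names; same assumed
// client and imports as the earlier sketch.
func conditionalCopySketch(ctx context.Context, client *s3.Client, knownETag string) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("amzn-s3-demo-destination-bucket"),
		Key:        aws.String("reports/january.pdf"),
		CopySource: aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
		// If the source's ETag still equals knownETag, S3 responds with
		// 412 Precondition Failed and no copy is performed.
		CopySourceIfNoneMatch: aws.String(knownETag),
	})
	return err
}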
+ // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string // The account ID of the expected destination bucket owner. If the account ID that @@ -343,22 +481,30 @@ type CopyObjectInput struct { Expires *time.Time // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string // Allows grantee to read the object data and its metadata. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string // Allows grantee to read the object ACL. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to write the ACL for the applicable object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string @@ -368,26 +514,32 @@ type CopyObjectInput struct { // Specifies whether the metadata is copied from the source object or replaced // with metadata that's provided in the request. When copying an object, you can // preserve all metadata (the default) or specify new metadata. If this header - // isn’t specified, COPY is the default behavior. General purpose bucket - For - // general purpose buckets, when you grant permissions, you can use the - // s3:x-amz-metadata-directive condition key to enforce certain metadata behavior - // when objects are uploaded. For more information, see Amazon S3 condition key - // examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) - // in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each - // object and is not copied when using the x-amz-metadata-directive header. To - // copy the value, you must specify x-amz-website-redirect-location in the request - // header. + // isn’t specified, COPY is the default behavior. + // + // General purpose bucket - For general purpose buckets, when you grant + // permissions, you can use the s3:x-amz-metadata-directive condition key to + // enforce certain metadata behavior when objects are uploaded. For more + // information, see [Amazon S3 condition key examples]in the Amazon S3 User Guide. + // + // x-amz-website-redirect-location is unique to each object and is not copied when + // using the x-amz-metadata-directive header. To copy the value, you must specify + // x-amz-website-redirect-location in the request header. + // + // [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html MetadataDirective types.MetadataDirective - // Specifies whether you want to apply a legal hold to the object copy. This - // functionality is not supported for directory buckets. + // Specifies whether you want to apply a legal hold to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to the object copy. This - // functionality is not supported for directory buckets. + // The Object Lock mode that you want to apply to the object copy. + // + // This functionality is not supported for directory buckets. 
ObjectLockMode types.ObjectLockMode // The date and time when you want the Object Lock of the object copy to expire. + // // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time @@ -395,19 +547,23 @@ type CopyObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256 - // ). When you perform a CopyObject operation, if you want to use a different type - // of encryption setting for the target object, you can specify appropriate + // Specifies the algorithm to use when encrypting the object (for example, AES256 ). + // + // When you perform a CopyObject operation, if you want to use a different type of + // encryption setting for the target object, you can specify appropriate // encryption-related headers to encrypt the target object with an Amazon S3 // managed key, a KMS key, or a customer-provided key. If the encryption setting in // your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. + // // This functionality is not supported when the destination bucket is a directory // bucket. SSECustomerAlgorithm *string @@ -416,81 +572,155 @@ type CopyObjectInput struct { // encrypting data. This value is used to store the object and then it is // discarded. Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported when the destination bucket is a directory bucket. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the destination bucket is a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This value must be explicitly - // added to specify encryption context for CopyObject requests. 
This functionality - // is not supported when the destination bucket is a directory bucket. + // Specifies the Amazon Web Services KMS Encryption Context as an additional + // encryption context to use for the destination object encryption. The value of + // this header is a base64-encoded UTF-8 string holding JSON with the encryption + // context key-value pairs. + // + // General purpose buckets - This value must be explicitly added to specify + // encryption context for CopyObject requests if you want an additional encryption + // context for your destination object. The additional encryption context of the + // source object won't be copied to the destination object. For more information, + // see [Encryption context]in the Amazon S3 User Guide. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. + // + // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context SSEKMSEncryptionContext *string - // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object // encryption. All GET and PUT requests for an object protected by KMS will fail if // they're not made via SSL or using SigV4. For information about configuring any // of the officially supported Amazon Web Services SDKs and Amazon Web Services - // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. This functionality is not supported when the - // destination bucket is a directory bucket. + // CLI, see [Specifying the Signature Version in Request Authentication]in the Amazon S3 User Guide. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported - // values won’t write a destination object and will receive a 400 Bad Request - // response. Amazon S3 automatically encrypts all new objects that are copied to an - // S3 bucket. 
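// ---- Illustrative sketch (editor's addition; not part of the vendored AWS SDK source) ----
// A sketch of overriding the destination bucket's default encryption with SSE-KMS,
// per the ServerSideEncryption / SSEKMSKeyId fields described above. The KMS key ARN
// and bucket names are placeholders; assumes
// "github.com/aws/aws-sdk-go-v2/service/s3/types" is imported as well.
func copyWithSSEKMSSketch(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:               aws.String("amzn-s3-demo-destination-bucket"),
		Key:                  aws.String("reports/january.pdf"),
		CopySource:           aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		// Key ID, key ARN, or alias of the KMS key for the copy (placeholder value).
		SSEKMSKeyId: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"),
		// Opt in to an S3 Bucket Key to reduce KMS calls (general purpose buckets).
		BucketKeyEnabled: aws.Bool(true),
	})
	return err
}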
When copying an object, if you don't specify encryption information - // in your copy request, the encryption setting of the target object is set to the + // The server-side encryption algorithm used when storing this object in Amazon + // S3. Unrecognized or unsupported values won’t write a destination object and will + // receive a 400 Bad Request response. + // + // Amazon S3 automatically encrypts all new objects that are copied to an S3 + // bucket. When copying an object, if you don't specify encryption information in + // your copy request, the encryption setting of the target object is set to the // default encryption configuration of the destination bucket. By default, all // buckets have a base level of encryption configuration that uses server-side // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a - // default encryption configuration that uses server-side encryption with Key - // Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with - // Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with - // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS - // key, or a customer-provided key to encrypt the target object copy. When you - // perform a CopyObject operation, if you want to use a different type of - // encryption setting for the target object, you can specify appropriate - // encryption-related headers to encrypt the target object with an Amazon S3 - // managed key, a KMS key, or a customer-provided key. If the encryption setting in - // your request is different from the default encryption configuration of the - // destination bucket, the encryption setting in your request takes precedence. - // With server-side encryption, Amazon S3 encrypts your data as it writes your data - // to disks in its data centers and decrypts the data when you access it. For more - // information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) - // in the Amazon S3 User Guide. For directory buckets, only server-side encryption - // with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // different default encryption configuration, Amazon S3 uses the corresponding + // encryption key to encrypt the target object copy. + // + // With server-side encryption, Amazon S3 encrypts your data as it writes your + // data to disks in its data centers and decrypts the data when you access it. For + // more information about server-side encryption, see [Using Server-Side Encryption]in the Amazon S3 User Guide. + // + // General purpose buckets + // + // - For general purpose buckets, there are the following supported options for + // server-side encryption: server-side encryption with Key Management Service (KMS) + // keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS + // keys (DSSE-KMS), and server-side encryption with customer-provided encryption + // keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided + // key to encrypt the target object copy. + // + // - When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. 
If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // + // Directory buckets + // + // - For directory buckets, there are only two supported options for server-side + // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( + // AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). We + // recommend that the bucket's default encryption uses the desired encryption + // configuration and you don't override the bucket default encryption in your + // CreateSession requests or PUT object requests. Then, new objects are + // automatically encrypted with the desired encryption settings. For more + // information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the + // encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // - To encrypt new object copies to a directory bucket with SSE-KMS, we + // recommend you specify SSE-KMS as the directory bucket's default encryption + // configuration with a KMS key (specifically, a [customer managed key]). The [Amazon Web Services managed key]( aws/s3 ) isn't + // supported. Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket + // for the lifetime of the bucket. After you specify a customer managed key for + // SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS + // configuration. Then, when you perform a CopyObject operation and want to + // specify server-side encryption settings for new object copies with SSE-KMS in + // the encryption-related request headers, you must ensure the encryption key is + // the same customer managed key that you specified for the directory bucket's + // default encryption configuration. + // + // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk ServerSideEncryption types.ServerSideEncryption // If the x-amz-storage-class header is not used, the copied object will be stored // in the STANDARD Storage Class by default. The STANDARD storage class provides // high durability and high availability. Depending on performance needs, you can // specify a different Storage Class. - // - Directory buckets - For directory buckets, only the S3 Express One Zone - // storage class is supported to store newly created objects. Unsupported storage - // class values won't write a destination object and will respond with the HTTP - // status code 400 Bad Request . + // + // - Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. 
Unsupported + // storage class values won't write a destination object and will respond with the + // HTTP status code 400 Bad Request . + // // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. + // // You can use the CopyObject action to change the storage class of an object that // is already stored in Amazon S3 by using the x-amz-storage-class header. For - // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. Before using an object as a source object for the - // copy operation, you must restore a copy of it if it meets any of the following - // conditions: + // more information, see [Storage Classes]in the Amazon S3 User Guide. + // + // Before using an object as a source object for the copy operation, you must + // restore a copy of it if it meets any of the following conditions: + // // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . - // - The storage class of the source object is INTELLIGENT_TIERING and it's S3 - // Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) - // is Archive Access or Deep Archive Access . - // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) - // in the Amazon S3 User Guide. + // + // - The storage class of the source object is INTELLIGENT_TIERING and it's [S3 Intelligent-Tiering access tier]is + // Archive Access or Deep Archive Access . + // + // For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + // [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html + // [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition StorageClass types.StorageClass // The tag-set for the object copy in the destination bucket. This value must be @@ -498,60 +728,82 @@ type CopyObjectInput struct { // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive // , you don't need to set the x-amz-tagging header, because the tag-set will be // copied from the source object directly. The tag-set must be encoded as URL Query - // parameters. The default value is the empty value. Directory buckets - For - // directory buckets in a CopyObject operation, only the empty tag-set is - // supported. Any requests that attempt to write non-empty tags into directory - // buckets will receive a 501 Not Implemented status code. When the destination - // bucket is a directory bucket, you will receive a 501 Not Implemented response - // in any of the following situations: + // parameters. + // + // The default value is the empty value. + // + // Directory buckets - For directory buckets in a CopyObject operation, only the + // empty tag-set is supported. Any requests that attempt to write non-empty tags + // into directory buckets will receive a 501 Not Implemented status code. 
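// ---- Illustrative sketch (editor's addition; not part of the vendored AWS SDK source) ----
// A sketch of the storage-class use case described above: copy an object onto itself
// to move it to a different storage class. Bucket and key are placeholders.
func changeStorageClassSketch(ctx context.Context, client *s3.Client) error {
	// Copying an object onto itself is only accepted when something changes
	// (metadata, encryption, or, as here, the storage class).
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:       aws.String("amzn-s3-demo-bucket"),
		Key:          aws.String("logs/2024/app.log"),
		CopySource:   aws.String("amzn-s3-demo-bucket/logs/2024/app.log"),
		StorageClass: types.StorageClassStandardIa,
	})
	return err
}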
When the + // destination bucket is a directory bucket, you will receive a 501 Not Implemented + // response in any of the following situations: + // // - When you attempt to COPY the tag-set from an S3 source object that has // non-empty tags. + // // - When you attempt to REPLACE the tag-set of a source object and set a // non-empty value to x-amz-tagging . + // // - When you don't set the x-amz-tagging-directive header and the source object // has non-empty tags. This is because the default value of // x-amz-tagging-directive is COPY . + // // Because only the empty tag-set is supported for directory buckets in a // CopyObject operation, the following situations are allowed: + // // - When you attempt to COPY the tag-set from a directory bucket source object // that has no tags to a general purpose bucket. It copies an empty tag-set to the // destination object. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and set the x-amz-tagging value of the directory bucket destination object to // empty. + // // - When you attempt to REPLACE the tag-set of a general purpose bucket source // object that has non-empty tags and set the x-amz-tagging value of the // directory bucket destination object to empty. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and don't set the x-amz-tagging value of the directory bucket destination // object. This is because the default value of x-amz-tagging is the empty value. Tagging *string // Specifies whether the object tag-set is copied from the source object or - // replaced with the tag-set that's provided in the request. The default value is - // COPY . Directory buckets - For directory buckets in a CopyObject operation, - // only the empty tag-set is supported. Any requests that attempt to write - // non-empty tags into directory buckets will receive a 501 Not Implemented status - // code. When the destination bucket is a directory bucket, you will receive a 501 - // Not Implemented response in any of the following situations: + // replaced with the tag-set that's provided in the request. + // + // The default value is COPY . + // + // Directory buckets - For directory buckets in a CopyObject operation, only the + // empty tag-set is supported. Any requests that attempt to write non-empty tags + // into directory buckets will receive a 501 Not Implemented status code. When the + // destination bucket is a directory bucket, you will receive a 501 Not Implemented + // response in any of the following situations: + // // - When you attempt to COPY the tag-set from an S3 source object that has // non-empty tags. + // // - When you attempt to REPLACE the tag-set of a source object and set a // non-empty value to x-amz-tagging . + // // - When you don't set the x-amz-tagging-directive header and the source object // has non-empty tags. This is because the default value of // x-amz-tagging-directive is COPY . + // // Because only the empty tag-set is supported for directory buckets in a // CopyObject operation, the following situations are allowed: + // // - When you attempt to COPY the tag-set from a directory bucket source object // that has no tags to a general purpose bucket. It copies an empty tag-set to the // destination object. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and set the x-amz-tagging value of the directory bucket destination object to // empty. 
+ // // - When you attempt to REPLACE the tag-set of a general purpose bucket source // object that has non-empty tags and set the x-amz-tagging value of the // directory bucket destination object to empty. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and don't set the x-amz-tagging value of the directory bucket destination // object. This is because the default value of x-amz-tagging is the empty value. @@ -562,71 +814,79 @@ type CopyObjectInput struct { // Amazon S3 stores the value of this header in the object metadata. This value is // unique to each object and is not copied when using the x-amz-metadata-directive // header. Instead, you may opt to provide this header in combination with the - // x-amz-metadata-directive header. This functionality is not supported for - // directory buckets. + // x-amz-metadata-directive header. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.CopySource = in.CopySource + p.Key = in.Key p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Container for all response elements. CopyObjectResult *types.CopyObjectResult - // Version ID of the source object that was copied. This functionality is not - // supported when the source object is in a directory bucket. + // Version ID of the source object that was copied. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string - // If the object expiration is configured, the response includes this header. This - // functionality is not supported for directory buckets. + // If the object expiration is configured, the response includes this header. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory + // buckets. Expiration *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. 
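// ---- Illustrative sketch (editor's addition; not part of the vendored AWS SDK source) ----
// A sketch combining the tagging behaviour and the response fields described above:
// replace the tag-set on the copy (general purpose buckets; directory buckets only
// accept an empty tag-set) and read the copy's version ID and ETag. Placeholder
// names; assumes "log" is imported.
func copyWithNewTagsSketch(ctx context.Context, client *s3.Client) error {
	out, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:           aws.String("amzn-s3-demo-destination-bucket"),
		Key:              aws.String("reports/january.pdf"),
		CopySource:       aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
		TaggingDirective: types.TaggingDirectiveReplace,
		// Tag-set encoded as URL query parameters.
		Tagging: aws.String("team=metrics&retention=90d"),
	})
	if err != nil {
		return err
	}
	// VersionId is non-nil only when versioning is enabled on the destination bucket.
	log.Printf("version=%s etag=%s", aws.ToString(out.VersionId), aws.ToString(out.CopyObjectResult.ETag))
	return nil
}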
SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This functionality is - // not supported for directory buckets. + // object encryption. The value of this header is a Base64 encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). ServerSideEncryption types.ServerSideEncryption - // Version ID of the newly created copy. This functionality is not supported for - // directory buckets. + // Version ID of the newly created copy. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -657,25 +917,28 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -693,6 +956,18 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCopyObjectValidationMiddleware(stack); err != nil { return err } @@ -702,7 +977,7 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if 
err = addCopyObjectUpdateEndpoint(stack, options); err != nil { @@ -729,6 +1004,18 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go index 6357444b..5dfadb4f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -15,82 +15,118 @@ import ( ) // This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts -// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) -// . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and -// have a valid Amazon Web Services Access Key ID to authenticate requests. -// Anonymous requests are never allowed to create buckets. By creating the bucket, -// you become the bucket owner. There are two types of buckets: general purpose -// buckets and directory buckets. For more information about these bucket types, -// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) -// in the Amazon S3 User Guide. +// bucket, see [CreateBucket]CreateBucket . +// +// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have +// a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous +// requests are never allowed to create buckets. By creating the bucket, you become +// the bucket owner. +// +// There are two types of buckets: general purpose buckets and directory buckets. +// For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets]in the Amazon S3 User Guide. +// // - General purpose buckets - If you send your CreateBucket request to the // s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So // the signature calculations in Signature Version 4 must use us-east-1 as the // Region, even if the location constraint in the request specifies another Region // where the bucket is to be created. If you create a bucket in a Region other than // US East (N. Virginia), your application must be able to handle 307 redirect. For -// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) -// in the Amazon S3 User Guide. +// more information, see [Virtual hosting of buckets]in the Amazon S3 User Guide. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Regional endpoint. These endpoints support path-style // requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://s3express-control.region-code.amazonaws.com/bucket-name . 
+// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Permissions +// // - General purpose bucket permissions - In addition to the s3:CreateBucket // permission, the following permissions are required in a policy when your // CreateBucket request includes specific headers: +// // - Access control lists (ACLs) - In your CreateBucket request, if you specify // an access control list (ACL) and set it to public-read , public-read-write , // authenticated-read , or if you explicitly specify any other custom ACLs, both // s3:CreateBucket and s3:PutBucketAcl permissions are required. In your // CreateBucket request, if you set the ACL to private , or if you don't specify // any ACLs, only the s3:CreateBucket permission is required. +// // - Object Lock - In your CreateBucket request, if you set // x-amz-bucket-object-lock-enabled to true, the // s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are // required. +// // - S3 Object Ownership - If your CreateBucket request includes the // x-amz-object-ownership header, then the s3:PutBucketOwnershipControls -// permission is required. If your CreateBucket request sets BucketOwnerEnforced -// for Amazon S3 Object Ownership and specifies a bucket ACL that provides access -// to an external Amazon Web Services account, your request fails with a 400 -// error and returns the InvalidBucketAcLWithObjectOwnership error code. For more -// information, see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html) -// in the Amazon S3 User Guide. -// - S3 Block Public Access - If your specific use case requires granting public -// access to your S3 resources, you can disable Block Public Access. Specifically, -// you can create a new bucket with Block Public Access enabled, then separately -// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about S3 Block Public Access, see Blocking -// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - You must have the s3express:CreateBucket -// permission in an IAM identity-based policy instead of a bucket policy. -// Cross-account access to this API operation isn't supported. This operation can -// only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see Amazon -// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 Object -// Ownership, and S3 Block Public Access are not supported for directory buckets. -// For directory buckets, all Block Public Access settings are enabled at the -// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs -// disabled). 
These settings can't be modified. For more information about -// permissions for creating and working with directory buckets, see Directory -// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. For more information about supported S3 features -// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) -// in the Amazon S3 User Guide. +// permission is required. +// +// To set an ACL on a bucket as part of a CreateBucket request, you must explicitly +// +// set S3 Object Ownership for the bucket to a different value than the default, +// BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public +// access, you must first create the bucket (without the bucket ACL) and then +// explicitly disable Block Public Access on the bucket before using PutBucketAcl +// to set the ACL. If you try to create a bucket with a public ACL, the request +// will fail. +// +// For the majority of modern use cases in S3, we recommend that you keep all +// +// Block Public Access settings enabled and keep ACLs disabled. If you would like +// to share data with users outside of your account, you can use bucket policies as +// needed. For more information, see [Controlling ownership of objects and disabling ACLs for your bucket]and [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. +// +// - S3 Block Public Access - If your specific use case requires granting public +// access to your S3 resources, you can disable Block Public Access. Specifically, +// you can create a new bucket with Block Public Access enabled, then separately +// call the [DeletePublicAccessBlock]DeletePublicAccessBlock API. To use this operation, you must have the +// s3:PutBucketPublicAccessBlock permission. For more information about S3 Block +// Public Access, see [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the +// Amazon S3 User Guide. +// +// The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public +// +// Access are not supported for directory buckets. For directory buckets, all Block +// Public Access settings are enabled at the bucket level and S3 Object Ownership +// is set to Bucket owner enforced (ACLs disabled). These settings can't be +// modified. +// +// For more information about permissions for creating and working with directory // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// CreateBucket : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// buckets, see [Directory buckets]in the Amazon S3 User Guide. 
For more information about +// supported S3 features for directory buckets, see [Features of S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to CreateBucket : +// +// [PutObject] +// +// [DeleteBucket] +// +// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html +// [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html +// [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { if params == nil { params = &CreateBucketInput{} @@ -108,77 +144,100 @@ func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, op type CreateBucketInput struct { - // The name of the bucket to create. General purpose buckets - For information - // about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) - // in the Amazon S3 User Guide. Directory buckets - When you use this operation - // with a directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The name of the bucket to create. + // + // General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules] + // in the Amazon S3 User Guide. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). 
For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html // // This member is required. Bucket *string - // The canned ACL to apply to the bucket. This functionality is not supported for - // directory buckets. + // The canned ACL to apply to the bucket. + // + // This functionality is not supported for directory buckets. ACL types.BucketCannedACL // The configuration information for the bucket. CreateBucketConfiguration *types.CreateBucketConfiguration // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This functionality is not supported for directory buckets. + // bucket. + // + // This functionality is not supported for directory buckets. GrantFullControl *string - // Allows grantee to list the objects in the bucket. This functionality is not - // supported for directory buckets. + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for directory buckets. GrantRead *string - // Allows grantee to read the bucket ACL. This functionality is not supported for - // directory buckets. + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for directory buckets. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. This functionality is not supported for directory buckets. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. + // + // This functionality is not supported for directory buckets. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This functionality - // is not supported for directory buckets. + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for directory buckets. GrantWriteACP *string // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // // This functionality is not supported for directory buckets. ObjectLockEnabledForBucket *bool // The container element for object ownership for a bucket's ownership controls. + // // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. ObjectWriter - The uploading account will own the object if the - // object is uploaded with the bucket-owner-full-control canned ACL. + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. 
+ // // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't // specify an ACL or specify bucket owner full control ACLs (such as the predefined // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced - // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon - // use cases where you must control access for each object individually. For more - // information about S3 Object Ownership, see Controlling ownership of objects and - // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Directory buckets use the bucket owner enforced setting for S3 Object - // Ownership. + // the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + // you must control access for each object individually. For more information about + // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ObjectOwnership types.ObjectOwnership noSmithyDocumentSerde } func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) p.DisableAccessPoints = ptr.Bool(true) @@ -217,25 +276,28 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -253,6 +315,18 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil 
{ + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateBucketValidationMiddleware(stack); err != nil { return err } @@ -262,7 +336,7 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addCreateBucketUpdateEndpoint(stack, options); err != nil { @@ -286,6 +360,18 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go new file mode 100644 index 00000000..fbac4458 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go @@ -0,0 +1,293 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a metadata table configuration for a general purpose bucket. For more +// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the following permissions. For +// more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. +// +// If you also want to integrate your table bucket with Amazon Web Services +// analytics services so that you can query your metadata table, you need +// additional permissions. For more information, see [Integrating Amazon S3 Tables with Amazon Web Services analytics services]in the Amazon S3 User Guide. 
+// +// - s3:CreateBucketMetadataTableConfiguration +// +// - s3tables:CreateNamespace +// +// - s3tables:GetTable +// +// - s3tables:CreateTable +// +// - s3tables:PutTablePolicy +// +// The following operations are related to CreateBucketMetadataTableConfiguration : +// +// [DeleteBucketMetadataTableConfiguration] +// +// [GetBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html +// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +// [Integrating Amazon S3 Tables with Amazon Web Services analytics services]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-aws.html +func (c *Client) CreateBucketMetadataTableConfiguration(ctx context.Context, params *CreateBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*CreateBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &CreateBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateBucketMetadataTableConfiguration", params, optFns, c.addOperationCreateBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that you want to create the metadata table + // configuration in. + // + // This member is required. + Bucket *string + + // The contents of your metadata table configuration. + // + // This member is required. + MetadataTableConfiguration *types.MetadataTableConfiguration + + // The checksum algorithm to use with your metadata table configuration. + ChecksumAlgorithm types.ChecksumAlgorithm + + // The Content-MD5 header for the metadata table configuration. + ContentMD5 *string + + // The expected owner of the general purpose bucket that contains your metadata + // table configuration. + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *CreateBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type CreateBucketMetadataTableConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err 
+ } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *CreateBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateBucketMetadataTableConfiguration", + } +} + +// getCreateBucketMetadataTableConfigurationRequestAlgorithmMember gets the +// request checksum algorithm value provided as input. +func getCreateBucketMetadataTableConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*CreateBucketMetadataTableConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getCreateBucketMetadataTableConfigurationRequestAlgorithmMember, + RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getCreateBucketMetadataTableConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getCreateBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*CreateBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCreateBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go index 4f24e11c..2842e8a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -16,51 +16,54 @@ import ( // This action initiates a multipart upload and returns an upload ID. This upload // ID is used to associate all of the parts in the specific multipart upload. 
You -// specify this upload ID in each of your subsequent upload part requests (see -// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// ). You also include this upload ID in the final request to either complete or -// abort the multipart upload request. For more information about multipart -// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide. After you initiate a multipart upload and upload -// one or more parts, to stop being charged for storing the uploaded parts, you -// must either complete or abort the multipart upload. Amazon S3 frees up the space -// used to store the parts and stops charging you for storing them only after you -// either complete or abort a multipart upload. If you have configured a lifecycle -// rule to abort incomplete multipart uploads, the created multipart upload must be -// completed within the number of days specified in the bucket lifecycle -// configuration. Otherwise, the incomplete multipart upload becomes eligible for -// an abort action and Amazon S3 aborts the multipart upload. For more information, -// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// . +// specify this upload ID in each of your subsequent upload part requests (see [UploadPart]). +// You also include this upload ID in the final request to either complete or abort +// the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview] +// in the Amazon S3 User Guide. +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or abort +// the multipart upload. Amazon S3 frees up the space used to store the parts and +// stops charging you for storing them only after you either complete or abort a +// multipart upload. +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the created multipart upload must be completed within the number of days +// specified in the bucket lifecycle configuration. Otherwise, the incomplete +// multipart upload becomes eligible for an abort action and Amazon S3 aborts the +// multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. +// // - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. 
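For orientation, the multipart flow documented in this vendored file (CreateMultipartUpload, then UploadPart per part, then CompleteMultipartUpload) can be driven from the updated v2 client roughly as in the minimal sketch below. This is a hedged illustration only, not code from this diff: it assumes default credentials are resolvable, the bucket and key names are placeholders, and the checksum member is carried forward because newer SDK defaults may compute one per part.

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder names for illustration only.
	bucket, key := aws.String("amzn-s3-demo-bucket"), aws.String("example-object")

	// 1. Initiate the upload; the returned UploadId associates all parts.
	mpu, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Upload parts and keep each returned ETag. A single part is also the
	//    last part, so the 5 MiB minimum part size does not apply here.
	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int32(1),
		Body:       bytes.NewReader([]byte("hello multipart upload")),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 3. Complete the upload so the stored parts stop accruing charges.
	//    ChecksumCRC32 is passed through in case the SDK's default checksum
	//    calculation attached one to the part; it is omitted when nil.
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{
				ETag:          part.ETag,
				PartNumber:    aws.Int32(1),
				ChecksumCRC32: part.ChecksumCRC32,
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}

If any step after initiation fails, the guidance above applies: either complete or abort the upload (AbortMultipartUpload) so Amazon S3 frees the stored parts.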
// // Request signing For request signing, multipart upload is just a series of // regular requests. You initiate a multipart upload, send one or more requests to // upload parts, and then complete the multipart upload process. You sign each // request individually. There is nothing special about signing multipart upload -// requests. For more information about signing, see Authenticating Requests -// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// in the Amazon S3 User Guide. Permissions -// - General purpose bucket permissions - For information about the permissions -// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. To perform a multipart upload with encryption by -// using an Amazon Web Services KMS key, the requester must have permission to the -// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are -// required because Amazon S3 must decrypt and read data from the encrypted file -// parts before it completes the multipart upload. For more information, see -// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. +// requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)]in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To perform a multipart upload with +// encryption using an Key Management Service (KMS) KMS key, the requester must +// have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. +// The requester must also have permissions for the kms:GenerateDataKey action +// for the CreateMultipartUpload API. Then, the requester needs permissions for +// the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These +// permissions are required because Amazon S3 must decrypt and read data from the +// encrypted file parts before it completes the multipart upload. For more +// information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -68,10 +71,10 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. 
For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Encryption +// // - General purpose buckets - Server-side encryption is for data encryption at // rest. Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. Amazon S3 automatically encrypts all new @@ -91,61 +94,135 @@ import ( // in your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. If // you choose to provide your own encryption key, the request headers you provide -// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the CreateMultipartUpload request. +// in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload +// request. +// // - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( // aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) // – If you want Amazon Web Services to manage the keys used to encrypt data, // specify the following headers in the request. +// // - x-amz-server-side-encryption +// // - x-amz-server-side-encryption-aws-kms-key-id +// // - x-amz-server-side-encryption-context +// // - If you specify x-amz-server-side-encryption:aws:kms , but don't provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web // Services managed key ( aws/s3 key) in KMS to protect the data. +// // - To perform a multipart upload with encryption by using an Amazon Web // Services KMS key, the requester must have permission to the kms:Decrypt and // kms:GenerateDataKey* actions on the key. These permissions are required // because Amazon S3 must decrypt and read data from the encrypted file parts -// before it completes the multipart upload. For more information, see Multipart -// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. +// before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in +// the Amazon S3 User Guide. +// // - If your Identity and Access Management (IAM) user or role is in the same // Amazon Web Services account as the KMS key, then you must have these permissions // on the key policy. If your IAM user or role is in a different account from the // key, then you must have the permissions on both the key policy and your IAM user // or role. +// // - All GET and PUT requests for an object protected by KMS fail if you don't // make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), // or Signature Version 4. 
For information about configuring any of the officially -// supported Amazon Web Services SDKs and Amazon Web Services CLI, see -// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) -// in the Amazon S3 User Guide. For more information about server-side -// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. -// - Use customer-provided encryption keys (SSE-C) – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 For more information about -// server-side encryption with customer-provided encryption keys (SSE-C), see -// Protecting data using server-side encryption with customer-provided encryption -// keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. -// - Directory buckets -For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CreateMultipartUpload : -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the +// Amazon S3 User Guide. +// +// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys] +// +// in the Amazon S3 User Guide. +// +// - Use customer-provided encryption keys (SSE-C) – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about server-side encryption with customer-provided +// +// encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses +// the desired encryption configuration and you don't override the bucket default +// encryption in your CreateSession requests or PUT object requests. Then, new +// objects are automatically encrypted with the desired encryption settings. 
For +// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about +// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. +// +// In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the +// +// encryption request headers must match the encryption settings that are specified +// in the CreateSession request. You can't override the values of the encryption +// settings ( x-amz-server-side-encryption , +// x-amz-server-side-encryption-aws-kms-key-id , +// x-amz-server-side-encryption-context , and +// x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the +// CreateSession request. You don't need to explicitly specify these encryption +// settings values in Zonal endpoint API calls, and Amazon S3 will use the +// encryption settings values from the CreateSession request to protect new +// objects in the directory bucket. +// +// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the +// +// session token refreshes automatically to avoid service interruptions when a +// session expires. The CLI or the Amazon Web Services SDKs use the bucket's +// default encryption configuration for the CreateSession request. It's not +// supported to override the encryption settings values in the CreateSession +// request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption +// request headers must match the default encryption configuration of the directory +// bucket. +// +// For directory buckets, when you perform a CreateMultipartUpload operation and an +// +// UploadPartCopy operation, the request headers you provide in the +// CreateMultipartUpload request must match the default encryption configuration +// of the destination bucket. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// The following operations are related to CreateMultipartUpload : +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version +// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html +// [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { if params == nil { params = &CreateMultipartUploadInput{} @@ -164,30 +241,40 @@ func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultip type CreateMultipartUploadInput struct { // The name of the bucket where the multipart upload is initiated and where the - // object is uploaded. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. 
Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // object is uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -199,41 +286,67 @@ type CreateMultipartUploadInput struct { // The canned ACL to apply to the object. Amazon S3 supports a set of predefined // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and - // permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // in the Amazon S3 User Guide. By default, all objects are private. Only the owner - // has full access control. When uploading an object, you can grant access - // permissions to individual Amazon Web Services accounts or to predefined groups - // defined by Amazon S3. These permissions are then added to the access control - // list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) - // . One way to grant the permissions using the request headers is to specify a - // canned ACL with the x-amz-acl request header. + // permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can grant access permissions to individual Amazon + // Web Services accounts or to predefined groups defined by Amazon S3. These + // permissions are then added to the access control list (ACL) on the new object. + // For more information, see [Using ACLs]. One way to grant the permissions using the request + // headers is to specify a canned ACL with the x-amz-acl request header. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with an object action doesn’t - // affect bucket-level settings for S3 Bucket Key. This functionality is not - // supported for directory buckets. + // + // General purpose buckets - Setting this header to true causes Amazon S3 to use + // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this + // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. + // + // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + // operations in a directory bucket and can’t be disabled. 
S3 Bucket Keys aren't + // supported, when you copy SSE-KMS encrypted objects from general purpose buckets + // to directory buckets, from directory buckets to general purpose buckets, or + // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + // call to KMS every time a copy request is made for a KMS-encrypted object. + // + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm + // Indicates the checksum type that you want Amazon S3 to use to calculate the + // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Specifies presentational information for the object. ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For directory buckets, only the aws-chunked value is - // supported in this header field. + // Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string // The language that the content is in. @@ -251,213 +364,386 @@ type CreateMultipartUploadInput struct { Expires *time.Time // Specify access permissions explicitly to give the grantee READ, READ_ACP, and - // WRITE_ACP permissions on the object. By default, all objects are private. Only - // the owner has full access control. When uploading an object, you can use this - // header to explicitly grant access permissions to specific Amazon Web Services - // accounts or groups. This header maps to specific permissions that Amazon S3 - // supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // WRITE_ACP permissions on the object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. 
This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantFullControl *string // Specify access permissions explicitly to allow grantee to read the object data - // and its metadata. By default, all objects are private. Only the owner has full - // access control. When uploading an object, you can use this header to explicitly - // grant access permissions to specific Amazon Web Services accounts or groups. - // This header maps to specific permissions that Amazon S3 supports in an ACL. For - // more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // and its metadata. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. 
+ // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantRead *string // Specify access permissions explicitly to allows grantee to read the object ACL. + // // By default, all objects are private. Only the owner has full access control. // When uploading an object, you can use this header to explicitly grant access // permissions to specific Amazon Web Services accounts or groups. This header maps // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. 
California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantReadACP *string // Specify access permissions explicitly to allows grantee to allow grantee to - // write the ACL for the applicable object. By default, all objects are private. - // Only the owner has full access control. When uploading an object, you can use - // this header to explicitly grant access permissions to specific Amazon Web - // Services accounts or groups. This header maps to specific permissions that - // Amazon S3 supports in an ACL. For more information, see Access Control List - // (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // write the ACL for the applicable object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. 
California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantWriteACP *string // A map of metadata to store with the object in S3. Metadata map[string]string - // Specifies whether you want to apply a legal hold to the uploaded object. This - // functionality is not supported for directory buckets. + // Specifies whether you want to apply a legal hold to the uploaded object. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Specifies the Object Lock mode that you want to apply to the uploaded object. + // // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // Specifies the date and time when you want the Object Lock to expire. This - // functionality is not supported for directory buckets. + // Specifies the date and time when you want the Object Lock to expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. 
SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the customer-provided encryption key // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. This - // functionality is not supported for directory buckets. + // to ensure that the encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This functionality is not - // supported for directory buckets. + // encryption. The value of this header is a Base64 encoded string of a UTF-8 + // encoded JSON, which contains the encryption context as key-value pairs. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. SSEKMSEncryptionContext *string - // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption - // customer managed key to use for object encryption. This functionality is not - // supported for directory buckets. + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. If the KMS key doesn't exist in the same account that's issuing the + // command, you must use the full Key ARN not the Key ID. + // + // General purpose buckets - If you specify x-amz-server-side-encryption with + // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key + // Alias) of the KMS key to use. If you specify + // x-amz-server-side-encryption:aws:kms or + // x-amz-server-side-encryption:aws:kms:dsse , but do not provide + // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web + // Services managed key ( aws/s3 ) to protect the data. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. 
+ // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). + // + // - Directory buckets - For directory buckets, there are only two supported + // options for server-side encryption: server-side encryption with Amazon S3 + // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses + // the desired encryption configuration and you don't override the bucket default + // encryption in your CreateSession requests or PUT object requests. Then, new + // objects are automatically encrypted with the desired encryption settings. For + // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + // encryption request headers must match the encryption settings that are specified + // in the CreateSession request. You can't override the values of the encryption + // settings ( x-amz-server-side-encryption , + // x-amz-server-side-encryption-aws-kms-key-id , + // x-amz-server-side-encryption-context , and + // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + // CreateSession request. You don't need to explicitly specify these encryption + // settings values in Zonal endpoint API calls, and Amazon S3 will use the + // encryption settings values from the CreateSession request to protect new + // objects in the directory bucket. + // + // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + // session token refreshes automatically to avoid service interruptions when a + // session expires. The CLI or the Amazon Web Services SDKs use the bucket's + // default encryption configuration for the CreateSession request. It's not + // supported to override the encryption settings values in the CreateSession + // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + // request headers must match the default encryption configuration of the directory + // bucket. + // + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. 
Depending on performance needs, you can specify a different - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. - // - For directory buckets, only the S3 Express One Zone storage class is - // supported to store newly created objects. + // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + // + // - Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone + // storage class) in Availability Zones and ONEZONE_IA (the S3 One + // Zone-Infrequent Access storage class) in Dedicated Local Zones. + // // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. This functionality is not supported for directory buckets. + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // + // This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -469,65 +755,75 @@ type CreateMultipartUploadOutput struct { // incomplete multipart uploads and the prefix in the lifecycle rule matches the // object name in the request, the response includes this header. The header // indicates when the initiated multipart upload becomes eligible for an abort - // operation. For more information, see Aborting Incomplete Multipart Uploads - // Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id - // header that provides the ID of the lifecycle configuration rule that defines the - // abort action. This functionality is not supported for directory buckets. + // operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. + // + // The response also includes the x-amz-abort-rule-id header that provides the ID + // of the lifecycle configuration rule that defines the abort action. + // + // This functionality is not supported for directory buckets. + // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. This functionality is not supported for directory - // buckets. + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. 
AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. Access points are not - // supported by directory buckets. + // return the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm types.ChecksumAlgorithm + // Indicates the checksum type that you want Amazon S3 to use to calculate the + // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Object key for which the multipart upload was initiated. Key *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This functionality is - // not supported for directory buckets. + // object encryption. The value of this header is a Base64 encoded string of a + // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. SSEKMSEncryptionContext *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // ID for the initiated multipart upload. 
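For reference, a minimal sketch (not part of this patch) of how the CreateMultipartUploadInput fields documented above are used from application code with this SDK version. The bucket name, object key, grantee account IDs, and KMS key ARN are placeholders; the grant header and SSE-KMS behavior follow the field docs in the diff above.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String("example-bucket"),                // placeholder
		Key:    aws.String("backups/2025-01-01.tar.gz"),     // placeholder
		// Value of the x-amz-grant-read header; not supported for directory
		// buckets or S3 on Outposts (see the GrantRead docs above).
		GrantRead: aws.String(`id="111122223333", id="444455556666"`),
		// SSE-KMS with a customer managed key; omitting SSEKMSKeyId falls back
		// to the Amazon Web Services managed key (aws/s3) for general purpose buckets.
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // placeholder
		StorageClass:         types.StorageClassStandard,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("upload ID:", aws.ToString(out.UploadId))
}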
@@ -561,25 +857,28 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -597,6 +896,18 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -606,7 +917,7 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil { @@ -633,6 +944,18 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go index 1cb1f151..21db1319 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go @@ -15,64 +15,130 @@ import ( ) // Creates a session that establishes temporary security credentials to support -// fast authentication and authorization for the Zonal endpoint APIs on directory -// buckets. For more information about Zonal endpoint APIs that include the -// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) -// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory -// bucket, use the CreateSession API operation. 
Specifically, you grant -// s3express:CreateSession permission to a bucket in a bucket policy or an IAM -// identity-based policy. Then, you use IAM credentials to make the CreateSession -// API request on the bucket, which returns temporary security credentials that -// include the access key ID, secret access key, session token, and expiration. -// These credentials have associated permissions to access the Zonal endpoint APIs. -// After the session is created, you don’t need to use other policies to grant -// permissions to each Zonal endpoint API individually. Instead, in your Zonal -// endpoint API requests, you sign your requests by applying the temporary security -// credentials of the session to the request headers and following the SigV4 -// protocol for authentication. You also apply the session token to the -// x-amz-s3session-token request header for authorization. Temporary security -// credentials are scoped to the bucket and expire after 5 minutes. After the -// expiration time, any calls that you make with those credentials will fail. You -// must use IAM credentials again to make a CreateSession API request that -// generates a new set of temporary credentials for use. Temporary credentials -// cannot be extended or refreshed beyond the original specified interval. If you -// use Amazon Web Services SDKs, SDKs handle the session token refreshes +// fast authentication and authorization for the Zonal endpoint API operations on +// directory buckets. For more information about Zonal endpoint API operations that +// include the Availability Zone in the request endpoint, see [S3 Express One Zone APIs]in the Amazon S3 +// User Guide. +// +// To make Zonal endpoint API requests on a directory bucket, use the CreateSession +// API operation. Specifically, you grant s3express:CreateSession permission to a +// bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM +// credentials to make the CreateSession API request on the bucket, which returns +// temporary security credentials that include the access key ID, secret access +// key, session token, and expiration. These credentials have associated +// permissions to access the Zonal endpoint API operations. After the session is +// created, you don’t need to use other policies to grant permissions to each Zonal +// endpoint API individually. Instead, in your Zonal endpoint API requests, you +// sign your requests by applying the temporary security credentials of the session +// to the request headers and following the SigV4 protocol for authentication. You +// also apply the session token to the x-amz-s3session-token request header for +// authorization. Temporary security credentials are scoped to the bucket and +// expire after 5 minutes. After the expiration time, any calls that you make with +// those credentials will fail. You must use IAM credentials again to make a +// CreateSession API request that generates a new set of temporary credentials for +// use. Temporary credentials cannot be extended or refreshed beyond the original +// specified interval. +// +// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes // automatically to avoid service interruptions when a session expires. We // recommend that you use the Amazon Web Services SDKs to initiate and manage -// requests to the CreateSession API. 
For more information, see Performance -// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) -// in the Amazon S3 User Guide. +// requests to the CreateSession API. For more information, see [Performance guidelines and design patterns]in the Amazon S3 +// User Guide. +// // - You must make requests for this API operation to the Zonal endpoint. These // endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests -// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. -// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject -// API operation doesn't use the temporary security credentials returned from the -// CreateSession API operation for authentication and authorization. For -// information about authentication and authorization of the CopyObject API -// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// . -// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket -// API operation doesn't use the temporary security credentials returned from the -// CreateSession API operation for authentication and authorization. For -// information about authentication and authorization of the HeadBucket API -// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) -// . +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style +// requests are not supported. For more information about endpoints in Availability +// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints +// in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// - CopyObject API operation - Unlike other Zonal endpoint API operations, the +// CopyObject API operation doesn't use the temporary security credentials +// returned from the CreateSession API operation for authentication and +// authorization. For information about authentication and authorization of the +// CopyObject API operation on directory buckets, see [CopyObject]. +// +// - HeadBucket API operation - Unlike other Zonal endpoint API operations, the +// HeadBucket API operation doesn't use the temporary security credentials +// returned from the CreateSession API operation for authentication and +// authorization. For information about authentication and authorization of the +// HeadBucket API operation on directory buckets, see [HeadBucket]. // // Permissions To obtain temporary security credentials, you must create a bucket // policy or an IAM identity-based policy that grants s3express:CreateSession // permission to the bucket. In a policy, you can have the s3express:SessionMode // condition key to control who can create a ReadWrite or ReadOnly session. For -// more information about ReadWrite or ReadOnly sessions, see -// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters) -// . 
For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint -// APIs, the bucket policy should also grant both accounts the -// s3express:CreateSession permission. HTTP Host header syntax Directory buckets - -// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// more information about ReadWrite or ReadOnly sessions, see [x-amz-create-session-mode] +// x-amz-create-session-mode . For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 +// User Guide. +// +// To grant cross-account access to Zonal endpoint API operations, the bucket +// policy should also grant both accounts the s3express:CreateSession permission. +// +// If you want to encrypt objects with SSE-KMS, you must also have the +// kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based +// policies and KMS key policies for the target KMS key. +// +// Encryption For directory buckets, there are only two supported options for +// server-side encryption: server-side encryption with Amazon S3 managed keys +// (SSE-S3) ( AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms +// ). We recommend that the bucket's default encryption uses the desired encryption +// configuration and you don't override the bucket default encryption in your +// CreateSession requests or PUT object requests. Then, new objects are +// automatically encrypted with the desired encryption settings. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the +// encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. +// +// For [Zonal endpoint (object-level) API operations] except [CopyObject] and [UploadPartCopy], you authenticate and authorize requests through [CreateSession] for low +// latency. To encrypt new objects in a directory bucket with SSE-KMS, you must +// specify SSE-KMS as the directory bucket's default encryption configuration with +// a KMS key (specifically, a [customer managed key]). Then, when a session is created for Zonal +// endpoint API operations, new objects are automatically encrypted and decrypted +// with SSE-KMS and S3 Bucket Keys during the session. +// +// Only 1 [customer managed key] is supported per directory bucket for the lifetime of the bucket. The [Amazon Web Services managed key] ( +// aws/s3 ) isn't supported. After you specify SSE-KMS as your bucket's default +// encryption configuration with a customer managed key, you can't change the +// customer managed key for the bucket's SSE-KMS configuration. 
+// +// In the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]) using the REST API, you can't +// override the values of the encryption settings ( x-amz-server-side-encryption , +// x-amz-server-side-encryption-aws-kms-key-id , +// x-amz-server-side-encryption-context , and +// x-amz-server-side-encryption-bucket-key-enabled ) from the CreateSession +// request. You don't need to explicitly specify these encryption settings values +// in Zonal endpoint API calls, and Amazon S3 will use the encryption settings +// values from the CreateSession request to protect new objects in the directory +// bucket. +// +// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the +// session token refreshes automatically to avoid service interruptions when a +// session expires. The CLI or the Amazon Web Services SDKs use the bucket's +// default encryption configuration for the CreateSession request. It's not +// supported to override the encryption settings values in the CreateSession +// request. Also, in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), it's not +// supported to override the values of the encryption settings from the +// CreateSession request. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html +// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters +// [Zonal endpoint (object-level) API operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-differences.html#s3-express-differences-api-operations +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: 
https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) { if params == nil { params = &CreateSessionInput{} @@ -95,29 +161,111 @@ type CreateSessionInput struct { // This member is required. Bucket *string + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using KMS keys (SSE-KMS). + // + // S3 Bucket Keys are always enabled for GET and PUT operations in a directory + // bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy + // SSE-KMS encrypted objects from general purpose buckets to directory buckets, + // from directory buckets to general purpose buckets, or between directory buckets, + // through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a + // copy request is made for a KMS-encrypted object. + // + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops + BucketKeyEnabled *bool + + // Specifies the Amazon Web Services KMS Encryption Context as an additional + // encryption context to use for object encryption. The value of this header is a + // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption + // context as key-value pairs. This value is stored as object metadata and + // automatically gets passed on to Amazon Web Services KMS for future GetObject + // operations on this object. + // + // General purpose buckets - This value must be explicitly added during CopyObject + // operations if you want an additional encryption context for your object. For + // more information, see [Encryption context]in the Amazon S3 User Guide. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. + // + // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context + SSEKMSEncryptionContext *string + + // If you specify x-amz-server-side-encryption with aws:kms , you must specify the + // x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key + // ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you + // get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key + // alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist + // in the same account that't issuing the command, you must use the full Key ARN + // not the Key ID. + // + // Your SSE-KMS configuration can only support 1 [customer managed key] per directory bucket's lifetime. + // The [Amazon Web Services managed key]( aws/s3 ) isn't supported. 
+ // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + SSEKMSKeyId *string + + // The server-side encryption algorithm to use when you store objects in the + // directory bucket. + // + // For directory buckets, there are only two supported options for server-side + // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 + // ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). By default, + // Amazon S3 encrypts data with SSE-S3. For more information, see [Protecting data with server-side encryption]in the Amazon S3 + // User Guide. + // + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html + ServerSideEncryption types.ServerSideEncryption + // Specifies the mode of the session that will be created, either ReadWrite or // ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is - // capable of executing all the Zonal endpoint APIs on a directory bucket. A - // ReadOnly session is constrained to execute the following Zonal endpoint APIs: - // GetObject , HeadObject , ListObjectsV2 , GetObjectAttributes , ListParts , and - // ListMultipartUploads . + // capable of executing all the Zonal endpoint API operations on a directory + // bucket. A ReadOnly session is constrained to execute the following Zonal + // endpoint API operations: GetObject , HeadObject , ListObjectsV2 , + // GetObjectAttributes , ListParts , and ListMultipartUploads . SessionMode types.SessionMode noSmithyDocumentSerde } func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CreateSessionOutput struct { - // The established temporary security credentials for the created session.. + // The established temporary security credentials for the created session. // // This member is required. Credentials *types.SessionCredentials + // Indicates whether to use an S3 Bucket Key for server-side encryption with KMS + // keys (SSE-KMS). + BucketKeyEnabled *bool + + // If present, indicates the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a Base64 encoded string of a + // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + // This value is stored as object metadata and automatically gets passed on to + // Amazon Web Services KMS for future GetObject operations on this object. + SSEKMSEncryptionContext *string + + // If you specify x-amz-server-side-encryption with aws:kms , this header indicates + // the ID of the KMS symmetric encryption customer managed key that was used for + // object encryption. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store objects in the + // directory bucket. + ServerSideEncryption types.ServerSideEncryption + // Metadata pertaining to the operation's result. 
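As a rough illustration of the CreateSession input and output fields documented above, the sketch below calls the operation directly against a directory bucket. In normal use the Go SDK creates and refreshes these sessions automatically for Zonal endpoint operations, so a direct call is mostly useful for inspecting the returned credentials and encryption settings. The bucket name is a placeholder in the bucket-base-name--zone-id--x-s3 format.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.CreateSession(ctx, &s3.CreateSessionInput{
		Bucket:      aws.String("example--usw2-az1--x-s3"), // placeholder directory bucket
		SessionMode: types.SessionModeReadOnly,             // ReadWrite is the default
	})
	if err != nil {
		log.Fatal(err)
	}

	// Temporary credentials are scoped to the bucket and expire after 5 minutes.
	fmt.Println("session expires at:", aws.ToTime(out.Credentials.Expiration))
	fmt.Println("bucket key enabled:", aws.ToBool(out.BucketKeyEnabled))
	fmt.Println("server-side encryption:", out.ServerSideEncryption)
}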
ResultMetadata middleware.Metadata @@ -146,25 +294,28 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -182,6 +333,18 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSessionValidationMiddleware(stack); err != nil { return err } @@ -191,7 +354,7 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addCreateSessionUpdateEndpoint(stack, options); err != nil { @@ -215,6 +378,18 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go index 34645bb9..9e1edf91 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go @@ -15,33 +15,45 @@ import ( // Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. +// // - Directory buckets - If multipart uploads in a directory bucket are in // progress, you can't delete the bucket until all the in-progress multipart // uploads are aborted or completed. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Regional endpoint. 
These endpoints support path-style // requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Permissions +// // - General purpose bucket permissions - You must have the s3:DeleteBucket // permission on the specified bucket in a policy. +// // - Directory bucket permissions - You must have the s3express:DeleteBucket // permission in an IAM identity-based policy instead of a bucket policy. // Cross-account access to this API operation isn't supported. This operation can // only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see Amazon -// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the +// Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to DeleteBucket : +// +// [CreateBucket] +// +// [DeleteObject] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// DeleteBucket : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { if params == nil { params = &DeleteBucketInput{} @@ -59,30 +71,36 @@ func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, op type DeleteBucketInput struct { - // Specifies the bucket being deleted. 
Directory buckets - When you use this - // operation with a directory bucket, you must use path-style requests in the - // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Specifies the bucket being deleted. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -116,25 +134,28 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -152,6 +173,18 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { return err } @@ -161,7 +194,7 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil { @@ -185,6 +218,18 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go index 55d8fc1c..a3a1873f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -13,20 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes an analytics -// configuration for the bucket (specified by the analytics configuration ID). 
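A brief sketch of the DeleteBucket call whose input fields are documented above, for a general purpose bucket. ExpectedBucketOwner is only valid there; for directory buckets the header is not supported, as noted in the docs. Bucket name and account ID are placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The bucket must already be empty (no objects, versions, or delete markers).
	_, err = client.DeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket:              aws.String("example-bucket"), // placeholder
		ExpectedBucketOwner: aws.String("111122223333"),   // placeholder account ID
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("bucket deleted")
}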
To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 analytics feature, see Amazon S3 -// Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . The following operations are related to DeleteBucketAnalyticsConfiguration : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// The following operations are related to DeleteBucketAnalyticsConfiguration : +// +// [GetBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &DeleteBucketAnalyticsConfigurationInput{} @@ -63,6 +75,7 @@ type DeleteBucketAnalyticsConfigurationInput struct { } func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -96,25 +109,28 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = 
awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -132,6 +148,18 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -141,7 +169,7 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { @@ -165,6 +193,18 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go index 9a544922..ebae81c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go @@ -13,14 +13,25 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the cors -// configuration information set for the bucket. To use this operation, you must -// have permission to perform the s3:PutBucketCORS action. The bucket owner has -// this permission by default and can grant this permission to others. For -// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. 
Related Resources -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// This operation is not supported for directory buckets. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. +// +// # Related Resources +// +// [PutBucketCors] +// +// [RESTOPTIONSobject] +// +// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { if params == nil { params = &DeleteBucketCorsInput{} @@ -52,6 +63,7 @@ type DeleteBucketCorsInput struct { } func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -85,25 +97,28 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -121,6 +136,18 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -130,7 +157,7 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil { @@ -154,6 +181,18 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go index 0ef6f93e..bf654025 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go @@ -13,20 +13,46 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the DELETE action resets the default encryption for the bucket as server-side -// encryption with Amazon S3 managed keys (SSE-S3). For information about the -// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. To use this operation, you must have permissions to -// perform the s3:PutEncryptionConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. The following operations are related to -// DeleteBucketEncryption : -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// This implementation of the DELETE action resets the default encryption for the +// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets - For information about the bucket default +// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. For information about +// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:PutEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. 
+// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to DeleteBucketEncryption : +// +// [PutBucketEncryption] +// +// [GetBucketEncryption] +// +// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html +// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { if params == nil { params = &DeleteBucketEncryptionInput{} @@ -47,18 +73,34 @@ type DeleteBucketEncryptionInput struct { // The name of the bucket containing the server-side encryption configuration to // delete. // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -92,25 +134,28 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -128,6 +173,18 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -137,7 +194,7 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil { @@ -161,6 +218,18 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go index eeaefc5f..a62386c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -13,25 +13,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. 
Deletes the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to DeleteBucketIntelligentTieringConfiguration include: -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
+// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// [GetBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &DeleteBucketIntelligentTieringConfigurationInput{} @@ -64,6 +77,7 @@ type DeleteBucketIntelligentTieringConfigurationInput struct { } func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -97,25 +111,28 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -133,6 +150,18 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -142,7 +171,7 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err 
} if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { @@ -166,6 +195,18 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go index 28a93e0d..d24e3f75 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -13,18 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes an inventory -// configuration (identified by the inventory ID) from the bucket. To use this -// operation, you must have permissions to perform the s3:PutInventoryConfiguration -// action. The bucket owner has this permission by default. The bucket owner can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// . Operations related to DeleteBucketInventoryConfiguration include: -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// This operation is not supported for directory buckets. +// +// Deletes an inventory configuration (identified by the inventory ID) from the +// bucket. +// +// To use this operation, you must have permissions to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. 
+// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// [GetBucketInventoryConfiguration] +// +// [PutBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { if params == nil { params = &DeleteBucketInventoryConfigurationInput{} @@ -61,6 +75,7 @@ type DeleteBucketInventoryConfigurationInput struct { } func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -94,25 +109,28 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -130,6 +148,18 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -139,7 +169,7 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = 
awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { @@ -163,6 +193,18 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go index d7b8eb58..489be24c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go @@ -13,20 +13,61 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the lifecycle -// configuration from the specified bucket. Amazon S3 removes all the lifecycle -// configuration rules in the lifecycle subresource associated with the bucket. -// Your objects never expire, and Amazon S3 no longer automatically deletes any -// objects on the basis of rules contained in the deleted lifecycle configuration. -// To use this operation, you must have permission to perform the -// s3:PutLifecycleConfiguration action. By default, the bucket owner has this -// permission and the bucket owner can grant this permission to others. There is -// usually some time lag before lifecycle configuration deletion is fully -// propagated to all the Amazon S3 systems. For more information about the object -// expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions) -// . Related actions include: -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the deleted +// lifecycle configuration. +// +// Permissions +// - General purpose bucket permissions - By default, all Amazon S3 resources +// are private, including buckets, objects, and related subresources (for example, +// lifecycle configuration and website configuration). Only the resource owner +// (that is, the Amazon Web Services account that created it) can access the +// resource. The resource owner can optionally grant access permissions to others +// by writing an access policy. For this operation, a user must have the +// s3:PutLifecycleConfiguration permission. +// +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. 
+// +// - Directory bucket permissions - You must have the +// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy +// to use this operation. Cross-account access to this API operation isn't +// supported. The resource owner can optionally grant access permissions to others +// by creating a role or user for them as long as they are within the same account +// as the owner and resource. +// +// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in +// +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// For more information about the object expiration, see [Elements to Describe Lifecycle Actions]. +// +// Related actions include: +// +// [PutBucketLifecycleConfiguration] +// +// [GetBucketLifecycleConfiguration] +// +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { if params == nil { params = &DeleteBucketLifecycleInput{} @@ -52,12 +93,16 @@ type DeleteBucketLifecycleInput struct { // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -91,25 +136,28 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. 
if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -127,6 +175,18 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { return err } @@ -136,7 +196,7 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil { @@ -160,6 +220,18 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go new file mode 100644 index 00000000..880d74d9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a metadata table configuration from a general purpose bucket. 
For more +// +// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the +// s3:DeleteBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] +// in the Amazon S3 User Guide. +// +// The following operations are related to DeleteBucketMetadataTableConfiguration : +// +// [CreateBucketMetadataTableConfiguration] +// +// [GetBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html +// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +func (c *Client) DeleteBucketMetadataTableConfiguration(ctx context.Context, params *DeleteBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetadataTableConfiguration", params, optFns, c.addOperationDeleteBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that you want to remove the metadata table + // configuration from. + // + // This member is required. + Bucket *string + + // The expected bucket owner of the general purpose bucket that you want to + // remove the metadata table configuration from. + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketMetadataTableConfigurationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err 
+ } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketMetadataTableConfiguration", + } +} + +// getDeleteBucketMetadataTableConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getDeleteBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go index a6675a88..ed96b025 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -13,22 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes a metrics -// configuration for the Amazon CloudWatch request metrics (specified by the -// metrics configuration ID) from the bucket. Note that this doesn't include the -// daily storage metrics. To use this operation, you must have permissions to -// perform the s3:PutMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . 
The following operations are related to DeleteBucketMetricsConfiguration : -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported for directory buckets. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the +// s3:PutMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to DeleteBucketMetricsConfiguration : +// +// [GetBucketMetricsConfiguration] +// +// [PutBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] +// +// [Monitoring Metrics with Amazon CloudWatch] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { if params == nil { params = &DeleteBucketMetricsConfigurationInput{} @@ -66,6 +79,7 @@ type DeleteBucketMetricsConfigurationInput struct { } func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -99,25 +113,28 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != 
nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -135,6 +152,18 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -144,7 +173,7 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { @@ -168,6 +197,18 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go index 1b53e08e..4775a251 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -13,14 +13,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes OwnershipControls -// for an Amazon S3 bucket. To use this operation, you must have the -// s3:PutBucketOwnershipControls permission. For more information about Amazon S3 -// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html) -// . The following operations are related to DeleteBucketOwnershipControls : -// - GetBucketOwnershipControls -// - PutBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see [Specifying Permissions in a Policy]. 
+// +// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. +// +// The following operations are related to DeleteBucketOwnershipControls : +// +// # GetBucketOwnershipControls +// +// # PutBucketOwnershipControls +// +// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { if params == nil { params = &DeleteBucketOwnershipControlsInput{} @@ -52,6 +60,7 @@ type DeleteBucketOwnershipControlsInput struct { } func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -85,25 +94,28 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -121,6 +133,18 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -130,7 +154,7 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { @@ -154,6 +178,18 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } 
+ if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go index 0d8e77ff..048abe79 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go @@ -13,44 +13,59 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the policy of a specified bucket. Directory buckets - For directory -// buckets, you must make requests for this API operation to the Regional endpoint. -// These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the DeleteBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. If you don't have DeleteBucketPolicy permissions, Amazon S3 returns -// a 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Deletes the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the DeleteBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not using +// an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. 
+// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:DeleteBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// # The following operations are related to DeleteBucketPolicy // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// DeleteBucketPolicy -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// [CreateBucket] +// +// [DeleteObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { if params == nil { params = &DeleteBucketPolicyInput{} @@ -68,30 +83,36 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPol type DeleteBucketPolicyInput struct { - // The bucket name. 
Directory buckets - When you use this operation with a - // directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -125,25 +146,28 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -161,6 +185,18 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -170,7 +206,7 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil { @@ -194,6 +230,18 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go index 7ac11a7b..f5c434b8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go @@ -13,18 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the replication -// configuration from the bucket. 
To use this operation, you must have permissions -// to perform the s3:PutReplicationConfiguration action. The bucket owner has -// these permissions by default and can grant it to others. For more information -// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . It can take a while for the deletion of a replication configuration to fully -// propagate. For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. The following operations are related to -// DeleteBucketReplication : -// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// This operation is not supported for directory buckets. +// +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the +// s3:PutReplicationConfiguration action. The bucket owner has these permissions by +// default and can grant it to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see [Replication] in the Amazon S3 User +// Guide. +// +// The following operations are related to DeleteBucketReplication : +// +// [PutBucketReplication] +// +// [GetBucketReplication] +// +// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { if params == nil { params = &DeleteBucketReplicationInput{} @@ -42,7 +56,7 @@ func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBuck type DeleteBucketReplicationInput struct { - // The bucket name. + // The bucket name. // // This member is required. 
Bucket *string @@ -56,6 +70,7 @@ type DeleteBucketReplicationInput struct { } func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -89,25 +104,28 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -125,6 +143,18 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -134,7 +164,7 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil { @@ -158,6 +188,18 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go index 5a75a1f4..ab73775a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go @@ -13,13 +13,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the tags from the -// bucket. 
To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the // s3:PutBucketTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. The following operations are related to -// DeleteBucketTagging : -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// can grant this permission to others. +// +// The following operations are related to DeleteBucketTagging : +// +// [GetBucketTagging] +// +// [PutBucketTagging] +// +// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html +// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { if params == nil { params = &DeleteBucketTaggingInput{} @@ -51,6 +60,7 @@ type DeleteBucketTaggingInput struct { } func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -84,25 +94,28 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -120,6 +133,18 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -129,7 +154,7 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketTaggingUpdateEndpoint(stack, 
options); err != nil { @@ -153,6 +178,18 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go index dbe84fbb..6ea3b745 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go @@ -13,20 +13,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This action removes the -// website configuration for a bucket. Amazon S3 returns a 200 OK response upon -// successfully deleting a website configuration on the specified bucket. You will -// get a 200 OK response if the website configuration you are trying to delete -// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket -// specified in the request does not exist. This DELETE action requires the -// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete -// the website configuration attached to a bucket. However, bucket owners can grant -// other users permission to delete the website configuration by writing a bucket -// policy granting them the S3:DeleteBucketWebsite permission. For more -// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . The following operations are related to DeleteBucketWebsite : -// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// This operation is not supported for directory buckets. +// +// This action removes the website configuration for a bucket. Amazon S3 returns a +// 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 +// response if the bucket specified in the request does not exist. +// +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a bucket. +// However, bucket owners can grant other users permission to delete the website +// configuration by writing a bucket policy granting them the +// S3:DeleteBucketWebsite permission. +// +// For more information about hosting websites, see [Hosting Websites on Amazon S3]. 
+// +// The following operations are related to DeleteBucketWebsite : +// +// [GetBucketWebsite] +// +// [PutBucketWebsite] +// +// [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html +// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { if params == nil { params = &DeleteBucketWebsiteInput{} @@ -58,6 +69,7 @@ type DeleteBucketWebsiteInput struct { } func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -91,25 +103,28 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -127,6 +142,18 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -136,7 +163,7 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil { @@ -160,6 +187,18 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go index cd00e599..57429807 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -11,18 +11,27 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" + "time" ) // Removes an object from a bucket. The behavior depends on the bucket's // versioning state: // -// - If versioning is enabled, the operation removes the null version (if there -// is one) of an object and inserts a delete marker, which becomes the latest -// version of the object. If there isn't a null version, Amazon S3 does not remove -// any objects but will still respond that the command was successful. +// - If bucket versioning is not enabled, the operation permanently deletes the +// object. // -// - If versioning is suspended or not enabled, the operation permanently -// deletes the object. +// - If bucket versioning is enabled, the operation inserts a delete marker, +// which becomes the current version of the object. To permanently delete an object +// in a versioned bucket, you must include the object’s versionId in the request. +// For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket]. +// +// - If bucket versioning is suspended, the operation removes the object that +// has a null versionId , if there is one, and inserts a delete marker that +// becomes the current version of the object. If there isn't an object with a null +// versionId , and all versions of the object have a versionId , Amazon S3 does +// not remove the object and only inserts a delete marker. To permanently delete an +// object that has a versionId , you must include the object’s versionId in the +// request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets]. // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is @@ -32,37 +41,45 @@ import ( // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // To remove a specific version, you must use the versionId query parameter. Using // this query parameter permanently deletes the version. If the object deleted is a // delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. 
+// // If the object you want to delete is in a bucket where the bucket versioning // configuration is MFA Delete enabled, you must include the x-amz-mfa request // header in the DELETE versionId request. Requests that include x-amz-mfa must -// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) -// in the Amazon S3 User Guide. To see sample requests that use versioning, see -// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete) -// . Directory buckets - MFA delete is not supported by directory buckets. You can -// delete objects by explicitly calling DELETE Object or calling ( -// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// ) to enable Amazon S3 to remove them for you. If you want to block users or -// accounts from removing or deleting objects from your bucket, you must deny them -// the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration -// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. +// use HTTPS. For more information about MFA Delete, see [Using MFA Delete]in the Amazon S3 User +// Guide. To see sample requests that use versioning, see [Sample Request]. +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle] ) to +// enable Amazon S3 to remove them for you. If you want to block users or accounts +// from removing or deleting objects from your bucket, you must deny them the +// s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration +// actions. +// +// Directory buckets - S3 Lifecycle is not supported by directory buckets. +// // Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your DeleteObjects request includes specific headers. +// // - s3:DeleteObject - To delete an object from a bucket, you must always have // the s3:DeleteObject permission. +// // - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versiong-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -70,13 +87,24 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. 
For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following action is -// related to DeleteObject : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// The following action is related to DeleteObject : +// +// [PutObject] +// +// [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { if params == nil { params = &DeleteObjectInput{} @@ -94,31 +122,40 @@ func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, op type DeleteObjectInput struct { - // The bucket name of the bucket containing the object. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. 
S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name of the bucket containing the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -130,8 +167,9 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to // process this operation. To use this header, you must have the - // s3:BypassGovernanceRetention permission. This functionality is not supported for - // directory buckets. + // s3:BypassGovernanceRetention permission. + // + // This functionality is not supported for directory buckets. 
BypassGovernanceRetention *bool // The account ID of the expected bucket owner. If the account ID that you provide @@ -139,31 +177,69 @@ type DeleteObjectInput struct { // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // The If-Match header field makes the request method conditional on ETags. If the + // ETag value does not match, the operation returns a 412 Precondition Failed + // error. If the ETag matches or if the object doesn't exist, the operation will + // return a 204 Success (No Content) response . + // + // For more information about conditional requests, see [RFC 7232]. + // + // This functionality is only supported for directory buckets. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfMatch *string + + // If present, the object is deleted only if its modification times matches the + // provided Timestamp . If the Timestamp values do not match, the operation + // returns a 412 Precondition Failed error. If the Timestamp matches or if the + // object doesn’t exist, the operation returns a 204 Success (No Content) response. + // + // This functionality is only supported for directory buckets. + IfMatchLastModifiedTime *time.Time + + // If present, the object is deleted only if its size matches the provided size in + // bytes. If the Size value does not match, the operation returns a 412 + // Precondition Failed error. If the Size matches or if the object doesn’t exist, + // the operation returns a 204 Success (No Content) response. + // + // This functionality is only supported for directory buckets. + // + // You can use the If-Match , x-amz-if-match-last-modified-time and + // x-amz-if-match-size conditional headers in conjunction with each-other or + // individually. + IfMatchSize *int64 + // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. This functionality is not supported for directory buckets. + // delete enabled. + // + // This functionality is not supported for directory buckets. MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. For directory - // buckets in this API operation, only the null value of the version ID is - // supported. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. 
VersionId *string noSmithyDocumentSerde } func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -174,16 +250,23 @@ type DeleteObjectOutput struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. This functionality is not supported for directory - // buckets. + // object is a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // This functionality is not supported for directory buckets. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Returns the version ID of the delete marker created as a result of the DELETE - // operation. This functionality is not supported for directory buckets. + // operation. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -214,25 +297,28 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -250,6 +336,18 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { return err } @@ -259,7 +357,7 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addDeleteObjectUpdateEndpoint(stack, options); err != nil { @@ -283,6 +381,18 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go index 961b9459..f62eea47 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go @@ -12,16 +12,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes the entire tag -// set from the specified object. For more information about managing object tags, -// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . To use this operation, you must have permission to perform the -// s3:DeleteObjectTagging action. To delete tags of a specific object version, add -// the versionId query parameter in the request. You will need permission for the -// s3:DeleteObjectVersionTagging action. The following operations are related to -// DeleteObjectTagging : -// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// This operation is not supported for directory buckets. +// +// Removes the entire tag set from the specified object. For more information +// about managing object tags, see [Object Tagging]. +// +// To use this operation, you must have permission to perform the +// s3:DeleteObjectTagging action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteObjectTagging : +// +// [PutObjectTagging] +// +// [GetObjectTagging] +// +// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { if params == nil { params = &DeleteObjectTaggingInput{} @@ -39,23 +50,28 @@ func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTa type DeleteObjectTaggingInput struct { - // The bucket name containing the objects from which to remove the tags. Access - // points - When you use this action with an access point, you must provide the - // alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // The bucket name containing the objects from which to remove the tags. 
+ // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -77,6 +93,7 @@ type DeleteObjectTaggingInput struct { } func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -114,25 +131,28 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -150,6 +170,18 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -159,7 +191,7 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil { @@ -183,6 +215,18 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go index 1d1fa432..55b6288a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -17,47 +17,58 @@ import ( // This operation enables you to delete multiple objects from a bucket using a // single HTTP request. If you know the object keys that you want to delete, then // this operation provides a suitable alternative to sending individual delete -// requests, reducing per-request overhead. 
The request can contain a list of up to -// 1000 keys that you want to delete. In the XML, you provide the object key names, -// and optionally, version IDs if you want to delete a specific version of the -// object from a versioning-enabled bucket. For each key, Amazon S3 performs a -// delete operation and returns the result of that delete, success or failure, in -// the response. Note that if the object specified in the request is not found, -// Amazon S3 returns the result as deleted. +// requests, reducing per-request overhead. +// +// The request can contain a list of up to 1,000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if you +// want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success or failure, in the response. If the object +// specified in the request isn't found, Amazon S3 confirms the deletion by +// returning the result as deleted. +// // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // The operation supports two modes for the response: verbose and quiet. By // default, the operation uses verbose mode in which the response includes the // result of deletion of each key in your request. In quiet mode the response // includes only keys where the delete operation encountered an error. For a // successful deletion in a quiet mode, the operation does not return any -// information about the delete in the response body. When performing this action -// on an MFA Delete enabled bucket, that attempts to delete any versioned objects, -// you must include an MFA token. If you do not provide one, the entire request -// will fail, even if there are non-versioned objects you are trying to delete. If -// you provide an invalid token, whether there are versioned keys in the request or -// not, the entire Multi-Object Delete request will fail. For information about MFA -// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) -// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by -// directory buckets. Permissions +// information about the delete in the response body. +// +// When performing this action on an MFA Delete enabled bucket, that attempts to +// delete any versioned objects, you must include an MFA token. If you do not +// provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. 
If you provide an invalid token, whether there +// are versioned keys in the request or not, the entire Multi-Object Delete request +// will fail. For information about MFA Delete, see [MFA Delete]in the Amazon S3 User Guide. +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your DeleteObjects request includes specific headers. +// // - s3:DeleteObject - To delete an object from a bucket, you must always specify // the s3:DeleteObject permission. +// // - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versiong-enabled bucket, you must specify the s3:DeleteObjectVersion +// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion // permission. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -65,26 +76,43 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Content-MD5 request header +// // - General purpose bucket - The Content-MD5 request header is required for all // Multi-Object Delete requests. Amazon S3 uses the header value to ensure that // your request body has not been altered in transit. +// // - Directory bucket - The Content-MD5 request header or a additional checksum // request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all // Multi-Object Delete requests. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to DeleteObjects : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
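The DeleteObjects documentation above describes the multi-object delete flow: up to 1,000 keys per request, verbose versus quiet mode, the MFA token requirement on MFA-Delete buckets, and the Content-MD5/additional-checksum requirement. A minimal sketch of how a caller of this vendored client might issue a quiet-mode delete; cfg, bucket, and keys are assumed to be supplied by the calling application, and the function name is illustrative only.

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteKeys removes up to 1,000 keys in a single request and logs any per-key failures.
func deleteKeys(ctx context.Context, cfg aws.Config, bucket string, keys []string) error {
	client := s3.NewFromConfig(cfg)

	objects := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		objects = append(objects, types.ObjectIdentifier{Key: aws.String(k)})
	}

	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &types.Delete{
			Objects: objects,
			Quiet:   aws.Bool(true), // quiet mode: only keys that failed to delete are reported back
		},
		// Satisfies the Content-MD5 / additional-checksum requirement for Multi-Object Delete.
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
	if err != nil {
		return err
	}
	for _, e := range out.Errors {
		log.Printf("delete failed: key=%s code=%s", aws.ToString(e.Key), aws.ToString(e.Code))
	}
	return nil
}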
+// +// The following operations are related to DeleteObjects : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [AbortMultipartUpload] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { if params == nil { params = &DeleteObjectsInput{} @@ -102,31 +130,40 @@ func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, type DeleteObjectsInput struct { - // The bucket name containing the objects to delete. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name containing the objects to delete. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -138,28 +175,41 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a // Governance-type Object Lock in place. To use this header, you must have the - // s3:BypassGovernanceRetention permission. This functionality is not supported for - // directory buckets. + // s3:BypassGovernanceRetention permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool // Indicates the algorithm used to create the checksum for the object when you use // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . 
For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . + // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . If you provide an individual - // checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide @@ -170,31 +220,38 @@ type DeleteObjectsInput struct { // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. When performing the DeleteObjects operation on an MFA delete - // enabled bucket, which attempts to delete the specified versioned objects, you - // must include an MFA token. If you don't provide an MFA token, the entire request - // will fail, even if there are non-versioned objects that you are trying to - // delete. If you provide an invalid token, whether there are versioned object keys - // in the request or not, the entire Multi-Object Delete request will fail. For - // information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // delete enabled. + // + // When performing the DeleteObjects operation on an MFA delete enabled bucket, + // which attempts to delete the specified versioned objects, you must include an + // MFA token. If you don't provide an MFA token, the entire request will fail, even + // if there are non-versioned objects that you are trying to delete. If you provide + // an invalid token, whether there are versioned object keys in the request or not, + // the entire Multi-Object Delete request will fail. For information about MFA + // Delete, see [MFA Delete]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -210,7 +267,9 @@ type DeleteObjectsOutput struct { Errors []types.Error // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -241,25 +300,28 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -277,6 +339,21 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { return err } @@ -286,7 +363,7 @@ func (c *Client) 
addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil { @@ -316,6 +393,18 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -345,9 +434,10 @@ func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { } func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getDeleteObjectsRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go index 488d2a79..87e72df2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go @@ -13,17 +13,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketPublicAccessBlock permission. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to DeletePublicAccessBlock : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// This operation is not supported for directory buckets. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For +// more information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. 
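Across these hunks the generated operations swap the exported awsmiddleware/v4/smithyhttp helpers for unexported ones (addClientRequestID, addRetry, addSpanRetryLoop, addTimeOffsetBuild, addCredentialSource, and the addSpan* tracing hooks). Those helpers are internal to the vendored module; application code that wants to participate in the same per-operation stack would still use the public APIOptions hook. A hedged sketch of that pattern, with the client constructor and middleware ID being illustrative names rather than anything defined in this diff.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
)

// newClientWithMarker returns an S3 client whose operation stacks include a small
// custom Initialize-step middleware, added through the public APIOptions hook
// rather than the internal add* helpers that the generated code uses above.
func newClientWithMarker(cfg aws.Config) *s3.Client {
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			marker := middleware.InitializeMiddlewareFunc("exampleMarker",
				func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
					middleware.InitializeOutput, middleware.Metadata, error,
				) {
					// Inspect or annotate the operation input here, before the
					// serialize/build/finalize steps registered above run.
					return next.HandleInitialize(ctx, in)
				})
			return stack.Initialize.Add(marker, middleware.After)
		})
	})
}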
+// +// The following operations are related to DeletePublicAccessBlock : +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock] +// +// [PutPublicAccessBlock] +// +// [GetBucketPolicyStatus] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { if params == nil { params = &DeletePublicAccessBlockInput{} @@ -55,6 +66,7 @@ type DeletePublicAccessBlockInput struct { } func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -88,25 +100,28 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -124,6 +139,18 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -133,7 +160,7 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil { @@ -157,6 +184,18 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go index 9a90a88a..0ed2a867 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -14,26 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action uses the accelerate subresource to return the Transfer -// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3 -// Transfer Acceleration is a bucket-level feature that enables you to perform -// faster data transfers to and from Amazon S3. To use this operation, you must -// have permission to perform the s3:GetAccelerateConfiguration action. The bucket -// owner has this permission by default. The bucket owner can grant this permission -// to others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. You set the Transfer Acceleration state of an -// existing bucket to Enabled or Suspended by using the -// PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) -// operation. A GET accelerate request does not return a state value for a bucket -// that has no transfer acceleration state. A bucket has no Transfer Acceleration -// state if a state has never been set on the bucket. For more information about -// transfer acceleration, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAccelerateConfiguration : -// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// This operation is not supported for directory buckets. +// +// This implementation of the GET action uses the accelerate subresource to return +// the Transfer Acceleration state of a bucket, which is either Enabled or +// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the +// s3:GetAccelerateConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. 
For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled or +// Suspended by using the [PutBucketAccelerateConfiguration] operation. +// +// A GET accelerate request does not return a state value for a bucket that has no +// transfer acceleration state. A bucket has no Transfer Acceleration state if a +// state has never been set on the bucket. +// +// For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User +// Guide. +// +// The following operations are related to GetBucketAccelerateConfiguration : +// +// [PutBucketAccelerateConfiguration] +// +// [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { if params == nil { params = &GetBucketAccelerateConfigurationInput{} @@ -65,16 +75,19 @@ type GetBucketAccelerateConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -82,7 +95,9 @@ func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointP type GetBucketAccelerateConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // The accelerate configuration of the bucket. 
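As a usage note for GetBucketAccelerateConfiguration above: the Status field in the output is empty when acceleration has never been configured, matching the "no state value" behavior described in the doc comment. A minimal sketch, assuming cfg and bucket are provided by the caller and the function name is illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// accelerateStatus reports whether Transfer Acceleration is Enabled, Suspended,
// or has never been configured on the bucket.
func accelerateStatus(ctx context.Context, cfg aws.Config, bucket string) (types.BucketAccelerateStatus, error) {
	client := s3.NewFromConfig(cfg)
	out, err := client.GetBucketAccelerateConfiguration(ctx, &s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	if out.Status == "" {
		fmt.Println("no Transfer Acceleration state has ever been set on this bucket")
	}
	return out.Status, nil
}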
@@ -116,25 +131,28 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -152,6 +170,18 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } @@ -161,7 +191,7 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { @@ -185,6 +215,18 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go index 36747fc9..fed95ac0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go @@ -14,26 +14,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action uses the acl subresource to return the access control list (ACL) -// of a bucket. To use GET to return the ACL of the bucket, you must have the -// READ_ACP access to the bucket. If READ_ACP permission is granted to the -// anonymous user, you can return the ACL of the bucket without using an -// authorization header. 
When you use this API operation with an access point, -// provide the alias of the access point in place of the bucket name. When you use -// this API operation with an Object Lambda access point, provide the alias of the -// Object Lambda access point in place of the bucket name. If the Object Lambda -// access point alias in a request is not valid, the error code +// This operation is not supported for directory buckets. +// +// This implementation of the GET action uses the acl subresource to return the +// access control list (ACL) of a bucket. To use GET to return the ACL of the +// bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code // InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, // requests to read ACLs are still supported and return the // bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see Controlling object ownership and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAcl : -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAcl : +// +// [ListObjects] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { if params == nil { params = &GetBucketAclInput{} @@ -51,14 +60,18 @@ func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, op type GetBucketAclInput struct { - // Specifies the S3 bucket whose ACL is being requested. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. 
For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // Specifies the S3 bucket whose ACL is being requested. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -72,6 +85,7 @@ type GetBucketAclInput struct { } func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,25 +126,28 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -148,6 +165,18 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { return err } @@ -157,7 +186,7 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil { @@ -181,6 +210,18 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err 
= addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go index 0f7922ae..b0423c85 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -14,21 +14,33 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action returns an analytics configuration (identified by the analytics -// configuration ID) from the bucket. To use this operation, you must have -// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket -// owner has this permission by default. The bucket owner can grant this permission -// to others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, -// see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAnalyticsConfiguration : -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// This operation is not supported for directory buckets. +// +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the +// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User +// Guide. 
+// +// The following operations are related to GetBucketAnalyticsConfiguration : +// +// [DeleteBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &GetBucketAnalyticsConfigurationInput{} @@ -65,6 +77,7 @@ type GetBucketAnalyticsConfigurationInput struct { } func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -102,25 +115,28 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -138,6 +154,18 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -147,7 +175,7 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = 
awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { @@ -171,6 +199,18 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go index 33c25aa1..ef6a970d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go @@ -14,21 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the Cross-Origin -// Resource Sharing (CORS) configuration information set for the bucket. To use -// this operation, you must have permission to perform the s3:GetBucketCORS +// This operation is not supported for directory buckets. +// +// Returns the Cross-Origin Resource Sharing (CORS) configuration information set +// for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS // action. By default, the bucket owner has this permission and can grant it to -// others. When you use this API operation with an access point, provide the alias -// of the access point in place of the bucket name. When you use this API operation -// with an Object Lambda access point, provide the alias of the Object Lambda -// access point in place of the bucket name. If the Object Lambda access point -// alias in a request is not valid, the error code InvalidAccessPointAliasError is -// returned. For more information about InvalidAccessPointAliasError , see List of -// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// . The following operations are related to GetBucketCors : -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// others. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// For more information about CORS, see [Enabling Cross-Origin Resource Sharing]. 
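For the GetBucketCors operation described above, a minimal sketch of reading the configured CORS rules; cfg and bucket are assumed to come from the calling application, and the function name is illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printCORSRules lists the allowed origins and methods configured on the bucket.
// A bucket with no CORS configuration returns an error from the service.
func printCORSRules(ctx context.Context, cfg aws.Config, bucket string) error {
	client := s3.NewFromConfig(cfg)
	out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, rule := range out.CORSRules {
		fmt.Printf("origins=%v methods=%v\n", rule.AllowedOrigins, rule.AllowedMethods)
	}
	return nil
}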
+// +// The following operations are related to GetBucketCors : +// +// [PutBucketCors] +// +// [DeleteBucketCors] +// +// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { if params == nil { params = &GetBucketCorsInput{} @@ -46,14 +61,18 @@ func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, type GetBucketCorsInput struct { - // The bucket name for which to get the cors configuration. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The bucket name for which to get the cors configuration. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -67,6 +86,7 @@ type GetBucketCorsInput struct { } func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -105,25 +125,28 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -141,6 +164,18 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -150,7 +185,7 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil { @@ -174,6 +209,18 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go index c8be5dd0..82b7211a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go @@ -14,20 +14,47 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the default -// encryption configuration for an Amazon S3 bucket. 
By default, all buckets have a -// default encryption configuration that uses server-side encryption with Amazon S3 -// managed keys (SSE-S3). For information about the bucket default encryption -// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. To use this operation, you must have permission to -// perform the s3:GetEncryptionConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to GetBucketEncryption : -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// Returns the default encryption configuration for an Amazon S3 bucket. By +// default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets - For information about the bucket default +// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. For information about +// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:GetEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:GetEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . 
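As a rough sketch of reading the configuration the updated documentation describes, the helper below calls GetBucketEncryption and prints each rule's SSE algorithm. The helper name is illustrative, the bucket name is caller-supplied, and an *s3.Client constructed as in the earlier GetBucketCors sketch is assumed; this is not code from this repository.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketEncryption prints the default server-side encryption rules
// (SSE-S3 or SSE-KMS) configured on the given bucket.
func printBucketEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.ServerSideEncryptionConfiguration == nil {
		return nil
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		def := rule.ApplyServerSideEncryptionByDefault
		if def == nil {
			continue
		}
		fmt.Printf("%s: algorithm=%s kms-key=%s\n",
			bucket, def.SSEAlgorithm, aws.ToString(def.KMSMasterKeyID))
	}
	return nil
}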
+// +// The following operations are related to GetBucketEncryption : +// +// [PutBucketEncryption] +// +// [DeleteBucketEncryption] +// +// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html +// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { if params == nil { params = &GetBucketEncryptionInput{} @@ -48,18 +75,34 @@ type GetBucketEncryptionInput struct { // The name of the bucket from which the server-side encryption configuration is // retrieved. // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -97,25 +140,28 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -133,6 +179,18 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -142,7 +200,7 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil { @@ -166,6 +224,18 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go index a3531f98..5a500f0a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -14,25 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets the S3 -// Intelligent-Tiering configuration from the specified bucket. 
The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to GetBucketIntelligentTieringConfiguration include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
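The sketch below shows how a caller might fetch one of these Intelligent-Tiering configurations with the regenerated client. It is illustrative only: the function name is arbitrary, the configuration ID is whatever was used when the configuration was created, and the *s3.Client is assumed to be constructed as in the earlier example.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printIntelligentTiering fetches a single S3 Intelligent-Tiering configuration,
// identified by its ID, and prints its status and tiering rule count.
func printIntelligentTiering(ctx context.Context, client *s3.Client, bucket, configID string) error {
	out, err := client.GetBucketIntelligentTieringConfiguration(ctx, &s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(configID),
	})
	if err != nil {
		return err
	}
	cfg := out.IntelligentTieringConfiguration
	if cfg == nil {
		return nil
	}
	fmt.Printf("configuration %s: status=%s, %d tiering rule(s)\n",
		aws.ToString(cfg.Id), cfg.Status, len(cfg.Tierings))
	return nil
}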
+// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &GetBucketIntelligentTieringConfigurationInput{} @@ -65,6 +78,7 @@ type GetBucketIntelligentTieringConfigurationInput struct { } func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -102,25 +116,28 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -138,6 +155,18 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -147,7 +176,7 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { @@ -171,6 +200,18 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go index 3fe6f986..163c86ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -14,18 +14,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns an inventory -// configuration (identified by the inventory configuration ID) from the bucket. To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// . The following operations are related to GetBucketInventoryConfiguration : -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. 
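A caller retrieves an inventory configuration by bucket and configuration ID, much like the Intelligent-Tiering call above. The helper below is a sketch under the same assumptions (a configured *s3.Client and caller-supplied placeholder identifiers) and is not part of this repository.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printInventoryConfiguration fetches one inventory configuration and prints
// its ID and which object versions it includes.
func printInventoryConfiguration(ctx context.Context, client *s3.Client, bucket, configID string) error {
	out, err := client.GetBucketInventoryConfiguration(ctx, &s3.GetBucketInventoryConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(configID),
	})
	if err != nil {
		return err
	}
	cfg := out.InventoryConfiguration
	if cfg == nil {
		return nil
	}
	fmt.Printf("inventory %s: included object versions=%s\n",
		aws.ToString(cfg.Id), cfg.IncludedObjectVersions)
	return nil
}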
+// +// The following operations are related to GetBucketInventoryConfiguration : +// +// [DeleteBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [PutBucketInventoryConfiguration] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { if params == nil { params = &GetBucketInventoryConfigurationInput{} @@ -62,6 +76,7 @@ type GetBucketInventoryConfigurationInput struct { } func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -99,25 +114,28 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -135,6 +153,18 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -144,7 +174,7 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = 
awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { @@ -168,6 +198,18 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go index 4cc9eff8..5803ff5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -14,31 +14,81 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Bucket lifecycle -// configuration now supports specifying a lifecycle rule using an object key name -// prefix, one or more object tags, or a combination of both. Accordingly, this -// section describes the latest API. The response describes the new filter element -// that you can use to specify a filter to select a subset of objects to which the -// rule applies. If you are using a previous version of the lifecycle -// configuration, it still works. For the earlier action, see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// . Returns the lifecycle configuration information set on the bucket. For -// information about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// . To use this operation, you must have permission to perform the -// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . GetBucketLifecycleConfiguration has the following special error: +// Returns the lifecycle configuration information set on the bucket. For +// information about lifecycle configuration, see [Object Lifecycle Management]. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any +// combination of these. Accordingly, this section describes the latest API, which +// is compatible with the new functionality. The previous version of the API +// supported filtering based only on an object key name prefix, which is supported +// for general purpose buckets for backward compatibility. For the related API +// description, see [GetBucketLifecycle]. 
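The hunks below also add a TransitionDefaultMinimumObjectSize field to the output type. The sketch that follows shows how a caller of the vendored client might list lifecycle rules and read that new field; the helper name is illustrative, an already-configured *s3.Client is assumed, and this is not repository code.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printLifecycleRules lists the bucket's lifecycle rules and the default
// minimum object size applied to transitions (a field added in this SDK update).
func printLifecycleRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLifecycleConfiguration(ctx, &s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, rule := range out.Rules {
		fmt.Printf("rule %s: status=%s\n", aws.ToString(rule.ID), rule.Status)
	}
	fmt.Printf("transition default minimum object size: %s\n", out.TransitionDefaultMinimumObjectSize)
	return nil
}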
+// +// Lifecycle configurations for directory buckets only support expiring objects and +// cancelling multipart uploads. Expiring of versioned objects, transitions and tag +// filters are not supported. +// +// Permissions +// - General purpose bucket permissions - By default, all Amazon S3 resources +// are private, including buckets, objects, and related subresources (for example, +// lifecycle configuration and website configuration). Only the resource owner +// (that is, the Amazon Web Services account that created it) can access the +// resource. The resource owner can optionally grant access permissions to others +// by writing an access policy. For this operation, a user must have the +// s3:GetLifecycleConfiguration permission. +// +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - You must have the +// s3express:GetLifecycleConfiguration permission in an IAM identity-based policy +// to use this operation. Cross-account access to this API operation isn't +// supported. The resource owner can optionally grant access permissions to others +// by creating a role or user for them as long as they are within the same account +// as the owner and resource. +// +// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in +// +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// GetBucketLifecycleConfiguration has the following special error: +// +// - Error code: NoSuchLifecycleConfiguration +// +// - Description: The lifecycle configuration does not exist.
+// // - HTTP Status Code: 404 Not Found +// // - SOAP Fault Code Prefix: Client // // The following operations are related to GetBucketLifecycleConfiguration : -// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// [GetBucketLifecycle] +// +// [PutBucketLifecycle] +// +// [DeleteBucketLifecycle] +// +// [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { if params == nil { params = &GetBucketLifecycleConfigurationInput{} @@ -64,12 +114,16 @@ type GetBucketLifecycleConfigurationInput struct { // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -79,6 +133,25 @@ type GetBucketLifecycleConfigurationOutput struct { // Container for a lifecycle rule. Rules []types.LifecycleRule + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It isn't supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. + // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. 
+ TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -107,25 +180,28 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -143,6 +219,18 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -152,7 +240,7 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { @@ -176,6 +264,18 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go index e94875fe..3d1397b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go @@ -20,23 +20,34 @@ import ( "io" ) -// This operation is not supported by directory buckets. Returns the Region the -// bucket resides in. You set the bucket's Region using the LocationConstraint -// request parameter in a CreateBucket request. 
For more information, see -// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. When you use this API operation with -// an Object Lambda access point, provide the alias of the Object Lambda access -// point in place of the bucket name. If the Object Lambda access point alias in a -// request is not valid, the error code InvalidAccessPointAliasError is returned. -// For more information about InvalidAccessPointAliasError , see List of Error -// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) -// to return the Region that a bucket resides in. For backward compatibility, -// Amazon S3 continues to support GetBucketLocation. The following operations are -// related to GetBucketLocation : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// This operation is not supported for directory buckets. +// +// Returns the Region the bucket resides in. You set the bucket's Region using the +// LocationConstraint request parameter in a CreateBucket request. For more +// information, see [CreateBucket]. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For +// backward compatibility, Amazon S3 continues to support GetBucketLocation. +// +// The following operations are related to GetBucketLocation : +// +// [GetObject] +// +// [CreateBucket] +// +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { if params == nil { params = &GetBucketLocationInput{} @@ -54,14 +65,18 @@ func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocatio type GetBucketLocationInput struct { - // The name of the bucket for which to get the location. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. 
For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The name of the bucket for which to get the location. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -75,6 +90,7 @@ type GetBucketLocationInput struct { } func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -82,8 +98,12 @@ func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { type GetBucketLocationOutput struct { // Specifies the Region where the bucket resides. For a list of all the Amazon S3 - // supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // . Buckets in Region us-east-1 have a LocationConstraint of null . + // supported location constraints by Region, see [Regions and Endpoints]. + // + // Buckets in Region us-east-1 have a LocationConstraint of null . Buckets with a + // LocationConstraint of EU reside in eu-west-1 . + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region LocationConstraint types.BucketLocationConstraint // Metadata pertaining to the operation's result. 
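To illustrate the LocationConstraint semantics documented above (a null constraint means us-east-1), here is a small sketch against the vendored client. The function name and bucket value are placeholders, the *s3.Client is assumed to be constructed as in the earlier examples, and the snippet is not part of this repository.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// bucketRegion resolves the Region a bucket resides in via GetBucketLocation.
func bucketRegion(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	out, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	region := string(out.LocationConstraint)
	if region == "" {
		// Per the documentation above, buckets in us-east-1 return a null LocationConstraint.
		region = "us-east-1"
	}
	return region, nil
}

As the documentation above notes, HeadBucket is the recommended way to discover a bucket's Region in new code; GetBucketLocation remains supported for backward compatibility.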
@@ -114,25 +134,28 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -153,6 +176,18 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { return err } @@ -162,7 +197,7 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketLocationUpdateEndpoint(stack, options); err != nil { @@ -186,6 +221,18 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go index 5bbca7e4..c1f315fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go @@ -14,11 +14,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the logging -// status of a bucket and the permissions users have to view and modify that -// status. 
The following operations are related to GetBucketLogging : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// This operation is not supported for directory buckets. +// +// Returns the logging status of a bucket and the permissions users have to view +// and modify that status. +// +// The following operations are related to GetBucketLogging : +// +// [CreateBucket] +// +// [PutBucketLogging] +// +// [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { if params == nil { params = &GetBucketLoggingInput{} @@ -50,6 +58,7 @@ type GetBucketLoggingInput struct { } func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -57,8 +66,10 @@ func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { type GetBucketLoggingOutput struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. + // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + // Reference. + // + // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html LoggingEnabled *types.LoggingEnabled // Metadata pertaining to the operation's result. 
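A short sketch of reading the logging status described above, under the same assumptions as the earlier helpers (a configured *s3.Client, a caller-supplied bucket name, and an illustrative function name); it is not code from this repository.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketLogging reports whether server access logging is enabled and,
// if so, where the log objects are delivered.
func printBucketLogging(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLogging(ctx, &s3.GetBucketLoggingInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.LoggingEnabled == nil {
		fmt.Printf("%s: access logging is disabled\n", bucket)
		return nil
	}
	fmt.Printf("%s: logs delivered to %s with prefix %q\n",
		bucket, aws.ToString(out.LoggingEnabled.TargetBucket), aws.ToString(out.LoggingEnabled.TargetPrefix))
	return nil
}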
@@ -89,25 +100,28 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -125,6 +139,18 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -134,7 +160,7 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil { @@ -158,6 +184,18 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go new file mode 100644 index 00000000..5d5e9685 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go @@ -0,0 +1,242 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the metadata table configuration for a general purpose bucket. For +// +// more information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the +// s3:GetBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketMetadataTableConfiguration : +// +// [CreateBucketMetadataTableConfiguration] +// +// [DeleteBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html +// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +func (c *Client) GetBucketMetadataTableConfiguration(ctx context.Context, params *GetBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*GetBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &GetBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketMetadataTableConfiguration", params, optFns, c.addOperationGetBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that contains the metadata table configuration that + // you want to retrieve. + // + // This member is required. + Bucket *string + + // The expected owner of the general purpose bucket that you want to retrieve the + // metadata table configuration from. + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketMetadataTableConfigurationOutput struct { + + // The metadata table configuration for the general purpose bucket. + GetBucketMetadataTableConfigurationResult *types.GetBucketMetadataTableConfigurationResult + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *GetBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketMetadataTableConfiguration", + } +} + +// getGetBucketMetadataTableConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go index 913b9a04..4fdf2ef4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -14,21 +14,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets a metrics -// configuration (specified by the metrics configuration ID) from the bucket. Note -// that this doesn't include the daily storage metrics. To use this operation, you -// must have permissions to perform the s3:GetMetricsConfiguration action. The -// bucket owner has this permission by default. The bucket owner can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . 
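The generated file above wires the new GetBucketMetadataTableConfiguration operation into the vendored client. For orientation only, a minimal sketch of calling it through the updated SDK; the bucket name is a placeholder and resolution of credentials/region through the default config chain is an assumption, not part of this change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve credentials and region from the default chain (env, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := s3.NewFromConfig(cfg)

	// "example-bucket" is a placeholder general purpose bucket name.
	out, err := client.GetBucketMetadataTableConfiguration(ctx, &s3.GetBucketMetadataTableConfigurationInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatalf("get metadata table configuration: %v", err)
	}
	fmt.Printf("%+v\n", out.GetBucketMetadataTableConfigurationResult)
}

The later sketches reuse a client built this way rather than repeating the config loading.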
The following operations are related to GetBucketMetricsConfiguration : -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported for directory buckets. +// +// Gets a metrics configuration (specified by the metrics configuration ID) from +// the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to GetBucketMetricsConfiguration : +// +// [PutBucketMetricsConfiguration] +// +// [DeleteBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] +// +// [Monitoring Metrics with Amazon CloudWatch] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { if params == nil { params = &GetBucketMetricsConfigurationInput{} @@ -66,6 +79,7 @@ type GetBucketMetricsConfigurationInput struct { } func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -103,25 +117,28 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != 
nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -139,6 +156,18 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -148,7 +177,7 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { @@ -172,6 +201,18 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go index 67a35d97..1fb7d164 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -14,24 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the notification -// configuration of a bucket. If notifications are not enabled on the bucket, the -// action returns an empty NotificationConfiguration element. By default, you must -// be the bucket owner to read the notification configuration of a bucket. However, -// the bucket owner can use a bucket policy to grant permission to other users to -// read this configuration with the s3:GetBucketNotification permission. When you -// use this API operation with an access point, provide the alias of the access -// point in place of the bucket name. When you use this API operation with an -// Object Lambda access point, provide the alias of the Object Lambda access point -// in place of the bucket name. If the Object Lambda access point alias in a -// request is not valid, the error code InvalidAccessPointAliasError is returned. 
-// For more information about InvalidAccessPointAliasError , see List of Error -// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about setting and reading the notification configuration -// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . The following action is related to GetBucketNotification : -// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// This operation is not supported for directory buckets. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant +// permission to other users to read this configuration with the +// s3:GetBucketNotification permission. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// For more information about setting and reading the notification configuration +// on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies]. +// +// The following action is related to GetBucketNotification : +// +// [PutBucketNotification] +// +// [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { if params == nil { params = &GetBucketNotificationConfigurationInput{} @@ -49,15 +63,18 @@ func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params type GetBucketNotificationConfigurationInput struct { - // The name of the bucket for which to get the notification configuration. When - // you use this API operation with an access point, provide the alias of the access - // point in place of the bucket name. When you use this API operation with an - // Object Lambda access point, provide the alias of the Object Lambda access point - // in place of the bucket name. If the Object Lambda access point alias in a - // request is not valid, the error code InvalidAccessPointAliasError is returned. 
- // For more information about InvalidAccessPointAliasError , see List of Error - // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The name of the bucket for which to get the notification configuration. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -71,6 +88,7 @@ type GetBucketNotificationConfigurationInput struct { } func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -122,25 +140,28 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -158,6 +179,18 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } @@ -167,7 +200,7 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { @@ -191,6 +224,18 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, 
options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go index dca55854..cdc07a58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go @@ -14,14 +14,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves -// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -// the s3:GetBucketOwnershipControls permission. For more information about Amazon -// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// . The following operations are related to GetBucketOwnershipControls : -// - PutBucketOwnershipControls -// - DeleteBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see [Specifying permissions in a policy]. +// +// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. 
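The rewritten doc comment above covers GetBucketOwnershipControls. A small usage sketch against an already-constructed *s3.Client (client construction as in the earlier sketch; the bucket name is a placeholder):

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printOwnershipControls is an illustrative helper; it assumes the caller
// already built a client as shown earlier and holds s3:GetBucketOwnershipControls.
func printOwnershipControls(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketOwnershipControls(ctx, &s3.GetBucketOwnershipControlsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.OwnershipControls == nil {
		return nil
	}
	// Each rule carries the ObjectOwnership setting (for example, BucketOwnerEnforced).
	for _, rule := range out.OwnershipControls.Rules {
		fmt.Println(rule.ObjectOwnership)
	}
	return nil
}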
+// +// The following operations are related to GetBucketOwnershipControls : +// +// # PutBucketOwnershipControls +// +// # DeleteBucketOwnershipControls +// +// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { if params == nil { params = &GetBucketOwnershipControlsInput{} @@ -53,6 +61,7 @@ type GetBucketOwnershipControlsInput struct { } func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -91,25 +100,28 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -127,6 +139,18 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -136,7 +160,7 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { @@ -160,6 +184,18 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go index ff42b705..4a352aa8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go @@ -13,47 +13,63 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the policy of a specified bucket. Directory buckets - For directory -// buckets, you must make requests for this API operation to the Regional endpoint. -// These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the GetBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. If you don't have GetBucketPolicy permissions, Amazon S3 returns a -// 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Returns the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the GetBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. 
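The GetBucketPolicy documentation above spells out the permission model (403 versus 405 responses, root-principal behaviour). A sketch of reading the policy document, assuming a prepared client and a placeholder bucket name:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketPolicy fetches the bucket policy JSON; the caller needs the
// s3:GetBucketPolicy permission described above.
func printBucketPolicy(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		// Surfaces e.g. 403 Access Denied or 405 Method Not Allowed, as documented above.
		return err
	}
	fmt.Println(aws.ToString(out.Policy)) // the policy document as a JSON string
	return nil
}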
+// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:GetBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:GetBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . // -// Example bucket policies General purpose buckets example bucket policies - See -// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// in the Amazon S3 User Guide. Directory bucket example bucket policies - See -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . 
The -// following action is related to GetBucketPolicy : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// The following action is related to GetBucketPolicy : +// +// [GetObject] +// +// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { if params == nil { params = &GetBucketPolicyInput{} @@ -71,39 +87,48 @@ func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInp type GetBucketPolicyInput struct { - // The bucket name to get the bucket policy for. Directory buckets - When you use - // this operation with a directory bucket, you must use path-style requests in the - // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // The bucket name to get the bucket policy for. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide Access points - When you use this API operation with - // an access point, provide the alias of the access point in place of the bucket - // name. Object Lambda access points - When you use this API operation with an - // Object Lambda access point, provide the alias of the Object Lambda access point - // in place of the bucket name. If the Object Lambda access point alias in a - // request is not valid, the error code InvalidAccessPointAliasError is returned. - // For more information about InvalidAccessPointAliasError , see List of Error - // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . Access points and Object Lambda access points are not supported by directory - // buckets. + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). 
For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // Access points - When you use this API operation with an access point, provide + // the alias of the access point in place of the bucket name. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point in + // place of the bucket name. If the Object Lambda access point alias in a request + // is not valid, the error code InvalidAccessPointAliasError is returned. For more + // information about InvalidAccessPointAliasError , see [List of Error Codes]. + // + // Object Lambda access points are not supported by directory buckets. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -141,25 +166,28 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -177,6 +205,18 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != 
nil { + return err + } if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -186,7 +226,7 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil { @@ -210,6 +250,18 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go index 6acf706f..db63caf6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go @@ -14,18 +14,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves the policy -// status for an Amazon S3 bucket, indicating whether the bucket is public. In -// order to use this operation, you must have the s3:GetBucketPolicyStatus -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . For more information about when Amazon S3 considers a bucket public, see The -// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to GetBucketPolicyStatus : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// This operation is not supported for directory buckets. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the +// s3:GetBucketPolicyStatus permission. For more information about Amazon S3 +// permissions, see [Specifying Permissions in a Policy]. +// +// For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"]. 
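For the policy-status operation documented above, a brief sketch; the bucket name is a placeholder and the client is assumed to be built as in the first sketch:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printPolicyStatus reports whether Amazon S3 considers the bucket public.
func printPolicyStatus(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketPolicyStatus(ctx, &s3.GetBucketPolicyStatusInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	// PolicyStatus carries the public/not-public flag described above.
	fmt.Printf("%+v\n", out.PolicyStatus)
	return nil
}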
+// +// The following operations are related to GetBucketPolicyStatus : +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock] +// +// [PutPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { if params == nil { params = &GetBucketPolicyStatusInput{} @@ -57,6 +70,7 @@ type GetBucketPolicyStatusInput struct { } func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -94,25 +108,28 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -130,6 +147,18 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { return err } @@ -139,7 +168,7 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. 
if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil { @@ -163,6 +192,18 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go index 8db927c1..ebc0d35c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go @@ -14,21 +14,37 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the replication -// configuration of a bucket. It can take a while to propagate the put or delete a -// replication configuration to all Amazon S3 systems. Therefore, a get request -// soon after put or delete can return a wrong result. For information about -// replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. This action requires permissions for the -// s3:GetReplicationConfiguration action. For more information about permissions, -// see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . If you include the Filter element in a replication configuration, you must -// also include the DeleteMarkerReplication and Priority elements. The response -// also returns those elements. For information about GetBucketReplication errors, -// see List of replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// This operation is not supported for directory buckets. +// +// Returns the replication configuration of a bucket. +// +// It can take a while to propagate the put or delete a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after put or delete can +// return a wrong result. +// +// For information about replication configuration, see [Replication] in the Amazon S3 User +// Guide. +// +// This action requires permissions for the s3:GetReplicationConfiguration action. +// For more information about permissions, see [Using Bucket Policies and User Policies]. +// +// If you include the Filter element in a replication configuration, you must also +// include the DeleteMarkerReplication and Priority elements. The response also +// returns those elements. 
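The replication getter documented above returns the Filter, DeleteMarkerReplication, and Priority elements as part of the configuration. A sketch that lists the configured rules, under the same client and bucket-name assumptions as before:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printReplicationRules lists the replication rules configured on the bucket.
// A recently written configuration may not be visible yet, per the propagation
// note in the doc comment above.
func printReplicationRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketReplication(ctx, &s3.GetBucketReplicationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.ReplicationConfiguration == nil {
		return nil
	}
	for _, rule := range out.ReplicationConfiguration.Rules {
		fmt.Printf("rule %s -> %+v\n", aws.ToString(rule.ID), rule.Destination)
	}
	return nil
}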
+// +// For information about GetBucketReplication errors, see [List of replication-related error codes] +// // The following operations are related to GetBucketReplication : -// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// [PutBucketReplication] +// +// [DeleteBucketReplication] +// +// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList +// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { if params == nil { params = &GetBucketReplicationInput{} @@ -60,6 +76,7 @@ type GetBucketReplicationInput struct { } func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -98,25 +115,28 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -134,6 +154,18 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -143,7 +175,7 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addGetBucketReplicationUpdateEndpoint(stack, options); err != nil { @@ -167,6 +199,18 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go index 37c96450..34563cb0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go @@ -14,11 +14,17 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the request -// payment configuration of a bucket. To use this version of the operation, you -// must be the bucket owner. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) -// . The following operations are related to GetBucketRequestPayment : -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// This operation is not supported for directory buckets. +// +// Returns the request payment configuration of a bucket. To use this version of +// the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets]. +// +// The following operations are related to GetBucketRequestPayment : +// +// [ListObjects] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { if params == nil { params = &GetBucketRequestPaymentInput{} @@ -50,6 +56,7 @@ type GetBucketRequestPaymentInput struct { } func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -87,25 +94,28 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = 
awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -123,6 +133,18 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -132,7 +154,7 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { @@ -156,6 +178,18 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go index 4c2761be..a1d8d99c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go @@ -14,17 +14,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the tag set -// associated with the bucket. To use this operation, you must have permission to -// perform the s3:GetBucketTagging action. By default, the bucket owner has this -// permission and can grant this permission to others. GetBucketTagging has the -// following special error: +// This operation is not supported for directory buckets. +// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the +// s3:GetBucketTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. +// +// GetBucketTagging has the following special error: +// // - Error code: NoSuchTagSet +// // - Description: There is no tag set associated with the bucket. 
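GetBucketTagging's special NoSuchTagSet error, noted above, is surfaced by the SDK as a smithy API error code. A sketch that treats it as an empty tag set rather than a failure (client and bucket name assumed as before):

package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// printBucketTags lists the bucket's tag set, mapping the NoSuchTagSet error
// code described above to "no tags".
func printBucketTags(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchTagSet" {
			fmt.Println("bucket has no tag set")
			return nil
		}
		return err
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
	return nil
}

errors.As works here because service errors returned by the SDK implement the smithy.APIError interface.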
// // The following operations are related to GetBucketTagging : -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// [PutBucketTagging] +// +// [DeleteBucketTagging] +// +// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html +// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { if params == nil { params = &GetBucketTaggingInput{} @@ -56,6 +67,7 @@ type GetBucketTaggingInput struct { } func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -95,25 +107,28 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -131,6 +146,18 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -140,7 +167,7 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil { @@ -164,6 +191,18 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go index 55cad629..6e69fd46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go @@ -14,15 +14,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the versioning -// state of a bucket. To retrieve the versioning state of a bucket, you must be the -// bucket owner. This implementation also returns the MFA Delete status of the -// versioning state. If the MFA Delete status is enabled , the bucket owner must -// use an authentication device to change the versioning state of the bucket. The -// following operations are related to GetBucketVersioning : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// This operation is not supported for directory buckets. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning state. +// If the MFA Delete status is enabled , the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning : +// +// [GetObject] +// +// [PutObject] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { if params == nil { params = &GetBucketVersioningInput{} @@ -54,6 +66,7 @@ type GetBucketVersioningInput struct { } func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -96,25 +109,28 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = 
awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -132,6 +148,18 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -141,7 +169,7 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil { @@ -165,6 +193,18 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go index f0ebf2b0..ce0847c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go @@ -14,17 +14,26 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the website -// configuration for a bucket. To host website on Amazon S3, you can configure a -// bucket as website by adding a website configuration. For more information about -// hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . This GET action requires the S3:GetBucketWebsite permission. By default, only +// This operation is not supported for directory buckets. +// +// Returns the website configuration for a bucket. To host website on Amazon S3, +// you can configure a bucket as website by adding a website configuration. For +// more information about hosting websites, see [Hosting Websites on Amazon S3]. +// +// This GET action requires the S3:GetBucketWebsite permission. By default, only // the bucket owner can read the bucket website configuration. However, bucket // owners can allow other users to read the website configuration by writing a -// bucket policy granting them the S3:GetBucketWebsite permission. The following -// operations are related to GetBucketWebsite : -// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// bucket policy granting them the S3:GetBucketWebsite permission. 
+// +// The following operations are related to GetBucketWebsite : +// +// [DeleteBucketWebsite] +// +// [PutBucketWebsite] +// +// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html +// [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { if params == nil { params = &GetBucketWebsiteInput{} @@ -56,6 +65,7 @@ type GetBucketWebsiteInput struct { } func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -103,25 +113,28 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -139,6 +152,18 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -148,7 +173,7 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil { @@ -172,6 +197,18 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go index d2dc15c7..c608cee6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -16,100 +16,154 @@ import ( "time" ) -// Retrieves an object from Amazon S3. In the GetObject request, specify the full -// key name for the object. General purpose buckets - Both the virtual-hosted-style -// requests and the path-style requests are supported. For a virtual hosted-style -// request example, if you have the object photos/2006/February/sample.jpg , -// specify the object key name as /photos/2006/February/sample.jpg . For a -// path-style request example, if you have the object -// photos/2006/February/sample.jpg in the bucket named examplebucket , specify the -// object key name as /examplebucket/photos/2006/February/sample.jpg . For more -// information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) -// in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style -// requests are supported. For a virtual hosted-style request example, if you have -// the object photos/2006/February/sample.jpg in the bucket named -// examplebucket--use1-az5--x-s3 , specify the object key name as +// Retrieves an object from Amazon S3. +// +// In the GetObject request, specify the full key name for the object. +// +// General purpose buckets - Both the virtual-hosted-style requests and the +// path-style requests are supported. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg , specify the object key +// name as /photos/2006/February/sample.jpg . For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named +// examplebucket , specify the object key name as +// /examplebucket/photos/2006/February/sample.jpg . For more information about +// request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide. +// +// Directory buckets - Only virtual-hosted-style requests are supported. For a +// virtual hosted-style request example, if you have the object +// photos/2006/February/sample.jpg in the bucket named +// amzn-s3-demo-bucket--usw2-az1--x-s3 , specify the object key name as // /photos/2006/February/sample.jpg . Also, when you make requests to this API // operation, your requests are sent to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . +// Path-style requests are not supported. For more information about endpoints in +// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about +// endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions // - General purpose bucket permissions - You must have the required permissions // in a policy. 
To use GetObject , you must have the READ access to the object // (or version). If you grant READ access to the anonymous user, the GetObject // operation returns the object without using an authorization header. For more -// information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If you include a versionId in your request -// header, you must have the s3:GetObjectVersion permission to access a specific -// version of an object. The s3:GetObject permission is not required in this -// scenario. If you request the current version of an object without a specific -// versionId in the request header, only the s3:GetObject permission is required. -// The s3:GetObjectVersion permission is not required in this scenario. If the -// object that you request doesn’t exist, the error that Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Access Denied error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// information, see [Specifying permissions in a policy]in the Amazon S3 User Guide. +// +// If you include a versionId in your request header, you must have the +// +// s3:GetObjectVersion permission to access a specific version of an object. The +// s3:GetObject permission is not required in this scenario. +// +// If you request the current version of an object without a specific versionId in +// +// the request header, only the s3:GetObject permission is required. The +// s3:GetObjectVersion permission is not required in this scenario. +// +// If the object that you request doesn’t exist, the error that Amazon S3 returns +// +// depends on whether you also have the s3:ListBucket permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Access Denied error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. 
Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted using SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Storage classes If the object you are retrieving is stored in the S3 Glacier // Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the // S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep // Archive Access tier, before you can retrieve the object you must first restore a -// copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the -// S3 Express One Zone storage class is supported to store newly created objects. -// Unsupported storage class values won't write a destination object and will -// respond with the HTTP status code 400 Bad Request . Encryption Encryption -// request headers, like x-amz-server-side-encryption , should not be sent for the -// GetObject requests, if your object uses server-side encryption with Amazon S3 -// managed encryption keys (SSE-S3), server-side encryption with Key Management -// Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon -// Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject -// requests for the object that uses these types of keys, you’ll get an HTTP 400 -// Bad Request error. Overriding response header values through the request There -// are times when you want to override certain response header values of a -// GetObject response. For example, you might override the Content-Disposition -// response header value through your GetObject request. You can override values -// for a set of response headers. These modified response header values are -// included only in a successful response, that is, when the HTTP status code 200 -// OK is returned. The headers you can override using the following query -// parameters in the request are a subset of the headers that Amazon S3 accepts -// when you create an object. The response headers that you can override for the -// GetObject response are Cache-Control , Content-Disposition , Content-Encoding , -// Content-Language , Content-Type , and Expires . To override values for a set of -// response headers in the GetObject response, you can use the following query -// parameters in the request. +// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. 
For +// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. +// +// Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 +// Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 +// One Zone-Infrequent Access storage class) in Dedicated Local Zones. Unsupported +// storage class values won't write a destination object and will respond with the +// HTTP status code 400 Bad Request . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for the GetObject requests, if your object uses server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3), server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you +// include the header in your GetObject requests for the object that uses these +// types of keys, you’ll get an HTTP 400 Bad Request error. +// +// Directory buckets - For directory buckets, there are only two supported options +// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. +// +// Overriding response header values through the request There are times when you +// want to override certain response header values of a GetObject response. For +// example, you might override the Content-Disposition response header value +// through your GetObject request. +// +// You can override values for a set of response headers. These modified response +// header values are included only in a successful response, that is, when the HTTP +// status code 200 OK is returned. The headers you can override using the +// following query parameters in the request are a subset of the headers that +// Amazon S3 accepts when you create an object. +// +// The response headers that you can override for the GetObject response are +// Cache-Control , Content-Disposition , Content-Encoding , Content-Language , +// Content-Type , and Expires . +// +// To override values for a set of response headers in the GetObject response, you +// can use the following query parameters in the request. +// // - response-cache-control +// // - response-content-disposition +// // - response-content-encoding +// // - response-content-language +// // - response-content-type +// // - response-expires // // When you use these parameters, you must sign the request by using either an // Authorization header or a presigned URL. These parameters cannot be used with an -// unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// unsigned (anonymous) request. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
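The response-header overrides described above map onto fields of this package's GetObjectInput (ResponseCacheControl, ResponseContentDisposition, ResponseContentType, and so on). A hedged sketch of a signed GetObject call that uses a few of them; the bucket name is a placeholder and the key reuses the sample name from the documentation:

```go
package example

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// fetchAsAttachment downloads an object while overriding selected response
// headers, as the GetObject documentation above describes. Client wiring is
// assumed to happen elsewhere; these overrides require a signed request.
func fetchAsAttachment(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:                     aws.String("example-bucket"),
		Key:                        aws.String("photos/2006/February/sample.jpg"),
		ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
		ResponseContentType:        aws.String("image/jpeg"),
		ResponseCacheControl:       aws.String("no-cache"),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	// Drain the body; a real caller would write it somewhere useful.
	_, err = io.Copy(io.Discard, out.Body)
	return err
}
```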
+// // The following operations are related to GetObject : -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// [ListBuckets] +// +// [GetObjectAcl] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { if params == nil { params = &GetObjectInput{} @@ -127,35 +181,45 @@ func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns . type GetObjectInput struct { - // The bucket name containing the object. Directory buckets - When you use this - // operation with a directory bucket, you must use virtual-hosted-style requests in - // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style - // requests are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Object Lambda access points - When you use this - // action with an Object Lambda access point, you must direct requests to the - // Object Lambda access point hostname. The Object Lambda access point hostname - // takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. 
- // Access points and Object Lambda access points are not supported by directory - // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name containing the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this action with an Object Lambda + // access point, you must direct requests to the Object Lambda access point + // hostname. The Object Lambda access point hostname takes the form + // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -174,37 +238,55 @@ type GetObjectInput struct { ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified in this header; otherwise, return a 412 Precondition Failed error. If - // both of the If-Match and If-Unmodified-Since headers are present in the request - // as follows: If-Match condition evaluates to true , and; If-Unmodified-Since - // condition evaluates to false ; then, S3 returns 200 OK and the data requested. - // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) - // . + // specified in this header; otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true , and; + // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + // the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 Not Modified error. If both of the If-None-Match and - // If-Modified-Since headers are present in the request as follows: If-None-Match - // condition evaluates to false , and; If-Modified-Since condition evaluates to - // true ; then, S3 returns 304 Not Modified status code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + // Modified status code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified in this header; otherwise, return a 304 Not Modified error. If both - // of the If-None-Match and If-Modified-Since headers are present in the request - // as follows: If-None-Match condition evaluates to false , and; If-Modified-Since - // condition evaluates to true ; then, S3 returns 304 Not Modified HTTP status - // code. For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) - // . + // specified in this header; otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + // Modified HTTP status code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 Precondition Failed error. If both of the If-Match and - // If-Unmodified-Since headers are present in the request as follows: If-Match - // condition evaluates to true , and; If-Unmodified-Since condition evaluates to - // false ; then, S3 returns 200 OK and the data requested. 
For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true , and; + // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + // the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -213,18 +295,23 @@ type GetObjectInput struct { PartNumber *int32 // Downloads the specified byte range of an object. For more information about the - // HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range) - // . Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]. + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range Range *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Sets the Cache-Control header of the response. @@ -245,72 +332,97 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time - // Specifies the algorithm to use when decrypting the object (for example, AES256 - // ). If you encrypt an object by using server-side encryption with - // customer-provided encryption keys (SSE-C) when you store the object in Amazon - // S3, then when you GET the object, you must use the following headers: + // Specifies the algorithm to use when decrypting the object (for example, AES256 ). + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. 
This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // Specifies the customer-provided encryption key that you originally provided for // Amazon S3 to encrypt the data before storing it. This value is used to decrypt // the object when recovering it and must match the one used when storing the data. // The key must be appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object - // by using server-side encryption with customer-provided encryption keys (SSE-C) - // when you store the object in Amazon S3, then when you GET the object, you must - // use the following headers: + // x-amz-server-side-encryption-customer-algorithm header. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // Specifies the 128-bit MD5 digest of the customer-provided encryption key // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. If you encrypt - // an object by using server-side encryption with customer-provided encryption keys - // (SSE-C) when you store the object in Amazon S3, then when you GET the object, - // you must use the following headers: + // to ensure that the encryption key was transmitted without error. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. 
+ // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string - // Version ID used to reference a specific version of the object. By default, the - // GetObject operation returns the current version of an object. To return a - // different version, use the versionId subresource. + // Version ID used to reference a specific version of the object. + // + // By default, the GetObject operation returns the current version of an object. + // To return a different version, use the versionId subresource. + // // - If you include a versionId in your request header, you must have the // s3:GetObjectVersion permission to access a specific version of an object. The // s3:GetObject permission is not required in this scenario. + // // - If you request the current version of an object without a specific versionId // in the request header, only the s3:GetObject permission is required. The // s3:GetObjectVersion permission is not required in this scenario. + // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is // supported by directory buckets. You can only specify null to the versionId // query parameter in the request. - // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) - // . + // + // For more information about versioning, see [PutBucketVersioning]. + // + // [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html VersionId *string noSmithyDocumentSerde } func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -325,37 +437,55 @@ type GetObjectOutput struct { Body io.ReadCloser // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not - // supported for directory buckets. + // with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be + // present if the object was uploaded with the object. 
For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more + // information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // response to verify that the checksum type that is received is the same checksum + // type that was specified in the CreateMultipartUpload request. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Specifies presentational information for the object. ContentDisposition *string @@ -378,9 +508,11 @@ type GetObjectOutput struct { // Indicates whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. + // // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the // response. + // // - If the specified version in the request is a delete marker, the response // returns a 405 Method Not Allowed error and the Last-Modified: timestamp // response header. @@ -390,20 +522,35 @@ type GetObjectOutput struct { // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ), the response includes this header. 
It includes the expiry-date and rule-id + // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), + // the response includes this header. It includes the expiry-date and rule-id // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. This functionality is not supported for directory + // rule-id is URL-encoded. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // The date and time at which the object is no longer cacheable. + // + // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + // the ExpiresString field which contains the unparsed value from the service + // response. Expires *time.Time - // Date and time when the object was last modified. General purpose buckets - When - // you specify a versionId of the object in your request, if the specified version - // in the request is a delete marker, the response returns a 405 Method Not Allowed - // error and the Last-Modified: timestamp response header. + // The unparsed value of the Expires field from the service response. Prefer use + // of this value over the normal Expires response field where possible. + ExpiresString *string + + // Date and time when the object was last modified. + // + // General purpose buckets - When you specify a versionId of the object in your + // request, if the specified version in the request is a delete marker, the + // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. LastModified *time.Time // A map of metadata to store with the object in S3. @@ -415,20 +562,25 @@ type GetObjectOutput struct { // are prefixed with x-amz-meta- . This can happen if you create metadata using an // API like SOAP that supports more flexible metadata than the REST API. For // example, using SOAP, you can create metadata whose values are not legal HTTP - // headers. This functionality is not supported for directory buckets. + // headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int32 // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. This - // functionality is not supported for directory buckets. + // returned if you have permission to view an object's legal hold status. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that's currently in place for this object. This - // functionality is not supported for directory buckets. + // The Object Lock mode that's currently in place for this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // The date and time when this object's Object Lock will expire. This - // functionality is not supported for directory buckets. + // The date and time when this object's Object Lock will expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. 
This value is only returned if you specify @@ -436,63 +588,75 @@ type GetObjectOutput struct { PartsCount *int32 // Amazon S3 can return this if your request involves a bucket that is either a - // source or destination in a replication rule. This functionality is not supported - // for directory buckets. + // source or destination in a replication rule. + // + // This functionality is not supported for directory buckets. ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides information about object restoration action and expiration time of the - // restored object copy. This functionality is not supported for directory buckets. - // Only the S3 Express One Zone storage class is supported by directory buckets to - // store objects. + // restored object copy. + // + // This functionality is not supported for directory buckets. Directory buckets + // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in + // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage + // class) in Dedicated Local Zones. Restore *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. Directory buckets - // - Only the S3 Express One Zone storage class is supported by directory buckets - // to store objects. + // for all objects except for S3 Standard storage class objects. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. StorageClass types.StorageClass // The number of tags, if any, on the object, when you have the relevant - // permission to read object tags. 
You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) - // to retrieve the tag set associated with an object. This functionality is not - // supported for directory buckets. + // permission to read object tags. + // + // You can use [GetObjectTagging] to retrieve the tag set associated with an object. + // + // This functionality is not supported for directory buckets. + // + // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html TagCount *int32 - // Version ID of the object. This functionality is not supported for directory - // buckets. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -523,25 +687,28 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -556,6 +723,21 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addResponseChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectValidationMiddleware(stack); err != nil { return err } @@ -565,7 +747,7 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil { @@ -592,6 +774,18 @@ func (c *Client) 
addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -620,13 +814,20 @@ func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) { return string(in.ChecksumMode), true } +func setGetObjectRequestValidationModeMember(input interface{}, mode string) { + in := input.(*GetObjectInput) + in.ChecksumMode = types.ChecksumMode(mode) +} + func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error { return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{ GetValidationMode: getGetObjectRequestValidationModeMember, - ValidationAlgorithms: []string{"CRC32", "CRC32C", "SHA256", "SHA1"}, + SetValidationMode: setGetObjectRequestValidationModeMember, + ResponseChecksumValidation: options.ResponseChecksumValidation, + ValidationAlgorithms: []string{"CRC64NVME", "CRC32", "CRC32C", "SHA256", "SHA1"}, IgnoreMultipartValidation: true, - LogValidationSkipped: true, - LogMultipartValidationSkipped: true, + LogValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped, + LogMultipartValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped, }) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go index 3b2a1687..494c7cfd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go @@ -13,24 +13,39 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the access -// control list (ACL) of an object. To use this operation, you must have -// s3:GetObjectAcl permissions or READ_ACP access to the object. For more -// information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) -// in the Amazon S3 User Guide This functionality is not supported for Amazon S3 on -// Outposts. By default, GET returns ACL information about the current version of -// an object. To return ACL information about a different version, use the -// versionId subresource. If your bucket uses the bucket owner enforced setting for -// S3 Object Ownership, requests to read ACLs are still supported and return the +// This operation is not supported for directory buckets. +// +// Returns the access control list (ACL) of an object. To use this operation, you +// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For +// more information, see [Mapping of ACL permissions and access policy permissions]in the Amazon S3 User Guide +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId +// subresource. 
+// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the // bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see Controlling object ownership and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetObjectAcl : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. +// +// The following operations are related to GetObjectAcl : +// +// [GetObject] +// +// [GetObjectAttributes] +// +// [DeleteObject] +// +// [PutObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { if params == nil { params = &GetObjectAclInput{} @@ -49,15 +64,19 @@ func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, op type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. 
+ // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -76,20 +95,24 @@ type GetObjectAclInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. This - // functionality is not supported for directory buckets. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -100,11 +123,13 @@ type GetObjectAclOutput struct { // A list of grants. Grants []types.Grant - // Container for the bucket owner's display name and ID. + // Container for the bucket owner's display name and ID. Owner *types.Owner // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
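For context while reviewing: the GetObject hunks further up add CRC64NVME to the accepted validation algorithms and make output-checksum validation configurable through the client's ResponseChecksumValidation and DisableLogOutputChecksumValidationSkipped options, while the hunks just above reshape the GetObjectAcl docs and shapes. A minimal caller-side sketch of those knobs (not part of this change; bucket/key names are placeholders, and aws.ResponseChecksumValidationWhenSupported and types.ChecksumModeEnabled are assumed from upstream aws-sdk-go-v2):

// Illustrative only — not part of the vendored change.
package main

import (
	"context"
	"io"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		// Validate any checksum the service returns (CRC64NVME, CRC32, CRC32C,
		// SHA256 or SHA1 per the ValidationAlgorithms list in the hunk above).
		// The constant name is assumed from upstream aws-sdk-go-v2.
		o.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenSupported
		// Leave the "validation skipped" log messages enabled (the default).
		o.DisableLogOutputChecksumValidationSkipped = false
	})

	obj, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:       aws.String("example-bucket"), // placeholder
		Key:          aws.String("example-key"),    // placeholder
		ChecksumMode: types.ChecksumModeEnabled,    // ask S3 to return a checksum
	})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Body.Close()
	if _, err := io.Copy(io.Discard, obj.Body); err != nil {
		log.Fatal(err) // a checksum mismatch is reported while reading the body
	}

	// ACL of the same object (general purpose buckets only).
	acl, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if acl.Owner != nil {
		log.Printf("owner %s, %d grants", aws.ToString(acl.Owner.ID), len(acl.Grants))
	}
}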
@@ -135,25 +160,28 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -171,6 +199,18 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { return err } @@ -180,7 +220,7 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil { @@ -204,6 +244,18 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go index ed53ae7b..9588a99e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -16,32 +16,40 @@ import ( // Retrieves all the metadata from an object without returning the object itself. // This operation is useful if you're interested only in an object's metadata. +// // GetObjectAttributes combines the functionality of HeadObject and ListParts . All // of the data returned with each of those individual calls can be returned with a -// single call to GetObjectAttributes . Directory buckets - For directory buckets, -// you must make requests for this API operation to the Zonal endpoint. 
These -// endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// single call to GetObjectAttributes . +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - To use GetObjectAttributes , you must // have READ access to the object. The permissions that you need to use this -// operation with depend on whether the bucket is versioned. If the bucket is -// versioned, you need both the s3:GetObjectVersion and -// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is -// not versioned, you need the s3:GetObject and s3:GetObjectAttributes -// permissions. For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// operation depend on whether the bucket is versioned. If the bucket is versioned, +// you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes +// permissions for this operation. If the bucket is not versioned, you need the +// s3:GetObject and s3:GetObjectAttributes permissions. For more information, see [Specifying Permissions in a Policy] // in the Amazon S3 User Guide. If the object that you request does not exist, the // error Amazon S3 returns depends on whether you also have the s3:ListBucket // permission. +// // - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an // HTTP status code 404 Not Found ("no such key") error. +// // - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP // status code 403 Forbidden ("access denied") error. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -49,8 +57,12 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. 
For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Encryption Encryption request headers, like x-amz-server-side-encryption , // should not be sent for HEAD requests if your object uses server-side encryption @@ -61,49 +73,96 @@ import ( // want to specify the encryption method. If you include this header in a GET // request for an object that uses these types of keys, you’ll get an HTTP 400 Bad // Request error. It's because the encryption method can't be changed when you -// retrieve the object. If you encrypt an object by using server-side encryption -// with customer-provided encryption keys (SSE-C) when you store the object in -// Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers to provide the encryption key for the server to be able to -// retrieve the object's metadata. The headers are: +// retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when you +// retrieve the metadata from the object, you must use the following headers to +// provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// // - x-amz-server-side-encryption-customer-algorithm +// // - x-amz-server-side-encryption-customer-key +// // - x-amz-server-side-encryption-customer-key-MD5 // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. Directory bucket permissions - For directory -// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( -// AES256 ) is supported. Versioning Directory buckets - S3 Versioning isn't -// enabled and supported for directory buckets. For this API operation, only the -// null value of the version ID is supported by directory buckets. You can only -// specify null to the versionId query parameter in the request. Conditional -// request headers Consider the following when using request headers: +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. +// +// Directory bucket permissions - For directory buckets, there are only two +// supported options for server-side encryption: server-side encryption with Amazon +// S3 managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses +// the desired encryption configuration and you don't override the bucket default +// encryption in your CreateSession requests or PUT object requests. Then, new +// objects are automatically encrypted with the desired encryption settings. For +// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about +// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. 
+// +// Versioning Directory buckets - S3 Versioning isn't enabled and supported for +// directory buckets. For this API operation, only the null value of the version +// ID is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// Conditional request headers Consider the following when using request headers: +// // - If both of the If-Match and If-Unmodified-Since headers are present in the // request as follows, then Amazon S3 returns the HTTP status code 200 OK and the // data requested: +// // - If-Match condition evaluates to true . -// - If-Unmodified-Since condition evaluates to false . For more information -// about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) -// . +// +// - If-Unmodified-Since condition evaluates to false . +// +// For more information about conditional requests, see [RFC 7232]. +// // - If both of the If-None-Match and If-Modified-Since headers are present in // the request as follows, then Amazon S3 returns the HTTP status code 304 Not // Modified : +// // - If-None-Match condition evaluates to false . -// - If-Modified-Since condition evaluates to true . For more information about -// conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are -// related to GetObjectAttributes : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) -// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) -// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) -// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - If-Modified-Since condition evaluates to true . +// +// For more information about conditional requests, see [RFC 7232]. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// The following actions are related to GetObjectAttributes : +// +// [GetObject] +// +// [GetObjectAcl] +// +// [GetObjectLegalHold] +// +// [GetObjectLockConfiguration] +// +// [GetObjectRetention] +// +// [GetObjectTagging] +// +// [HeadObject] +// +// [ListParts] +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [RFC 7232]: https://tools.ietf.org/html/rfc7232 +// [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html +// [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { if params == nil { params = &GetObjectAttributesInput{} @@ -121,31 +180,40 @@ func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttri type GetObjectAttributesInput struct { - // The name of the bucket that contains the object. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -177,32 +245,38 @@ type GetObjectAttributesInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // The version ID used to reference a specific version of the object. S3 - // Versioning isn't enabled and supported for directory buckets. For this API + // The version ID used to reference a specific version of the object. + // + // S3 Versioning isn't enabled and supported for directory buckets. For this API // operation, only the null value of the version ID is supported by directory // buckets. You can only specify null to the versionId query parameter in the // request. @@ -212,6 +286,7 @@ type GetObjectAttributesInput struct { } func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -223,14 +298,18 @@ type GetObjectAttributesOutput struct { // Specifies whether the object retrieved was ( true ) or was not ( false ) a // delete marker. If false , this response header does not appear in the response. + // To learn more about delete markers, see [Working with delete markers]. + // // This functionality is not supported for directory buckets. 
+ // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // An ETag is an opaque identifier assigned by a web server to a specific version // of a resource found at a URL. ETag *string - // The creation date of the object. + // Date and time when the object was last modified. LastModified *time.Time // A collection of parts associated with a multipart upload. @@ -240,18 +319,26 @@ type GetObjectAttributesOutput struct { ObjectSize *int64 // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides the storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // The version ID of the object. This functionality is not supported for directory - // buckets. + // The version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. 
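As a reading aid for the GetObjectAttributes input/output shapes documented above, a hedged usage sketch (the required ObjectAttributes field and its constants are not visible in these hunks and are assumed from the upstream SDK; bucket and key names are placeholders):

// Illustrative only — not part of the vendored change.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		// Which attributes to return; constants assumed from the types package.
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesEtag,
			types.ObjectAttributesObjectSize,
			types.ObjectAttributesStorageClass,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("etag=%s size=%d class=%s",
		aws.ToString(out.ETag),
		aws.ToInt64(out.ObjectSize),
		out.StorageClass)
}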
@@ -282,25 +369,28 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -318,6 +408,18 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { return err } @@ -327,7 +429,7 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil { @@ -351,6 +453,18 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go index 3f7af220..497b5b31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -13,11 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets an object's current -// legal hold status. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. 
The following -// action is related to GetObjectLegalHold : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Gets an object's current legal hold status. For more information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectLegalHold : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { if params == nil { params = &GetObjectLegalHoldInput{} @@ -36,15 +43,20 @@ func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalH type GetObjectLegalHoldInput struct { // The bucket name containing the object whose legal hold status you want to - // retrieve. Access points - When you use this action with an access point, you - // must provide the alias of the access point in place of the bucket name or - // specify the access point ARN. When using the access point ARN, you must direct + // retrieve. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -63,10 +75,12 @@ type GetObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID of the object whose legal hold status you want to retrieve. 
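A brief sketch of the GetObjectLegalHold call documented above (the LegalHold output field is assumed from the upstream SDK since it is not shown in these hunks; bucket, key, and version ID are placeholders):

// Illustrative only — not part of the vendored change.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
		Bucket:    aws.String("example-bucket"),     // placeholder
		Key:       aws.String("example-key"),        // placeholder
		VersionId: aws.String("example-version-id"), // optional; omit for the current version
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.LegalHold != nil {
		log.Printf("legal hold status: %s", out.LegalHold.Status)
	}
}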
@@ -76,6 +90,7 @@ type GetObjectLegalHoldInput struct { } func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -113,25 +128,28 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -149,6 +167,18 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -158,7 +188,7 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil { @@ -182,6 +212,18 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go index 1ba436d5..889fcca6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go @@ -13,12 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets the Object Lock -// configuration for a bucket. The rule specified in the Object Lock configuration -// will be applied by default to every new object placed in the specified bucket. 
-// For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . The following action is related to GetObjectLockConfiguration : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object placed +// in the specified bucket. For more information, see [Locking Objects]. +// +// The following action is related to GetObjectLockConfiguration : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { if params == nil { params = &GetObjectLockConfigurationInput{} @@ -36,16 +42,20 @@ func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObje type GetObjectLockConfigurationInput struct { - // The bucket whose Object Lock configuration you want to retrieve. Access points - // - When you use this action with an access point, you must provide the alias of - // the access point in place of the bucket name or specify the access point ARN. - // When using the access point ARN, you must direct requests to the access point - // hostname. The access point hostname takes the form + // The bucket whose Object Lock configuration you want to retrieve. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -59,6 +69,7 @@ type GetObjectLockConfigurationInput struct { } func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -96,25 +107,28 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -132,6 +146,18 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -141,7 +167,7 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { @@ -165,6 +191,18 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go index 1dea7d8a..3c5698e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -13,11 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves an object's -// retention settings. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . 
This functionality is not supported for Amazon S3 on Outposts. The following -// action is related to GetObjectRetention : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Retrieves an object's retention settings. For more information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectRetention : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { if params == nil { params = &GetObjectRetentionInput{} @@ -36,15 +43,20 @@ func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetent type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to - // retrieve. Access points - When you use this action with an access point, you - // must provide the alias of the access point in place of the bucket name or - // specify the access point ARN. When using the access point ARN, you must direct + // retrieve. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -63,10 +75,12 @@ type GetObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID for the object whose retention settings you want to retrieve. @@ -76,6 +90,7 @@ type GetObjectRetentionInput struct { } func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -113,25 +128,28 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -149,6 +167,18 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -158,7 +188,7 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil { @@ -182,6 +212,18 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go index c020e9bd..6535a1ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -13,20 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not 
supported by directory buckets. Returns the tag-set of an -// object. You send the GET request against the tagging subresource associated with -// the object. To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the // s3:GetObjectTagging action. By default, the GET action returns information about // current version of an object. For a versioned bucket, you can have multiple // versions of an object in your bucket. To retrieve tags of any other version, use // the versionId query parameter. You also need permission for the -// s3:GetObjectVersionTagging action. By default, the bucket owner has this -// permission and can grant this permission to others. For information about the -// Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . The following actions are related to GetObjectTagging : -// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// s3:GetObjectVersionTagging action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see [Object Tagging]. +// +// The following actions are related to GetObjectTagging : +// +// [DeleteObjectTagging] +// +// [GetObjectAttributes] +// +// [PutObjectTagging] +// +// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { if params == nil { params = &GetObjectTaggingInput{} @@ -45,22 +60,27 @@ func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingI type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -79,10 +99,12 @@ type GetObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The versionId of the object for which to get the tagging information. 
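The GetObjectTagging doc comment above spells out the required permissions (s3:GetObjectTagging, plus s3:GetObjectVersionTagging when a versionId is supplied). Purely as a hedged illustration of the refreshed v2 client surface (this PR only bumps the vendored SDK), here is a minimal sketch of calling the operation; the region resolution relies on the default config chain, and the bucket and key names are placeholders, not values used anywhere in this repository.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Load credentials/region from the default chain (env vars, shared config, IMDS, ...).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Fetch the tag-set of the current object version. Bucket and Key are placeholders.
	out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
}

Setting VersionId on the input would return the tag-set of that specific version instead of the current one, which is what the s3:GetObjectVersionTagging permission in the doc comment refers to.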
@@ -92,6 +114,7 @@ type GetObjectTaggingInput struct { } func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -134,25 +157,28 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -170,6 +196,18 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -179,7 +217,7 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil { @@ -203,6 +241,18 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go index 6689ef97..5959ce73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -14,14 +14,24 @@ import ( "io" ) -// This operation is not supported by directory buckets. Returns torrent files -// from a bucket. BitTorrent can save you bandwidth when you're distributing large -// files. 
You can get torrent only for objects that are less than 5 GB in size, and -// that are not encrypted using server-side encryption with a customer-provided -// encryption key. To use GET, you must have READ access to the object. This -// functionality is not supported for Amazon S3 on Outposts. The following action -// is related to GetObjectTorrent : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// This operation is not supported for directory buckets. +// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. +// +// You can get torrent only for objects that are less than 5 GB in size, and that +// are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectTorrent : +// +// [GetObject] +// +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { if params == nil { params = &GetObjectTorrentInput{} @@ -58,16 +68,19 @@ type GetObjectTorrentInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -78,7 +91,9 @@ type GetObjectTorrentOutput struct { Body io.ReadCloser // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
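The GetObjectTorrent hunks above note the operation's constraints (objects under 5 GB, no SSE-C encryption, READ access required) and that RequestPayer must be set for Requester Pays buckets. As a hedged sketch only, not code from this repository, the helper below streams the torrent file to disk with the updated client; saveTorrent, the bucket/key names, and the destination path are illustrative assumptions.

package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// saveTorrent downloads the BitTorrent file for an object and writes it to dest.
// Assumption: the object is under 5 GB and not encrypted with SSE-C, per the doc comment above.
func saveTorrent(ctx context.Context, client *s3.Client, bucket, key, dest string) error {
	out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		RequestPayer: types.RequestPayerRequester, // assumption: drop this unless the bucket is Requester Pays
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()

	// Stream the torrent body straight to the file.
	_, err = io.Copy(f, out.Body)
	return err
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := saveTorrent(ctx, s3.NewFromConfig(cfg), "example-bucket", "large-object.bin", "large-object.torrent"); err != nil {
		log.Fatal(err)
	}
}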
@@ -109,25 +124,28 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -142,6 +160,18 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { return err } @@ -151,7 +181,7 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil { @@ -175,6 +205,18 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go index 0ae12e39..97005c10 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go @@ -14,22 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:GetBucketPublicAccessBlock permission. For more -// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . 
When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or -// an object, it checks the PublicAccessBlock configuration for both the bucket -// (or the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported for directory buckets. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For +// more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock settings are different between the bucket and the account, // Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to GetPublicAccessBlock : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see [The Meaning of "Public"]. 
+// +// The following operations are related to GetPublicAccessBlock : +// +// [Using Amazon S3 Block Public Access] +// +// [PutPublicAccessBlock] +// +// [GetPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { if params == nil { params = &GetPublicAccessBlockInput{} @@ -62,6 +78,7 @@ type GetPublicAccessBlockInput struct { } func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -100,25 +117,28 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -136,6 +156,18 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -145,7 +177,7 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil 
{ @@ -169,6 +201,18 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go index 8a18ff85..a76f0313 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go @@ -19,42 +19,59 @@ import ( // You can use this operation to determine if a bucket exists and if you have // permission to access it. The action returns a 200 OK if the bucket exists and -// you have permission to access it. If the bucket does not exist or you do not -// have permission to access it, the HEAD request returns a generic 400 Bad Request -// , 403 Forbidden or 404 Not Found code. A message body is not included, so you -// cannot determine the exception beyond these error codes. Directory buckets - You -// must make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests -// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket -// requests must be authenticated and signed by using IAM credentials (access key -// ID and secret access key for the IAM identities). All headers with the x-amz- -// prefix, including x-amz-copy-source , must be signed. For more information, see -// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory bucket - You must use IAM credentials to authenticate and authorize +// you have permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, the +// HEAD request returns a generic 400 Bad Request , 403 Forbidden or 404 Not Found +// code. A message body is not included, so you cannot determine the exception +// beyond these HTTP response codes. +// +// Authentication and authorization General purpose buckets - Request to public +// buckets that grant the s3:ListBucket permission publicly do not need to be +// signed. All other HeadBucket requests must be authenticated and signed by using +// IAM credentials (access key ID and secret access key for the IAM identities). +// All headers with the x-amz- prefix, including x-amz-copy-source , must be +// signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use IAM credentials to authenticate and authorize // your access to the HeadBucket API operation, instead of using the temporary -// security credentials through the CreateSession API operation. Amazon Web -// Services CLI or SDKs handles authentication and authorization on your behalf. +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. 
+// // Permissions +// // - General purpose bucket permissions - To use this operation, you must have // permissions to perform the s3:ListBucket action. The bucket owner has this // permission by default and can grant this permission to others. For more -// information about permissions, see Managing access permissions to your Amazon -// S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. +// information about permissions, see [Managing access permissions to your Amazon S3 resources]in the Amazon S3 User Guide. +// // - Directory bucket permissions - You must have the s3express:CreateSession // permission in the Action element of a policy. By default, the session is in // the ReadWrite mode. If you want to restrict the access, you can explicitly set -// the s3express:SessionMode condition key to ReadOnly on the bucket. For more -// information about example bucket policies, see Example bucket policies for S3 -// Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// the s3express:SessionMode condition key to ReadOnly on the bucket. +// +// For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 +// +// User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . +// You must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style +// requests are not supported. For more information about endpoints in Availability +// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints in +// Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. 
+// +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { if params == nil { params = &HeadBucketInput{} @@ -72,36 +89,47 @@ func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns type HeadBucketInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Object Lambda access points - When you use this API - // operation with an Object Lambda access point, provide the alias of the Object - // Lambda access point in place of the bucket name. If the Object Lambda access - // point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . Access points and Object Lambda access points are not supported by directory - // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point in + // place of the bucket name. If the Object Lambda access point alias in a request + // is not valid, the error code InvalidAccessPointAliasError is returned. For more + // information about InvalidAccessPointAliasError , see [List of Error Codes]. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -115,6 +143,7 @@ type HeadBucketInput struct { } func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -122,21 +151,25 @@ func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { type HeadBucketOutput struct { // Indicates whether the bucket name used in the request is an access point alias. - // This functionality is not supported for directory buckets. + // + // For directory buckets, the value of this field is false . AccessPointAlias *bool - // The name of the location where the bucket will be created. For directory - // buckets, the AZ ID of the Availability Zone where the bucket is created. An - // example AZ ID value is usw2-az2 . This functionality is only supported by - // directory buckets. + // The name of the location where the bucket will be created. + // + // For directory buckets, the Zone ID of the Availability Zone or the Local Zone + // where the bucket is created. An example Zone ID value for an Availability Zone + // is usw2-az1 . + // + // This functionality is only supported by directory buckets. BucketLocationName *string - // The type of location where the bucket is created. This functionality is only - // supported by directory buckets. + // The type of location where the bucket is created. + // + // This functionality is only supported by directory buckets. BucketLocationType types.LocationType - // The Region that the bucket is located. This functionality is not supported for - // directory buckets. + // The Region that the bucket is located. BucketRegion *string // Metadata pertaining to the operation's result. @@ -167,25 +200,28 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -203,6 +239,18 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpHeadBucketValidationMiddleware(stack); err != nil { return err } @@ -212,7 +260,7 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = 
addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addHeadBucketUpdateEndpoint(stack, options); err != nil { @@ -236,31 +284,38 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } - return nil -} - -func (v *HeadBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false + if err = addSpanInitializeStart(stack); err != nil { + return err } - return *v.Bucket, true -} - -// HeadBucketAPIClient is a client that implements the HeadBucket operation. -type HeadBucketAPIClient interface { - HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil } -var _ HeadBucketAPIClient = (*Client)(nil) - // BucketExistsWaiterOptions are waiter options for BucketExistsWaiter type BucketExistsWaiterOptions struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. APIOptions []func(*middleware.Stack) error + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + // MinDelay is the minimum amount of time to delay between retries. If unset, // BucketExistsWaiter will use default minimum delay of 5 seconds. Note that // MinDelay must resolve to a value lesser than or equal to the MaxDelay. @@ -276,12 +331,13 @@ type BucketExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. 
Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) } @@ -357,7 +413,16 @@ func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBuck } out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } }) retryable, err := options.Retryable(ctx, params, out, err) @@ -403,6 +468,9 @@ func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, out } } + if err != nil { + return false, err + } return true, nil } @@ -412,8 +480,17 @@ type BucketNotExistsWaiterOptions struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. APIOptions []func(*middleware.Stack) error + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + // MinDelay is the minimum amount of time to delay between retries. If unset, // BucketNotExistsWaiter will use default minimum delay of 5 seconds. Note that // MinDelay must resolve to a value lesser than or equal to the MaxDelay. @@ -429,12 +506,13 @@ type BucketNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) } @@ -511,7 +589,16 @@ func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadB } out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } }) retryable, err := options.Retryable(ctx, params, out, err) @@ -553,9 +640,26 @@ func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, } } + if err != nil { + return false, err + } return true, nil } +func (v *HeadBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadBucketAPIClient is a client that implements the HeadBucket operation. +type HeadBucketAPIClient interface { + HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) +} + +var _ HeadBucketAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go index 5f7b55e5..b75be08a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -19,43 +19,53 @@ import ( // The HEAD operation retrieves metadata from an object without returning the // object itself. This operation is useful if you're interested only in an object's -// metadata. A HEAD request has the same options as a GET operation on an object. -// The response is identical to the GET response except that there is no response +// metadata. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response // body. Because of this, if the HEAD request generates an error, it returns a // generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 // Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not -// possible to retrieve the exact exception of these error codes. Request headers -// are limited to 8 KB in size. For more information, see Common Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) -// . Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// possible to retrieve the exact exception of these error codes. +// +// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers]. +// +// Permissions +// // - General purpose bucket permissions - To use HEAD , you must have the // s3:GetObject permission. You need the relevant read object (or version) -// permission for this operation. For more information, see Actions, resources, -// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) -// in the Amazon S3 User Guide. If the object you request doesn't exist, the error -// that Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. 
-// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3]in the Amazon S3 +// User Guide. For more information about the permissions to S3 API operations by +// S3 resource types, see Required permissions for Amazon S3 API operationsin the Amazon S3 User Guide. +// +// If the object you request doesn't exist, the error that Amazon S3 returns +// +// depends on whether you also have the s3:ListBucket permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If you enable x-amz-checksum-mode in the request and the object is encrypted +// +// with Amazon Web Services Key Management Service (Amazon Web Services KMS), you +// must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM +// identity-based policies and KMS key policies for the KMS key to retrieve the +// checksum of the object. // // Encryption Encryption request headers, like x-amz-server-side-encryption , // should not be sent for HEAD requests if your object uses server-side encryption @@ -66,20 +76,27 @@ import ( // want to specify the encryption method. 
If you include this header in a HEAD // request for an object that uses these types of keys, you’ll get an HTTP 400 Bad // Request error. It's because the encryption method can't be changed when you -// retrieve the object. If you encrypt an object by using server-side encryption -// with customer-provided encryption keys (SSE-C) when you store the object in -// Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers to provide the encryption key for the server to be able to -// retrieve the object's metadata. The headers are: +// retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when you +// retrieve the metadata from the object, you must use the following headers to +// provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// // - x-amz-server-side-encryption-customer-algorithm +// // - x-amz-server-side-encryption-customer-key +// // - x-amz-server-side-encryption-customer-key-MD5 // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. Directory bucket permissions - For directory -// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( -// AES256 ) is supported. Versioning +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. +// +// Directory bucket - For directory buckets, there are only two supported options +// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. +// +// Versioning // // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the @@ -88,18 +105,40 @@ import ( // - If the specified version is a delete marker, the response returns a 405 // Method Not Allowed error and the Last-Modified: timestamp response header. // -// - Directory buckets - Delete marker is not supported by directory buckets. +// - Directory buckets - Delete marker is not supported for directory buckets. // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is // supported by directory buckets. You can only specify null to the versionId // query parameter in the request. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are -// related to HeadObject : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// For directory buckets, you must make requests for this API operation to the +// Zonal endpoint. These endpoints support virtual-hosted-style requests in the +// format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . 
Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// The following actions are related to HeadObject : +// +// [GetObject] +// +// [GetObjectAttributes] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { if params == nil { params = &HeadObjectInput{} @@ -117,31 +156,40 @@ func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns type HeadObjectInput struct { - // The name of the bucket that contains the object. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. 
S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -151,10 +199,19 @@ type HeadObjectInput struct { // This member is required. Key *string - // To retrieve the checksum, this parameter must be enabled. In addition, if you - // enable ChecksumMode and the object is encrypted with Amazon Web Services Key - // Management Service (Amazon Web Services KMS), you must have permission to use - // the kms:Decrypt action for the request to succeed. + // To retrieve the checksum, this parameter must be enabled. 
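A minimal sketch of the ChecksumMode behaviour documented here, assuming an already-configured *s3.Client and placeholder bucket/key names (and, for KMS-encrypted objects, the kms:Decrypt permission noted just below):

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// headChecksums is an illustrative helper, not part of the SDK: it enables
// ChecksumMode so the Checksum* fields of HeadObjectOutput are populated.
func headChecksums(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		ChecksumMode: types.ChecksumModeEnabled,
	})
	if err != nil {
		return err
	}
	// Only the algorithm(s) the object was actually uploaded with are non-nil.
	fmt.Println("CRC32:", aws.ToString(out.ChecksumCRC32))
	fmt.Println("CRC64NVME:", aws.ToString(out.ChecksumCRC64NVME))
	fmt.Println("SHA256:", aws.ToString(out.ChecksumSHA256))
	return nil
}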
+ // + // General purpose buckets - If you enable checksum mode and the object is + // uploaded with a [checksum]and encrypted with an Key Management Service (KMS) key, you + // must have permission to use the kms:Decrypt action to retrieve the checksum. + // + // Directory buckets - If you enable ChecksumMode and the object is encrypted with + // Amazon Web Services Key Management Service (Amazon Web Services KMS), you must + // also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM + // identity-based policies and KMS key policies for the KMS key to retrieve the + // checksum of the object. + // + // [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html ChecksumMode types.ChecksumMode // The account ID of the expected bucket owner. If the account ID that you provide @@ -163,40 +220,71 @@ type HeadObjectInput struct { ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. If both of the - // If-Match and If-Unmodified-Since headers are present in the request as follows: + // specified; otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // // - If-Match condition evaluates to true , and; + // // - If-Unmodified-Since condition evaluates to false ; - // Then Amazon S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. If both of the If-None-Match and - // If-Modified-Since headers are present in the request as follows: + // otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: + // // - If-None-Match condition evaluates to false , and; + // // - If-Modified-Since condition evaluates to true ; - // Then Amazon S3 returns the 304 Not Modified response code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. If both of the - // If-None-Match and If-Modified-Since headers are present in the request as - // follows: + // specified; otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: + // // - If-None-Match condition evaluates to false , and; + // // - If-Modified-Since condition evaluates to true ; - // Then Amazon S3 returns the 304 Not Modified response code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns the 304 Not Modified response code. 
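For the conditional headers above, a hedged sketch of how a caller might use them. Because HeadObject has no success payload for 304/412 responses, those outcomes surface as operation errors in Go; the exact wrapped error type can vary, so the status-code check below is an assumption rather than a documented contract. Bucket, key, etag, and lastSeen are placeholders.

package example

import (
	"context"
	"errors"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// headIfChanged is an illustrative helper: it asks S3 for metadata only if the
// object no longer matches a cached ETag / last-seen time.
func headIfChanged(ctx context.Context, client *s3.Client, bucket, key, etag string, lastSeen time.Time) (*s3.HeadObjectOutput, error) {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		IfNoneMatch:     aws.String(etag),
		IfModifiedSince: aws.Time(lastSeen),
	})
	if err != nil {
		var respErr *awshttp.ResponseError
		if errors.As(err, &respErr) && respErr.HTTPStatusCode() == http.StatusNotModified {
			// 304: the cached metadata is still current; nothing to refresh.
			return nil, nil
		}
		return nil, err
	}
	return out, nil // the object changed; out carries the fresh metadata
}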
+ // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. If both of the If-Match - // and If-Unmodified-Since headers are present in the request as follows: + // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // // - If-Match condition evaluates to true , and; + // // - If-Unmodified-Since condition evaluates to false ; - // Then Amazon S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -214,39 +302,64 @@ type HeadObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Sets the Cache-Control header of the response. + ResponseCacheControl *string + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string + + // Sets the Content-Type header of the response. + ResponseContentType *string + + // Sets the Expires header of the response. + ResponseExpires *time.Time + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. 
SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // Version ID used to reference a specific version of the object. For directory - // buckets in this API operation, only the null value of the version ID is - // supported. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. VersionId *string noSmithyDocumentSerde } func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -257,54 +370,77 @@ type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string - // The archive state of the head object. This functionality is not supported for - // directory buckets. + // The archive state of the head object. + // + // This functionality is not supported for directory buckets. ArchiveStatus types.ArchiveStatus // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not - // supported for directory buckets. + // with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more + // information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. 
When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // response to verify that the checksum type that is received is the same checksum + // type that was specified in CreateMultipartUpload request. For more information, + // see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Specifies presentational information for the object. ContentDisposition *string @@ -319,28 +455,45 @@ type HeadObjectOutput struct { // Size of the body in bytes. ContentLength *int64 + // The portion of the object returned in the response for a GET request. + ContentRange *string + // A standard MIME type describing the format of the object data. ContentType *string // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. This - // functionality is not supported for directory buckets. + // Marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ), the response includes this header. It includes the expiry-date and rule-id + // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), + // the response includes this header. It includes the expiry-date and rule-id // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. This functionality is not supported for directory + // rule-id is URL-encoded. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // The date and time at which the object is no longer cacheable. + // + // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + // the ExpiresString field which contains the unparsed value from the service + // response. Expires *time.Time + // The unparsed value of the Expires field from the service response. Prefer use + // of this value over the normal Expires response field where possible. + ExpiresString *string + // Date and time when the object was last modified. 
LastModified *time.Time @@ -352,26 +505,34 @@ type HeadObjectOutput struct { // This is set to the number of metadata entries not returned in x-amz-meta // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. This functionality - // is not supported for directory buckets. + // can create metadata whose values are not legal HTTP headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int32 // Specifies whether a legal hold is in effect for this object. This header is // only returned if the requester has the s3:GetObjectLegalHold permission. This // header is not returned if the specified version of this object has never had a - // legal hold applied. For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . This functionality is not supported for directory buckets. + // legal hold applied. For more information about S3 Object Lock, see [Object Lock]. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // The Object Lock mode, if any, that's in effect for this object. This header is // only returned if the requester has the s3:GetObjectRetention permission. For - // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . This functionality is not supported for directory buckets. + // more information about S3 Object Lock, see [Object Lock]. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockMode types.ObjectLockMode // The date and time when the Object Lock retention period expires. This header is - // only returned if the requester has the s3:GetObjectRetention permission. This - // functionality is not supported for directory buckets. + // only returned if the requester has the s3:GetObjectRetention permission. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify @@ -379,89 +540,118 @@ type HeadObjectOutput struct { PartsCount *int32 // Amazon S3 can return this header if your request involves a bucket that is - // either a source or a destination in a replication rule. In replication, you have - // a source bucket on which you configure replication and destination bucket or - // buckets where Amazon S3 stores object replicas. When you request an object ( - // GetObject ) or object metadata ( HeadObject ) from these buckets, Amazon S3 will - // return the x-amz-replication-status header in the response as follows: + // either a source or a destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication and + // destination bucket or buckets where Amazon S3 stores object replicas. 
When you + // request an object ( GetObject ) or object metadata ( HeadObject ) from these + // buckets, Amazon S3 will return the x-amz-replication-status header in the + // response as follows: + // // - If requesting an object from the source bucket, Amazon S3 will return the // x-amz-replication-status header if the object in your request is eligible for - // replication. For example, suppose that in your replication configuration, you - // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with - // key prefix TaxDocs . Any objects you upload with this key name prefix, for - // example TaxDocs/document1.pdf , are eligible for replication. For any object - // request with this key name prefix, Amazon S3 will return the - // x-amz-replication-status header with value PENDING, COMPLETED or FAILED - // indicating object replication status. + // replication. + // + // For example, suppose that in your replication configuration, you specify object + // prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix + // TaxDocs . Any objects you upload with this key name prefix, for example + // TaxDocs/document1.pdf , are eligible for replication. For any object request + // with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // // - If requesting an object from a destination bucket, Amazon S3 will return // the x-amz-replication-status header with value REPLICA if the object in your // request is a replica that Amazon S3 created and there is no replica modification // replication in progress. + // // - When replicating objects to multiple destination buckets, the // x-amz-replication-status header acts differently. The header of the source // object will only return a value of COMPLETED when replication is successful to // all destinations. The header will remain at value PENDING until replication has // completed for all destinations. If one or more destinations fails replication // the header will return FAILED. - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // . This functionality is not supported for directory buckets. + // + // For more information, see [Replication]. + // + // This functionality is not supported for directory buckets. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If the object is an archived object (an object whose storage class is GLACIER), // the response includes this header if either the archive restoration is in - // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // or an archive copy is already restored. If an archive copy is already restored, - // the header value indicates when Amazon S3 is scheduled to delete the object - // copy. For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 - // Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header - // returns the value ongoing-request="true" . 
For more information about archiving - // objects, see Transitioning Objects: General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations) - // . This functionality is not supported for directory buckets. Only the S3 Express - // One Zone storage class is supported by directory buckets to store objects. + // progress (see [RestoreObject]or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value + // ongoing-request="true" . + // + // For more information about archiving objects, see [Transitioning Objects: General Considerations]. + // + // This functionality is not supported for directory buckets. Directory buckets + // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in + // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage + // class) in Dedicated Local Zones. + // + // [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations + // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html Restore *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. + // for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. 
+ // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // Version ID of the object. This functionality is not supported for directory - // buckets. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -492,25 +682,28 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -528,6 +721,18 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpHeadObjectValidationMiddleware(stack); err != nil { return err } @@ -537,7 +742,7 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addHeadObjectUpdateEndpoint(stack, options); err != nil { @@ -561,31 +766,38 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } - return nil -} - -func (v *HeadObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false + if err = 
addSpanInitializeStart(stack); err != nil { + return err } - return *v.Bucket, true -} - -// HeadObjectAPIClient is a client that implements the HeadObject operation. -type HeadObjectAPIClient interface { - HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil } -var _ HeadObjectAPIClient = (*Client)(nil) - // ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter type ObjectExistsWaiterOptions struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. APIOptions []func(*middleware.Stack) error + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + // MinDelay is the minimum amount of time to delay between retries. If unset, // ObjectExistsWaiter will use default minimum delay of 5 seconds. Note that // MinDelay must resolve to a value lesser than or equal to the MaxDelay. @@ -601,12 +813,13 @@ type ObjectExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) } @@ -682,7 +895,16 @@ func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObje } out, err := w.client.HeadObject(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } }) retryable, err := options.Retryable(ctx, params, out, err) @@ -728,6 +950,9 @@ func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, out } } + if err != nil { + return false, err + } return true, nil } @@ -737,8 +962,17 @@ type ObjectNotExistsWaiterOptions struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. APIOptions []func(*middleware.Stack) error + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + // MinDelay is the minimum amount of time to delay between retries. If unset, // ObjectNotExistsWaiter will use default minimum delay of 5 seconds. Note that // MinDelay must resolve to a value lesser than or equal to the MaxDelay. @@ -754,12 +988,13 @@ type ObjectNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) } @@ -836,7 +1071,16 @@ func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadO } out, err := w.client.HeadObject(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } }) retryable, err := options.Retryable(ctx, params, out, err) @@ -878,9 +1122,26 @@ func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, } } + if err != nil { + return false, err + } return true, nil } +func (v *HeadObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadObjectAPIClient is a client that implements the HeadObject operation. 
+type HeadObjectAPIClient interface { + HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) +} + +var _ HeadObjectAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go index 6ecf8458..2580b44a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -14,27 +14,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the analytics -// configurations for the bucket. You can have up to 1,000 analytics configurations -// per bucket. This action supports list pagination and does not return more than -// 100 configurations at a time. You should always check the IsTruncated element -// in the response. If there are no more configurations to list, IsTruncated is -// set to false. If there are more configurations to list, IsTruncated is set to -// true, and there will be a value in NextContinuationToken . You use the +// This operation is not supported for directory buckets. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element in +// the response. If there are no more configurations to list, IsTruncated is set +// to false. If there are more configurations to list, IsTruncated is set to true, +// and there will be a value in NextContinuationToken . You use the // NextContinuationToken value to continue the pagination of the list by passing -// the value in continuation-token in the request to GET the next page. To use -// this operation, you must have permissions to perform the +// the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the // s3:GetAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about Amazon S3 analytics feature, see Amazon S3 Analytics – -// Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . 
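Tying the ObjectExists waiter changes a little earlier (the new ClientOptions field and the addIsWaiterUserAgent wiring) to caller-side usage, a sketch with placeholder names:

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// waitForObject is an illustrative helper: it blocks until the object exists
// or maxWait elapses, using the waiter generated in api_op_HeadObject.go.
func waitForObject(ctx context.Context, client *s3.Client, bucket, key string, maxWait time.Duration) error {
	waiter := s3.NewObjectExistsWaiter(client, func(o *s3.ObjectExistsWaiterOptions) {
		o.MinDelay = 2 * time.Second
		o.MaxDelay = 15 * time.Second
		// ClientOptions (new in this revision) lets the waiter adjust the inner
		// client for every attempt it makes.
		o.ClientOptions = append(o.ClientOptions, func(opts *s3.Options) {
			// placeholder: tweak opts here if needed
		})
	})
	return waiter.Wait(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}, maxWait)
}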
The following operations are related to ListBucketAnalyticsConfigurations : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// The following operations are related to ListBucketAnalyticsConfigurations : +// +// [GetBucketAnalyticsConfiguration] +// +// [DeleteBucketAnalyticsConfiguration] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { if params == nil { params = &ListBucketAnalyticsConfigurationsInput{} @@ -70,6 +83,7 @@ type ListBucketAnalyticsConfigurationsInput struct { } func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -121,25 +135,28 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -157,6 +174,18 @@ 
func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -166,7 +195,7 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil { @@ -190,6 +219,18 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go index de4a2079..2a77b680 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -14,25 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. 
For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to ListBucketIntelligentTieringConfigurations include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) { if params == nil { params = &ListBucketIntelligentTieringConfigurationsInput{} @@ -64,6 +77,7 @@ type ListBucketIntelligentTieringConfigurationsInput struct { } func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -115,25 +129,28 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); 
err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -151,6 +168,18 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -160,7 +189,7 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil { @@ -184,6 +213,18 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go index 881f7d92..0bd20216 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -14,26 +14,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns a list of -// inventory configurations for the bucket. You can have up to 1,000 analytics -// configurations per bucket. This action supports list pagination and does not -// return more than 100 configurations at a time. Always check the IsTruncated -// element in the response. If there are no more configurations to list, -// IsTruncated is set to false. If there are more configurations to list, -// IsTruncated is set to true, and there is a value in NextContinuationToken . You -// use the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Returns a list of inventory configurations for the bucket. You can have up to +// 1,000 analytics configurations per bucket. 
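The continuation-token pattern described in these List* doc comments looks roughly like the following in caller code (a sketch with a placeholder bucket; ListBucketInventoryConfigurations is used here, but ListBucketAnalyticsConfigurations and ListBucketMetricsConfigurations paginate the same way):

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listInventoryConfigs is an illustrative helper that walks all pages, at most
// 100 configurations at a time, using NextContinuationToken.
func listInventoryConfigs(ctx context.Context, client *s3.Client, bucket string) error {
	var token *string
	for {
		out, err := client.ListBucketInventoryConfigurations(ctx, &s3.ListBucketInventoryConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return err
		}
		for _, cfg := range out.InventoryConfigurationList {
			fmt.Println(aws.ToString(cfg.Id))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil // no more pages
		}
		token = out.NextContinuationToken
	}
}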
+// +// This action supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. If +// there are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken . You use the NextContinuationToken value to +// continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory] +// // The following operations are related to ListBucketInventoryConfigurations : -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// [GetBucketInventoryConfiguration] +// +// [DeleteBucketInventoryConfiguration] +// +// [PutBucketInventoryConfiguration] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { if params == nil { params = &ListBucketInventoryConfigurationsInput{} @@ -71,6 +85,7 @@ type ListBucketInventoryConfigurationsInput struct { } func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -122,25 +137,28 @@ func (c *Client) 
addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -158,6 +176,18 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -167,7 +197,7 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil { @@ -191,6 +221,18 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go index fc2cf728..7f99075a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -13,28 +13,42 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the metrics -// configurations for the bucket. The metrics configurations are only for the -// request metrics of the bucket and do not provide information on daily storage -// metrics. You can have up to 1,000 configurations per bucket. 
This action -// supports list pagination and does not return more than 100 configurations at a -// time. Always check the IsTruncated element in the response. If there are no -// more configurations to list, IsTruncated is set to false. If there are more -// configurations to list, IsTruncated is set to true, and there is a value in -// NextContinuationToken . You use the NextContinuationToken value to continue the -// pagination of the list by passing the value in continuation-token in the -// request to GET the next page. To use this operation, you must have permissions -// to perform the s3:GetMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For more information about metrics configurations and CloudWatch request -// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to ListBucketMetricsConfigurations : -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// This operation is not supported for directory buckets. +// +// Lists the metrics configurations for the bucket. The metrics configurations are +// only for the request metrics of the bucket and do not provide information on +// daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. If +// there are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken . You use the NextContinuationToken value to +// continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For more information about metrics configurations and CloudWatch request +// metrics, see [Monitoring Metrics with Amazon CloudWatch]. 
+// +// The following operations are related to ListBucketMetricsConfigurations : +// +// [PutBucketMetricsConfiguration] +// +// [GetBucketMetricsConfiguration] +// +// [DeleteBucketMetricsConfiguration] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { if params == nil { params = &ListBucketMetricsConfigurationsInput{} @@ -72,6 +86,7 @@ type ListBucketMetricsConfigurationsInput struct { } func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -124,25 +139,28 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -160,6 +178,18 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -169,7 +199,7 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = 
addRecursionDetection(stack); err != nil { return err } if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil { @@ -193,6 +223,18 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go index a6189288..aec9715d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go @@ -13,11 +13,23 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns a list of all -// buckets owned by the authenticated sender of the request. To use this operation, -// you must have the s3:ListAllMyBuckets permission. For information about Amazon -// S3 buckets, see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) -// . +// This operation is not supported for directory buckets. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// To grant IAM permission to use this operation, you must add the +// s3:ListAllMyBuckets policy action. +// +// For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets]. +// +// We strongly recommend using only paginated ListBuckets requests. Unpaginated +// ListBuckets requests are only supported for Amazon Web Services accounts set to +// the default general purpose bucket quota of 10,000. If you have an approved +// general purpose bucket quota above 10,000, you must send paginated ListBuckets +// requests to list your account’s buckets. All unpaginated ListBuckets requests +// will be rejected for Amazon Web Services accounts with a general purpose bucket +// quota greater than 10,000. +// +// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { if params == nil { params = &ListBucketsInput{} @@ -34,6 +46,44 @@ func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optF } type ListBucketsInput struct { + + // Limits the response to buckets that are located in the specified Amazon Web + // Services Region. The Amazon Web Services Region must be expressed according to + // the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) + // Region. For a list of the valid values for all of the Amazon Web Services + // Regions, see [Regions and Endpoints]. + // + // Requests made to a Regional endpoint that is different from the bucket-region + // parameter are not supported. For example, if you want to limit the response to + // your buckets in Region us-west-2 , the request must be made to an endpoint in + // Region us-west-2 . 
+ // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + BucketRegion *string + + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. + // + // Length Constraints: Minimum length of 0. Maximum length of 1024. + // + // Required: No. + // + // If you specify the bucket-region , prefix , or continuation-token query + // parameters without using max-buckets to set the maximum number of buckets + // returned in the response, Amazon S3 applies a default page size of 10,000 and + // provides a continuation token if there are more buckets. + ContinuationToken *string + + // Maximum number of buckets to be returned in response. When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + MaxBuckets *int32 + + // Limits the response to bucket names that begin with the specified bucket name + // prefix. + Prefix *string + noSmithyDocumentSerde } @@ -42,9 +92,20 @@ type ListBucketsOutput struct { // The list of buckets owned by the requester. Buckets []types.Bucket + // ContinuationToken is included in the response when there are more buckets that + // can be listed with pagination. The next ListBuckets request to Amazon S3 can be + // continued with this ContinuationToken . ContinuationToken is obfuscated and is + // not a real bucket. + ContinuationToken *string + // The owner of the buckets listed. Owner *types.Owner + // If Prefix was sent with the request, it is included in the response. + // + // All bucket names in the response begin with the specified bucket name prefix. + Prefix *string + // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -73,25 +134,28 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -109,13 +173,25 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { return err } if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListBucketsUpdateEndpoint(stack, options); err != nil { @@ -139,9 +215,115 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } +// ListBucketsPaginatorOptions is the paginator options for ListBuckets +type ListBucketsPaginatorOptions struct { + // Maximum number of buckets to be returned in response. When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListBucketsPaginator is a paginator for ListBuckets +type ListBucketsPaginator struct { + options ListBucketsPaginatorOptions + client ListBucketsAPIClient + params *ListBucketsInput + nextToken *string + firstPage bool +} + +// NewListBucketsPaginator returns a new ListBucketsPaginator +func NewListBucketsPaginator(client ListBucketsAPIClient, params *ListBucketsInput, optFns ...func(*ListBucketsPaginatorOptions)) *ListBucketsPaginator { + if params == nil { + params = &ListBucketsInput{} + } + + options := ListBucketsPaginatorOptions{} + if params.MaxBuckets != nil { + options.Limit = *params.MaxBuckets + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListBucketsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.ContinuationToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListBucketsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListBuckets page. +func (p *ListBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListBucketsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.ContinuationToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxBuckets = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListBuckets(ctx, &params, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.ContinuationToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListBucketsAPIClient is a client that implements the ListBuckets operation. +type ListBucketsAPIClient interface { + ListBuckets(context.Context, *ListBucketsInput, ...func(*Options)) (*ListBucketsOutput, error) +} + +var _ ListBucketsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go index 373531ab..d8d85e9d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go @@ -15,23 +15,32 @@ import ( ) // Returns a list of all Amazon S3 directory buckets owned by the authenticated -// sender of the request. For more information about directory buckets, see -// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Regional endpoint. These endpoints -// support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported.
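As a usage note for the ListBuckets paginator added above, here is a minimal sketch of driving it from application code. The prefix and region filter values are hypothetical, and the sketch assumes the standard aws-sdk-go-v2 config loader is available alongside the vendored s3 package:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Standard client setup; credentials and region come from the environment.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Optional filters added in this SDK revision; values here are examples only.
	input := &s3.ListBucketsInput{
		Prefix:       aws.String("prod-"),
		BucketRegion: aws.String("us-west-2"),
	}

	// Limit maps to MaxBuckets on each underlying request.
	paginator := s3.NewListBucketsPaginator(client, input, func(o *s3.ListBucketsPaginatorOptions) {
		o.Limit = 100
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range page.Buckets {
			fmt.Println(aws.ToString(b.Name))
		}
	}
}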
For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions You must have the -// s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy -// instead of a bucket policy. Cross-account access to this API operation isn't -// supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . +// sender of the request. For more information about directory buckets, see [Directory buckets]in the +// Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in +// an IAM identity-based policy instead of a bucket policy. Cross-account access to +// this API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// The BucketRegion response element is not part of the ListDirectoryBuckets +// Response Syntax. +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { if params == nil { params = &ListDirectoryBucketsInput{} @@ -50,8 +59,9 @@ func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectory type ListDirectoryBucketsInput struct { // ContinuationToken indicates to Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. 
You can use this ContinuationToken for pagination of the list results. + // buckets in this account with a token. ContinuationToken is obfuscated and is + // not a real bucket name. You can use this ContinuationToken for the pagination + // of the list results. ContinuationToken *string // Maximum number of buckets to be returned in response. When the number is more @@ -104,25 +114,28 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -140,13 +153,25 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil { return err } if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListDirectoryBucketsUpdateEndpoint(stack, options); err != nil { @@ -170,17 +195,21 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } -// ListDirectoryBucketsAPIClient is a client that implements the -// ListDirectoryBuckets operation. 
-type ListDirectoryBucketsAPIClient interface { - ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) -} - -var _ ListDirectoryBucketsAPIClient = (*Client)(nil) - // ListDirectoryBucketsPaginatorOptions is the paginator options for // ListDirectoryBuckets type ListDirectoryBucketsPaginatorOptions struct { @@ -247,6 +276,9 @@ func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ... } params.MaxDirectoryBuckets = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListDirectoryBuckets(ctx, &params, optFns...) if err != nil { return nil, err @@ -266,6 +298,14 @@ func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ... return result, nil } +// ListDirectoryBucketsAPIClient is a client that implements the +// ListDirectoryBuckets operation. +type ListDirectoryBucketsAPIClient interface { + ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) +} + +var _ ListDirectoryBucketsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go index 3e6853e6..41bc8b46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -16,38 +16,49 @@ import ( // This operation lists in-progress multipart uploads in a bucket. An in-progress // multipart upload is a multipart upload that has been initiated by the // CreateMultipartUpload request, but has not yet been completed or aborted. +// // Directory buckets - If multipart uploads in a directory bucket are in progress, // you can't delete the bucket until all the in-progress multipart uploads are -// aborted or completed. The ListMultipartUploads operation returns a maximum of -// 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is -// also the default value. You can further limit the number of uploads in a -// response by specifying the max-uploads request parameter. If there are more -// than 1,000 multipart uploads that satisfy your ListMultipartUploads request, -// the response returns an IsTruncated element with the value of true , a -// NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining -// multipart uploads, you need to make subsequent ListMultipartUploads requests. -// In these requests, include two query parameters: key-marker and upload-id-marker -// . Set the value of key-marker to the NextKeyMarker value from the previous -// response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker -// value from the previous response. Directory buckets - The upload-id-marker -// element and the NextUploadIdMarker element aren't supported by directory -// buckets. To list the additional multipart uploads, you only need to set the -// value of key-marker to the NextKeyMarker value from the previous response. For -// more information about multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide.
Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// aborted or completed. To delete these in-progress multipart uploads, use the +// ListMultipartUploads operation to list the in-progress multipart uploads in the +// bucket and use the AbortMultipartUpload operation to abort all the in-progress +// multipart uploads. +// +// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads +// in the response. The limit of 1,000 multipart uploads is also the default value. +// You can further limit the number of uploads in a response by specifying the +// max-uploads request parameter. If there are more than 1,000 multipart uploads +// that satisfy your ListMultipartUploads request, the response returns an +// IsTruncated element with the value of true , a NextKeyMarker element, and a +// NextUploadIdMarker element. To list the remaining multipart uploads, you need to +// make subsequent ListMultipartUploads requests. In these requests, include two +// query parameters: key-marker and upload-id-marker . Set the value of key-marker +// to the NextKeyMarker value from the previous response. Similarly, set the value +// of upload-id-marker to the NextUploadIdMarker value from the previous response. +// +// Directory buckets - The upload-id-marker element and the NextUploadIdMarker +// element aren't supported by directory buckets. To list the additional multipart +// uploads, you only need to set the value of key-marker to the NextKeyMarker +// value from the previous response. +// +// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. 
Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -55,29 +66,49 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Sorting of multipart uploads in response +// // - General purpose bucket - In the ListMultipartUploads response, the multipart // uploads are sorted based on two criteria: +// // - Key-based sorting - Multipart uploads are initially sorted in ascending // order based on their object keys. +// // - Time-based sorting - For uploads that share the same object key, they are // further sorted in ascending order based on the upload initiation time. Among // uploads with the same key, the one that was initiated first will appear before // the ones that were initiated later. +// // - Directory bucket - In the ListMultipartUploads response, the multipart // uploads aren't sorted lexicographically based on the object keys. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to ListMultipartUploads : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
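A minimal sketch of the key-marker / upload-id-marker pagination described above, using only fields defined in this file; the function name and bucket handling are illustrative, and IsTruncated is treated as the pointer boolean used by this SDK version:

package listuploads // illustrative package name

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listInProgressUploads walks all in-progress multipart uploads in a bucket by
// feeding NextKeyMarker / NextUploadIdMarker back into the next request.
func listInProgressUploads(ctx context.Context, client *s3.Client, bucket string) error {
	in := &s3.ListMultipartUploadsInput{
		Bucket:     aws.String(bucket),
		MaxUploads: aws.Int32(1000),
	}
	for {
		out, err := client.ListMultipartUploads(ctx, in)
		if err != nil {
			return err
		}
		for _, u := range out.Uploads {
			fmt.Println(aws.ToString(u.Key), aws.ToString(u.UploadId))
		}
		// Stop once the listing is no longer truncated.
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		// Carry the markers forward; for directory buckets only key-marker is
		// used, since upload-id-marker isn't supported there.
		in.KeyMarker = out.NextKeyMarker
		in.UploadIdMarker = out.NextUploadIdMarker
	}
}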
+// +// The following operations are related to ListMultipartUploads : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [AbortMultipartUpload] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { if params == nil { params = &ListMultipartUploadsInput{} @@ -95,50 +126,69 @@ func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipart type ListMultipartUploadsInput struct { - // The name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string - // Character you use to group keys. All keys that contain the same string between - // the prefix, if specified, and the first occurrence of the delimiter after the - // prefix are grouped under a single result element, CommonPrefixes . If you don't - // specify the prefix parameter, then the substring starts at the beginning of the - // key. The keys that are grouped under CommonPrefixes result element are not - // returned elsewhere in the response. Directory buckets - For directory buckets, / - // is the only supported delimiter. + // Character you use to group keys. 
+ // + // All keys that contain the same string between the prefix, if specified, and the + // first occurrence of the delimiter after the prefix are grouped under a single + // result element, CommonPrefixes . If you don't specify the prefix parameter, then + // the substring starts at the beginning of the key. The keys that are grouped + // under CommonPrefixes result element are not returned elsewhere in the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -147,20 +197,26 @@ type ListMultipartUploadsInput struct { ExpectedBucketOwner *string // Specifies the multipart upload after which listing should begin. + // // - General purpose buckets - For general purpose buckets, key-marker is an // object key. Together with upload-id-marker , this parameter specifies the - // multipart upload after which listing should begin. If upload-id-marker is not - // specified, only the keys lexicographically greater than the specified - // key-marker will be included in the list. If upload-id-marker is specified, any - // multipart uploads for a key equal to the key-marker might also be included, - // provided those multipart uploads have upload IDs lexicographically greater than - // the specified upload-id-marker . + // multipart upload after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to the + // key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker . + // // - Directory buckets - For directory buckets, key-marker is obfuscated and // isn't a real object key. The upload-id-marker parameter isn't supported by // directory buckets. 
To list the additional multipart uploads, you only need to // set the value of key-marker to the NextKeyMarker value from the previous - // response. In the ListMultipartUploads response, the multipart uploads aren't - // sorted lexicographically based on the object keys. + // response. + // + // In the ListMultipartUploads response, the multipart uploads aren't sorted + // lexicographically based on the object keys. KeyMarker *string // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the @@ -171,32 +227,38 @@ type ListMultipartUploadsInput struct { // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping of // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) Directory buckets - For directory buckets, only - // prefixes that end in a delimiter ( / ) are supported. + // use a folder in a file system.) + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter is // ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . This functionality is not - // supported for directory buckets. + // greater than the specified upload-id-marker . + // + // This functionality is not supported for directory buckets. UploadIdMarker *string noSmithyDocumentSerde } func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -210,20 +272,25 @@ type ListMultipartUploadsOutput struct { // If you specify a delimiter in the request, then the result returns each // distinct key prefix containing the delimiter in a CommonPrefixes element. The - // distinct key prefixes are returned in the Prefix child element. Directory - // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are - // supported. + // distinct key prefixes are returned in the Prefix child element. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. CommonPrefixes []types.CommonPrefix // Contains the delimiter you specified in the request. 
If you don't specify a - // delimiter in your request, this element is absent from the response. Directory - // buckets - For directory buckets, / is the only supported delimiter. + // delimiter in your request, this element is absent from the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. If you - // specify the encoding-type request parameter, Amazon S3 includes this element in - // the response, and returns encoded key name values in the following response - // elements: Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . EncodingType types.EncodingType // Indicates whether the returned list of multipart uploads is truncated. A value @@ -244,22 +311,31 @@ type ListMultipartUploadsOutput struct { NextKeyMarker *string // When a list is truncated, this element specifies the value that should be used - // for the upload-id-marker request parameter in a subsequent request. This - // functionality is not supported for directory buckets. + // for the upload-id-marker request parameter in a subsequent request. + // + // This functionality is not supported for directory buckets. NextUploadIdMarker *string // When a prefix is provided in the request, this field contains the specified // prefix. The result contains only keys starting with the specified prefix. - // Directory buckets - For directory buckets, only prefixes that end in a delimiter - // ( / ) are supported. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // Upload ID after which listing began. This functionality is not supported for - // directory buckets. + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter is + // ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker . + // + // This functionality is not supported for directory buckets. UploadIdMarker *string // Container for elements related to a particular multipart upload. 
A response can @@ -294,25 +370,28 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -330,6 +409,18 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { return err } @@ -339,7 +430,7 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil { @@ -363,6 +454,18 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go index 2a0cd10f..6cfc9862 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go @@ -13,19 +13,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns metadata about -// all versions of the objects in a bucket. You can also use request parameters as -// selection criteria to return metadata about a subset of all the object versions. +// This operation is not supported for directory buckets. +// +// Returns metadata about all versions of the objects in a bucket. 
You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. +// // To use this operation, you must have permission to perform the -// s3:ListBucketVersions action. Be aware of the name difference. A 200 OK -// response can contain valid or invalid XML. Make sure to design your application -// to parse the contents of the response and handle it appropriately. To use this -// operation, you must have READ access to the bucket. The following operations are -// related to ListObjectVersions : -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// s3:ListBucketVersions action. Be aware of the name difference. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions : +// +// [ListObjectsV2] +// +// [GetObject] +// +// [PutObject] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { if params == nil { params = &ListObjectVersionsInput{} @@ -55,12 +70,20 @@ type ListObjectVersionsInput struct { // are not returned elsewhere in the response. Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . 
+ // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -93,10 +116,12 @@ type ListObjectVersionsInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Specifies the object version you want to start listing from. @@ -106,6 +131,7 @@ type ListObjectVersionsInput struct { } func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -117,7 +143,10 @@ type ListObjectVersionsOutput struct { // calculating the number of returns. CommonPrefixes []types.CommonPrefix - // Container for an object that is a delete marker. + // Container for an object that is a delete marker. To learn more about delete + // markers, see [Working with delete markers]. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarkers []types.DeleteMarkerEntry // The delimiter grouping the included keys. A delimiter is a character that you @@ -127,10 +156,13 @@ type ListObjectVersionsOutput struct { // max-keys limitation. These keys are not returned elsewhere in the response. Delimiter *string - // Encoding type used by Amazon S3 to encode object key names in the XML response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . + // response elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that @@ -164,7 +196,9 @@ type ListObjectVersionsOutput struct { Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Marks the last version of the key returned in a truncated response. 
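// Editorial sketch (not part of the vendored diff): the KeyMarker / NextKeyMarker and
// VersionIdMarker / NextVersionIdMarker fields documented in this file drive pagination for
// ListObjectVersions; ListMultipartUploads follows the same pattern with its key-marker and
// upload-id-marker parameters described earlier. The function name and the client, bucket,
// and prefix values below are placeholders for illustration, and error handling is minimal.
//
// Assumed imports: "context", "fmt",
// "github.com/aws/aws-sdk-go-v2/aws",
// "github.com/aws/aws-sdk-go-v2/service/s3".
func listAllVersions(ctx context.Context, client *s3.Client, bucket, prefix string) error {
	var keyMarker, versionIDMarker *string
	for {
		out, err := client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{
			Bucket:          aws.String(bucket),
			Prefix:          aws.String(prefix),
			KeyMarker:       keyMarker,
			VersionIdMarker: versionIDMarker,
		})
		if err != nil {
			return err
		}
		for _, v := range out.Versions {
			fmt.Printf("%s (version %s, latest=%v)\n",
				aws.ToString(v.Key), aws.ToString(v.VersionId), aws.ToBool(v.IsLatest))
		}
		for _, dm := range out.DeleteMarkers {
			fmt.Printf("%s (delete marker, version %s)\n",
				aws.ToString(dm.Key), aws.ToString(dm.VersionId))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		// Feed the Next* values from this page back in as the markers for the next page.
		keyMarker = out.NextKeyMarker
		versionIDMarker = out.NextVersionIdMarker
	}
}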
@@ -201,25 +235,28 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -237,6 +274,18 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { return err } @@ -246,7 +295,7 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil { @@ -270,6 +319,18 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go index f0732edc..61aa4deb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go @@ -13,19 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns some or all (up -// to 1,000) of the objects in a bucket. You can use the request parameters as -// selection criteria to return a subset of the objects in a bucket. A 200 OK -// response can contain valid or invalid XML. Be sure to design your application to -// parse the contents of the response and handle it appropriately. This action has -// been revised. 
We recommend that you use the newer version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// , when developing applications. For backward compatibility, Amazon S3 continues -// to support ListObjects . The following operations are related to ListObjects : -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// This operation is not supported for directory buckets. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use the +// request parameters as selection criteria to return a subset of the objects in a +// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design +// your application to parse the contents of the response and handle it +// appropriately. +// +// This action has been revised. We recommend that you use the newer version, [ListObjectsV2], +// when developing applications. For backward compatibility, Amazon S3 continues to +// support ListObjects . +// +// The following operations are related to ListObjects : +// +// [ListObjectsV2] +// +// [GetObject] +// +// [PutObject] +// +// [CreateBucket] +// +// [ListBuckets] +// +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { if params == nil { params = &ListObjectsInput{} @@ -43,31 +59,40 @@ func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optF type ListObjectsInput struct { - // The name of the bucket containing the objects. Directory buckets - When you use - // this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. 
For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket containing the objects. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -75,12 +100,20 @@ type ListObjectsInput struct { // A delimiter is a character that you use to group keys. 
Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -113,6 +146,7 @@ type ListObjectsInput struct { } func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -121,14 +155,20 @@ func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { type ListObjectsOutput struct { // All of the keys (up to 1,000) rolled up in a common prefix count as a single - // return when calculating the number of returns. A response can contain - // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if - // there are any) keys between Prefix and the next occurrence of the string - // specified by the delimiter. CommonPrefixes lists keys that act like - // subdirectories in the directory specified by Prefix . For example, if the prefix - // is notes/ and the delimiter is a slash ( / ), as in notes/summer/july , the - // common prefix is notes/summer/ . All of the keys that roll up into a common - // prefix count as a single return when calculating the number of returns. + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the next + // occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory + // specified by Prefix . + // + // For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in + // notes/summer/july , the common prefix is notes/summer/ . All of the keys that + // roll up into a common prefix count as a single return when calculating the + // number of returns. CommonPrefixes []types.CommonPrefix // Metadata about each object returned. @@ -141,7 +181,20 @@ type ListObjectsOutput struct { // MaxKeys value. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. 
+ // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that @@ -161,18 +214,21 @@ type ListObjectsOutput struct { // When the response is truncated (the IsTruncated element value in the response // is true ), you can use the key name in this field as the marker parameter in // the subsequent request to get the next set of objects. Amazon S3 lists objects - // in alphabetical order. This element is returned only if you have the delimiter - // request parameter specified. If the response does not include the NextMarker - // element and it is truncated, you can use the value of the last Key element in - // the response as the marker parameter in the subsequent request to get the next - // set of object keys. + // in alphabetical order. + // + // This element is returned only if you have the delimiter request parameter + // specified. If the response does not include the NextMarker element and it is + // truncated, you can use the value of the last Key element in the response as the + // marker parameter in the subsequent request to get the next set of object keys. NextMarker *string // Keys that begin with the indicated prefix. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
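// Editorial sketch (not part of the vendored diff): the NextMarker behavior documented above
// for the legacy ListObjects operation -- returned only when a delimiter is specified, with the
// last Key of a truncated page acting as the fallback marker -- can be handled with a loop like
// the one below. The function name and the client and bucket values are placeholders.
//
// If EncodingType: types.EncodingTypeUrl were set on the request, returned key names would come
// back percent-encoded (test_file(3).png -> test_file%283%29.png) and could be decoded with
// url.PathUnescape from net/url before display.
//
// Assumed imports: "context", "fmt",
// "github.com/aws/aws-sdk-go-v2/aws",
// "github.com/aws/aws-sdk-go-v2/service/s3".
func listAllLegacy(ctx context.Context, client *s3.Client, bucket string) error {
	var marker *string
	for {
		out, err := client.ListObjects(ctx, &s3.ListObjectsInput{
			Bucket: aws.String(bucket),
			Marker: marker,
		})
		if err != nil {
			return err
		}
		for _, obj := range out.Contents {
			fmt.Println(aws.ToString(obj.Key))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		switch {
		case out.NextMarker != nil:
			// Returned only when the request specified a delimiter.
			marker = out.NextMarker
		case len(out.Contents) > 0:
			// Otherwise the last Key of this page is the marker for the next request.
			marker = out.Contents[len(out.Contents)-1].Key
		default:
			return nil
		}
	}
}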
@@ -203,25 +259,28 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -239,6 +298,18 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectsValidationMiddleware(stack); err != nil { return err } @@ -248,7 +319,7 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListObjectsUpdateEndpoint(stack, options); err != nil { @@ -272,6 +343,18 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go index b2f182df..efa2e437 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go @@ -17,26 +17,34 @@ import ( // You can use the request parameters as selection criteria to return a subset of // the objects in a bucket. A 200 OK response can contain valid or invalid XML. // Make sure to design your application to parse the contents of the response and -// handle it appropriately. For more information about listing objects, see -// Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) -// in the Amazon S3 User Guide. 
To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// . Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// handle it appropriately. For more information about listing objects, see [Listing object keys programmatically]in the +// Amazon S3 User Guide. To get a list of your buckets, see [ListBuckets]. +// +// - General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't +// return prefixes that are related only to in-progress multipart uploads. +// +// - Directory buckets - For directory buckets, ListObjectsV2 response includes +// the prefixes that are related only to in-progress multipart uploads. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - To use this operation, you must have // READ access to the bucket. You must have permission to perform the // s3:ListBucket action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. +// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -44,24 +52,43 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. 
Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Sorting order of returned objects +// // - General purpose bucket - For general purpose buckets, ListObjectsV2 returns // objects in lexicographical order based on their key names. +// // - Directory bucket - For directory buckets, ListObjectsV2 does not return // objects in lexicographical order. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . This section describes the -// latest revision of this action. We recommend that you use this revised API -// operation for application development. For backward compatibility, Amazon S3 -// continues to support the prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) -// . The following operations are related to ListObjectsV2 : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// This section describes the latest revision of this action. We recommend that +// you use this revised API operation for application development. For backward +// compatibility, Amazon S3 continues to support the prior version of this API +// operation, [ListObjects]. 
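// Editorial sketch (not part of the vendored diff): for the recommended ListObjectsV2
// operation, the SDK's generated paginator (ListObjectsV2Paginator, defined further down in
// this file) drives the continuation-token loop. The function name and the client, bucket,
// and prefix values are placeholders for illustration.
//
// Assumed imports: "context", "fmt",
// "github.com/aws/aws-sdk-go-v2/aws",
// "github.com/aws/aws-sdk-go-v2/service/s3".
func listWithPaginator(ctx context.Context, client *s3.Client, bucket, prefix string) error {
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, obj := range page.Contents {
			fmt.Printf("%s\t%d bytes\n", aws.ToString(obj.Key), aws.ToInt64(obj.Size))
		}
	}
	return nil
}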
+// +// The following operations are related to ListObjectsV2 : +// +// [GetObject] +// +// [PutObject] +// +// [CreateBucket] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { if params == nil { params = &ListObjectsV2Input{} @@ -79,30 +106,38 @@ func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, type ListObjectsV2Input struct { - // Directory buckets - When you use this operation with a directory bucket, you + // Directory buckets - When you use this operation with a directory bucket, you // must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -113,16 +148,31 @@ type ListObjectsV2Input struct { ContinuationToken *string // A delimiter is a character that you use to group keys. - // - Directory buckets - For directory buckets, / is the only supported - // delimiter. + // + // - Directory buckets - For directory buckets, / is the only supported delimiter. + // // - Directory buckets - When you query ListObjectsV2 with a delimiter during // in-progress multipart uploads, the CommonPrefixes response parameter contains // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) - // in the Amazon S3 User Guide. + // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. 
+ // + // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -132,8 +182,10 @@ type ListObjectsV2Input struct { // The owner field is not present in ListObjectsV2 by default. If you want to // return the owner field with each key in the result, then set the FetchOwner - // field to true . Directory buckets - For directory buckets, the bucket owner is - // returned as the object owner for all objects. + // field to true . + // + // Directory buckets - For directory buckets, the bucket owner is returned as the + // object owner for all objects. FetchOwner *bool // Sets the maximum number of keys returned in the response. By default, the @@ -142,29 +194,35 @@ type ListObjectsV2Input struct { MaxKeys *int32 // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. This functionality is not supported - // for directory buckets. + // that you do not specify are not returned. + // + // This functionality is not supported for directory buckets. OptionalObjectAttributes []types.OptionalObjectAttributes - // Limits the response to keys that begin with the specified prefix. Directory - // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are - // supported. + // Limits the response to keys that begin with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // Confirms that the requester knows that she or he will be charged for the list // objects request in V2 style. Bucket owners need not specify this parameter in - // their requests. This functionality is not supported for directory buckets. + // their requests. + // + // This functionality is not supported for directory buckets. RequestPayer types.RequestPayer // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. This - // functionality is not supported for directory buckets. + // listing after this specified key. StartAfter can be any key in the bucket. + // + // This functionality is not supported for directory buckets. 
StartAfter *string noSmithyDocumentSerde } func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -174,43 +232,57 @@ type ListObjectsV2Output struct { // All of the keys (up to 1,000) that share the same prefix are grouped together. // When counting the total numbers of returns by this API operation, this group of - // keys is considered as one item. A response can contain CommonPrefixes only if - // you specify a delimiter. CommonPrefixes contains all (if there are any) keys - // between Prefix and the next occurrence of the string specified by a delimiter. + // keys is considered as one item. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the next + // occurrence of the string specified by a delimiter. + // // CommonPrefixes lists keys that act like subdirectories in the directory - // specified by Prefix . For example, if the prefix is notes/ and the delimiter is - // a slash ( / ) as in notes/summer/july , the common prefix is notes/summer/ . All - // of the keys that roll up into a common prefix count as a single return when - // calculating the number of returns. + // specified by Prefix . + // + // For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in + // notes/summer/july , the common prefix is notes/summer/ . All of the keys that + // roll up into a common prefix count as a single return when calculating the + // number of returns. + // // - Directory buckets - For directory buckets, only prefixes that end in a // delimiter ( / ) are supported. + // // - Directory buckets - When you query ListObjectsV2 with a delimiter during // in-progress multipart uploads, the CommonPrefixes response parameter contains // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) - // in the Amazon S3 User Guide. + // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + // + // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html CommonPrefixes []types.CommonPrefix // Metadata about each object returned. Contents []types.Object - // If ContinuationToken was sent with the request, it is included in the response. - // You can use the returned ContinuationToken for pagination of the list response. - // You can use this ContinuationToken for pagination of the list results. + // If ContinuationToken was sent with the request, it is included in the + // response. You can use the returned ContinuationToken for pagination of the list + // response. You can use this ContinuationToken for pagination of the list + // results. ContinuationToken *string // Causes keys that contain the same string between the prefix and the first // occurrence of the delimiter to be rolled up into a single result element in the // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. Directory buckets - For directory buckets, / is the only - // supported delimiter. + // MaxKeys value. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. 
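// Editorial sketch (not part of the vendored diff): combining Prefix with the "/" delimiter,
// as described in the CommonPrefixes documentation above (prefix notes/ rolling keys such as
// notes/summer/july up into notes/summer/), returns the immediate "subdirectories" in
// CommonPrefixes and the directly contained objects in Contents. The function name and the
// client, bucket, and prefix values are placeholders for illustration.
//
// Assumed imports: "context", "fmt",
// "github.com/aws/aws-sdk-go-v2/aws",
// "github.com/aws/aws-sdk-go-v2/service/s3".
func listFolder(ctx context.Context, client *s3.Client, bucket, prefix string) error {
	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket:    aws.String(bucket),
		Prefix:    aws.String(prefix), // e.g. "notes/"
		Delimiter: aws.String("/"),
	})
	if err != nil {
		return err
	}
	for _, cp := range out.CommonPrefixes {
		fmt.Println("common prefix:", aws.ToString(cp.Prefix)) // e.g. "notes/summer/"
	}
	for _, obj := range out.Contents {
		fmt.Println("object:", aws.ToString(obj.Key))
	}
	return nil
}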
Delimiter *string // Encoding type used by Amazon S3 to encode object key names in the XML response. + // // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: Delimiter, Prefix, Key, and StartAfter . + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter . EncodingType types.EncodingType // Set to false if all of the results were returned. Set to true if more keys are @@ -237,16 +309,21 @@ type ListObjectsV2Output struct { // obfuscated and is not a real key NextContinuationToken *string - // Keys that begin with the indicated prefix. Directory buckets - For directory - // buckets, only prefixes that end in a delimiter ( / ) are supported. + // Keys that begin with the indicated prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If StartAfter was sent with the request, it is included in the response. This - // functionality is not supported for directory buckets. + // If StartAfter was sent with the request, it is included in the response. + // + // This functionality is not supported for directory buckets. StartAfter *string // Metadata pertaining to the operation's result. @@ -277,25 +354,28 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -313,6 +393,18 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { return err } @@ -322,7 +414,7 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } 
- if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil { @@ -346,23 +438,21 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } - return nil -} - -func (v *ListObjectsV2Input) bucket() (string, bool) { - if v.Bucket == nil { - return "", false + if err = addSpanInitializeStart(stack); err != nil { + return err } - return *v.Bucket, true -} - -// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. -type ListObjectsV2APIClient interface { - ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil } -var _ ListObjectsV2APIClient = (*Client)(nil) - // ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 type ListObjectsV2PaginatorOptions struct { // Sets the maximum number of keys returned in the response. By default, the @@ -428,6 +518,9 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O } params.MaxKeys = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListObjectsV2(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -450,6 +543,20 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O return result, nil } +func (v *ListObjectsV2Input) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. +type ListObjectsV2APIClient interface { + ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) +} + +var _ ListObjectsV2APIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go index 11b0d59b..4ddd495e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go @@ -14,55 +14,81 @@ import ( "time" ) -// Lists the parts that have been uploaded for a specific multipart upload. To use -// this operation, you must provide the upload ID in the request. You obtain this -// uploadID by sending the initiate multipart upload request through -// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . The ListParts request returns a maximum of 1,000 uploaded parts. The limit of +// Lists the parts that have been uploaded for a specific multipart upload. +// +// To use this operation, you must provide the upload ID in the request. You +// obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload]. +// +// The ListParts request returns a maximum of 1,000 uploaded parts. The limit of // 1,000 parts is also the default value. 
You can restrict the number of parts in a // response by specifying the max-parts request parameter. If your multipart // upload consists of more than 1,000 parts, the response returns an IsTruncated // field with the value of true , and a NextPartNumberMarker element. To list // remaining uploaded parts, in subsequent ListParts requests, include the // part-number-marker query string parameter and set its value to the -// NextPartNumberMarker field value from the previous response. For more -// information on multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. If the upload was created using server-side -// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer -// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must -// have permission to the kms:Decrypt action for the ListParts request to -// succeed. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. 
For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// +// If the upload was created using server-side encryption with Key Management +// +// Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon +// Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt +// action for the ListParts request to succeed. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to ListParts : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are -// related to ListParts : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// [AbortMultipartUpload] +// +// [GetObjectAttributes] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { if params == nil { params = &ListPartsInput{} @@ -80,31 +106,40 @@ func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns . type ListPartsInput struct { - // The name of the bucket to which the parts are being uploaded. Directory buckets - // - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the parts are being uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -135,37 +170,46 @@ type ListPartsInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. 
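Listing the parts of an in-progress multipart upload is normally done through the ListParts paginator rather than by hand-rolling the part-number-marker requests the documentation above describes. A hedged sketch, reusing the imports and client construction from the earlier ListObjectsV2 sketch; the bucket, key, and upload ID are placeholders, with the upload ID coming from a prior CreateMultipartUpload call.

    func listUploadedParts(ctx context.Context, client *s3.Client, uploadID string) error {
        p := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
            Bucket:   aws.String("example-bucket"), // placeholder
            Key:      aws.String("big-object.bin"), // placeholder
            UploadId: aws.String(uploadID),         // returned by CreateMultipartUpload
        })
        for p.HasMorePages() {
            page, err := p.NextPage(ctx)
            if err != nil {
                return err
            }
            for _, part := range page.Parts {
                fmt.Printf("part %d: %d bytes\n", aws.ToInt32(part.PartNumber), aws.ToInt64(part.Size))
            }
        }
        return nil
    }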
+ // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -177,17 +221,21 @@ type ListPartsOutput struct { // incomplete multipart uploads and the prefix in the lifecycle rule matches the // object name in the request, then the response includes this header indicating // when the initiated multipart upload will become eligible for abort operation. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // . The response will also include the x-amz-abort-rule-id header that will - // provide the ID of the lifecycle configuration rule that defines this action. + // For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + // // This functionality is not supported for directory buckets. + // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. This functionality is not supported for directory - // buckets. + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not @@ -197,13 +245,22 @@ type ListPartsOutput struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm types.ChecksumAlgorithm + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // response to verify that the checksum type that is received is the same checksum + // type that was specified in CreateMultipartUpload request. For more information, + // see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Container element that identifies who initiated the multipart upload. If the // initiator is an Amazon Web Services account, this element provides the same // information as the Owner element. If the initiator is an IAM User, this element // provides the user ARN and display name. Initiator *types.Initiator - // Indicates whether the returned list of parts is truncated. A true value + // Indicates whether the returned list of parts is truncated. A true value // indicates that the list was truncated. A list can be truncated if the number of // parts exceeds the limit returned in the MaxParts element. IsTruncated *bool @@ -221,13 +278,14 @@ type ListPartsOutput struct { // Container element that identifies the object owner, after the object is // created. 
If multipart upload is initiated by an IAM user, this element provides - // the parent account ID and display name. Directory buckets - The bucket owner is - // returned as the object owner for all the parts. + // the parent account ID and display name. + // + // Directory buckets - The bucket owner is returned as the object owner for all + // the parts. Owner *types.Owner - // When a list is truncated, this element specifies the last part in the list, as - // well as the value to use for the part-number-marker request parameter in a - // subsequent request. + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. PartNumberMarker *string // Container for elements related to a particular part. A response can contain @@ -235,12 +293,16 @@ type ListPartsOutput struct { Parts []types.Part // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // The class of storage used to store the uploaded object. Directory buckets - - // Only the S3 Express One Zone storage class is supported by directory buckets to - // store objects. + // The class of storage used to store the uploaded object. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. StorageClass types.StorageClass // Upload ID identifying the multipart upload whose parts are being listed. @@ -274,25 +336,28 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -310,6 +375,18 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListPartsValidationMiddleware(stack); err != nil { return err } @@ -319,7 +396,7 @@ func (c *Client) 
addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListPartsUpdateEndpoint(stack, options); err != nil { @@ -343,23 +420,21 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } - return nil -} - -func (v *ListPartsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false + if err = addSpanInitializeStart(stack); err != nil { + return err } - return *v.Bucket, true -} - -// ListPartsAPIClient is a client that implements the ListParts operation. -type ListPartsAPIClient interface { - ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil } -var _ ListPartsAPIClient = (*Client)(nil) - // ListPartsPaginatorOptions is the paginator options for ListParts type ListPartsPaginatorOptions struct { // Sets the maximum number of parts to return. @@ -423,6 +498,9 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio } params.MaxParts = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListParts(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -445,6 +523,20 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio return result, nil } +func (v *ListPartsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListPartsAPIClient is a client that implements the ListParts operation. +type ListPartsAPIClient interface { + ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) +} + +var _ ListPartsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go index 80344efb..51de19c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -15,30 +15,45 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the accelerate -// configuration of an existing bucket. Amazon S3 Transfer Acceleration is a -// bucket-level feature that enables you to perform faster data transfers to Amazon -// S3. To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster data +// transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the // s3:PutAccelerateConfiguration action. 
The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The Transfer Acceleration state of a bucket can be set to one of the following +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// The Transfer Acceleration state of a bucket can be set to one of the following // two values: +// // - Enabled – Enables accelerated data transfers to the bucket. +// // - Suspended – Disables accelerated data transfers to the bucket. // -// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// action returns the transfer acceleration state of a bucket. After setting the -// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty -// minutes before the data transfer rates to the bucket increase. The name of the -// bucket used for Transfer Acceleration must be DNS-compliant and must not contain -// periods ("."). For more information about transfer acceleration, see Transfer -// Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . The following operations are related to PutBucketAccelerateConfiguration : -// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it might +// take up to thirty minutes before the data transfer rates to the bucket increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant and +// must not contain periods ("."). +// +// For more information about transfer acceleration, see [Transfer Acceleration]. 
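As a minimal sketch of what a caller of this operation looks like: enabling Transfer Acceleration on a placeholder bucket (suspending it is the same call with BucketAccelerateStatusSuspended). Here types is github.com/aws/aws-sdk-go-v2/service/s3/types, and the client is built as in the earlier sketch.

    func enableTransferAcceleration(ctx context.Context, client *s3.Client) error {
        _, err := client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
            // Placeholder bucket; as noted above, it must be DNS-compliant and contain no periods.
            Bucket: aws.String("example-bucket"),
            AccelerateConfiguration: &types.AccelerateConfiguration{
                Status: types.BucketAccelerateStatusEnabled,
            },
        })
        return err
    }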
+// +// The following operations are related to PutBucketAccelerateConfiguration : +// +// [GetBucketAccelerateConfiguration] +// +// [CreateBucket] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +// [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { if params == nil { params = &PutBucketAccelerateConfigurationInput{} @@ -66,14 +81,17 @@ type PutBucketAccelerateConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -85,6 +103,7 @@ type PutBucketAccelerateConfigurationInput struct { } func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -118,25 +137,28 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -154,6 +176,21 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } @@ -163,7 +200,7 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil { @@ -190,6 +227,18 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -219,9 +268,10 @@ func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{} } func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketAccelerateConfigurationRequestAlgorithmMember, 
RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go index 6382d276..a7d37a86 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go @@ -15,89 +15,159 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the permissions on -// an existing bucket using access control lists (ACL). For more information, see -// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// . To set the ACL of a bucket, you must have the WRITE_ACP permission. You can -// use one of the following two ways to set a bucket's permissions: +// This operation is not supported for directory buckets. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the +// WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// // - Specify the ACL in the request body +// // - Specify permissions using request headers // // You cannot specify access permission using both the body and the request -// headers. Depending on your application needs, you may choose to set the ACL on a -// bucket using either the request body or the headers. For example, if you have an +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an // existing application that updates a bucket ACL using the request body, then you -// can continue to use that approach. If your bucket uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. You must use policies to grant access to your bucket and the -// objects in it. Requests to set ACLs or update ACLs fail and return the -// AccessControlListNotSupported error code. Requests to read ACLs are still -// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Permissions You can set access permissions by using -// one of the following methods: +// can continue to use that approach. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies to +// grant access to your bucket and the objects in it. Requests to set ACLs or +// update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the +// Amazon S3 User Guide. +// +// Permissions You can set access permissions by using one of the following +// methods: +// // - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a // set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined // set of grantees and permissions. Specify the canned ACL name as the value of // x-amz-acl . 
If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . +// control-specific headers in your request. For more information, see [Canned ACL]. +// // - Specify access permissions explicitly with the x-amz-grant-read , // x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the // permission. If you use these ACL-specific headers, you cannot use the // x-amz-acl header to set a canned ACL. These parameters map to the set of -// permissions that Amazon S3 supports in an ACL. For more information, see -// Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// . You specify each grantee as a type=value pair, where the type is one of the -// following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-write header grants create, overwrite, and delete objects -// permission to LogDelivery group predefined by Amazon S3 and two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-write: -// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", -// id="555566667777" +// permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. +// +// You specify each grantee as a type=value pair, where the type is one of the +// +// following: +// +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// +// - uri – if you are granting permissions to a predefined group +// +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. +// +// For example, the following x-amz-grant-write header grants create, overwrite, +// +// and delete objects permission to LogDelivery group predefined by Amazon S3 and +// two Amazon Web Services accounts identified by their email addresses. 
+// +// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// +// id="111122223333", id="555566667777" // // You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// - By Email address: <>Grantees@email.com<>& The grantee is resolved to the -// CanonicalUser and, in a response to a GET Object acl request, appears as the -// CanonicalUser. Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. +// cannot do both. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request +// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// - By Email address: +// +// <>Grantees@email.com<>& +// +// The grantee is resolved to the CanonicalUser and, in a response to a GET Object +// +// acl request, appears as the CanonicalUser. +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. 
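Of the two mechanisms described above, the canned-ACL header is the simpler one to wire up; a hedged sketch with a placeholder bucket, using the same client and types package as the earlier sketches. Buckets using the bucket-owner-enforced Object Ownership setting reject ACL writes with AccessControlListNotSupported, as the documentation above points out.

    func setBucketACLPrivate(ctx context.Context, client *s3.Client) error {
        _, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
            Bucket: aws.String("example-bucket"), // placeholder
            // A canned ACL; it cannot be combined with the explicit x-amz-grant-* headers.
            ACL: types.BucketCannedACLPrivate,
        })
        return err
    }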
// // The following operations are related to PutBucketAcl : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [GetObjectAcl] +// +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html +// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) { if params == nil { params = &PutBucketAclInput{} @@ -126,21 +196,27 @@ type PutBucketAclInput struct { // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *types.AccessControlPolicy - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. 
+ // + // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -158,9 +234,10 @@ type PutBucketAclInput struct { // Allows grantee to read the bucket ACL. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. GrantWrite *string // Allows grantee to write the ACL for the applicable bucket. @@ -170,6 +247,7 @@ type PutBucketAclInput struct { } func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -203,25 +281,28 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -239,6 +320,21 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { return err } @@ -248,7 +344,7 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil { @@ -278,6 +374,18 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil 
{ + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -307,9 +415,10 @@ func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketAclRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go index 9b1009f6..8e13fbcd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -14,45 +14,67 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets an analytics -// configuration for the bucket (specified by the analytics configuration ID). You -// can have up to 1,000 analytics configurations per bucket. You can choose to have -// storage class analysis export analysis reports sent to a comma-separated values -// (CSV) flat file. See the DataExport request element. Reports are updated daily -// and are based on the object filters that you configure. When selecting data -// export, you specify a destination bucket and an optional destination prefix -// where the file is written. You can export the data to a destination bucket in a -// different account. However, the destination bucket must be in the same Region as -// the bucket that you are making the PUT analytics configuration to. For more -// information, see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . You must create a bucket policy on the destination bucket where the exported +// This operation is not supported for directory buckets. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per bucket. +// +// You can choose to have storage class analysis export analysis reports sent to a +// comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you +// configure. When selecting data export, you specify a destination bucket and an +// optional destination prefix where the file is written. You can export the data +// to a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// You must create a bucket policy on the destination bucket where the exported // file is written to grant permissions to Amazon S3 to write objects to the -// bucket. 
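The checksum hunks above now thread options.RequestChecksumCalculation into the input-checksum middleware and add request-checksum metrics tracking. On the calling side this appears to be a per-client option; a sketch under that assumption, with cfg coming from config.LoadDefaultConfig as in the first sketch (WhenSupported is the SDK default, WhenRequired limits automatic checksums to operations that mandate one).

    client := s3.NewFromConfig(cfg, func(o *s3.Options) {
        // Only compute request checksums when an operation requires one,
        // rather than whenever the operation supports one.
        o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
    })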
For an example policy, see Granting Permissions for Amazon S3 Inventory -// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// . To use this operation, you must have permissions to perform the +// bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. +// +// To use this operation, you must have permissions to perform the // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketAnalyticsConfiguration has the following special errors: +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// PutBucketAnalyticsConfiguration has the following special errors: +// // - HTTP Error: HTTP 400 Bad Request +// // - Code: InvalidArgument +// // - Cause: Invalid argument. +// // - HTTP Error: HTTP 400 Bad Request +// // - Code: TooManyConfigurations +// // - Cause: You are attempting to create a new configuration but have already // reached the 1,000-configuration limit. +// // - HTTP Error: HTTP 403 Forbidden +// // - Code: AccessDenied +// // - Cause: You are not the owner of the specified bucket, or you do not have // the s3:PutAnalyticsConfiguration bucket permission to set the configuration on // the bucket. 
// // The following operations are related to PutBucketAnalyticsConfiguration : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// [GetBucketAnalyticsConfiguration] +// +// [DeleteBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &PutBucketAnalyticsConfigurationInput{} @@ -94,6 +116,7 @@ type PutBucketAnalyticsConfigurationInput struct { } func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -127,25 +150,28 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -163,6 +189,18 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if 
err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -172,7 +210,7 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { @@ -196,6 +234,18 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go index 394a2bad..653a1598 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go @@ -15,35 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the cors -// configuration for your bucket. If the configuration exists, Amazon S3 replaces -// it. To use this operation, you must be allowed to perform the s3:PutBucketCORS +// This operation is not supported for directory buckets. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS // action. By default, the bucket owner has this permission and can grant it to -// others. You set this configuration on a bucket so that the bucket can service +// others. +// +// You set this configuration on a bucket so that the bucket can service // cross-origin requests. For example, you might want to enable a request whose // origin is http://www.example.com to access your Amazon S3 bucket at -// my.example.bucket.com by using the browser's XMLHttpRequest capability. To -// enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// my.example.bucket.com by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors // subresource to the bucket. The cors subresource is an XML document in which you // configure rules that identify origins and the HTTP methods that can be executed -// on your bucket. The document is limited to 64 KB in size. When Amazon S3 -// receives a cross-origin request (or a pre-flight OPTIONS request) against a -// bucket, it evaluates the cors configuration on the bucket and uses the first -// CORSRule rule that matches the incoming browser request to enable a cross-origin -// request. For a rule to match, the following conditions must be met: +// on your bucket. The document is limited to 64 KB in size. 
+// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS +// request) against a bucket, it evaluates the cors configuration on the bucket +// and uses the first CORSRule rule that matches the incoming browser request to +// enable a cross-origin request. For a rule to match, the following conditions +// must be met: +// // - The request's Origin header must match AllowedOrigin elements. +// // - The request method (for example, GET, PUT, HEAD, and so on) or the // Access-Control-Request-Method header in case of a pre-flight OPTIONS request // must be one of the AllowedMethod elements. +// // - Every header specified in the Access-Control-Request-Headers request header // of a pre-flight request must match an AllowedHeader element. // -// For more information about CORS, go to Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. The following operations are related to -// PutBucketCors : -// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. +// +// The following operations are related to PutBucketCors : +// +// [GetBucketCors] +// +// [DeleteBucketCors] +// +// [RESTOPTIONSobject] +// +// [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html +// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { if params == nil { params = &PutBucketCorsInput{} @@ -67,27 +86,34 @@ type PutBucketCorsInput struct { Bucket *string // Describes the cross-origin access configuration for objects in an Amazon S3 - // bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) - // in the Amazon S3 User Guide. + // bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. + // + // [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html // // This member is required. CORSConfiguration *types.CORSConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -99,6 +125,7 @@ type PutBucketCorsInput struct { } func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -132,25 +159,28 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -168,6 +198,21 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -177,7 +222,7 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + 
if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil { @@ -207,6 +252,18 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +293,10 @@ func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketCorsRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go index 615e98a3..87d25702 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go @@ -15,30 +15,116 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This action uses the -// encryption subresource to configure default encryption and Amazon S3 Bucket Keys -// for an existing bucket. By default, all buckets have a default encryption -// configuration that uses server-side encryption with Amazon S3 managed keys -// (SSE-S3). You can optionally configure default encryption for a bucket by using -// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or -// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). -// If you specify default encryption by using SSE-KMS, you can also configure -// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does -// not validate the KMS key ID provided in PutBucketEncryption requests. This -// action requires Amazon Web Services Signature Version 4. For more information, -// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// . To use this operation, you must have permission to perform the -// s3:PutEncryptionConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. 
For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. The following operations are related to -// PutBucketEncryption : -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// This operation configures default encryption and Amazon S3 Bucket Keys for an +// existing bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// By default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets +// +// - You can optionally configure default encryption for a bucket by using +// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or +// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). +// If you specify default encryption by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys]. +// For information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption]in the +// Amazon S3 User Guide. +// +// - If you use PutBucketEncryption to set your [default bucket encryption]to SSE-KMS, you should verify +// that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID +// provided in PutBucketEncryption requests. +// +// - Directory buckets - You can optionally configure default encryption for a +// bucket by using server-side encryption with Key Management Service (KMS) keys +// (SSE-KMS). +// +// - We recommend that the bucket's default encryption uses the desired +// encryption configuration and you don't override the bucket default encryption in +// your CreateSession requests or PUT object requests. Then, new objects are +// automatically encrypted with the desired encryption settings. For more +// information about the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads] +// . +// +// - Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket's +// lifetime. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. +// +// - S3 Bucket Keys are always enabled for GET and PUT operations in a directory +// bucket and can’t be disabled. 
S3 Bucket Keys aren't supported, when you copy +// SSE-KMS encrypted objects from general purpose buckets to directory buckets, +// from directory buckets to general purpose buckets, or between directory buckets, +// through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a +// copy request is made for a KMS-encrypted object. +// +// - When you specify an [KMS customer managed key]for encryption in your directory bucket, only use the +// key ID or key ARN. The key alias format of the KMS key isn't supported. +// +// - For directory buckets, if you use PutBucketEncryption to set your [default bucket encryption]to +// SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption +// requests. +// +// If you're specifying a customer managed KMS key, we recommend using a fully +// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the +// key within the requester’s account. This behavior can result in data that's +// encrypted with a KMS key that belongs to the requester, and not the bucket +// owner. +// +// Also, this action requires Amazon Web Services Signature Version 4. For more +// information, see [Authenticating Requests (Amazon Web Services Signature Version 4)]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:PutEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// To set a directory bucket default encryption with SSE-KMS, you must also have +// +// the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based +// policies and KMS key policies for the target KMS key. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . 
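
The rewritten doc comment above describes the SSE-KMS default-encryption behavior in some depth, so a short usage sketch may help while reviewing this bump. It is illustrative only and not part of the vendored patch; the bucket name and KMS key ARN are hypothetical placeholders, and it assumes the pointer-typed fields used by this SDK release (hence the aws.String/aws.Bool helpers).

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Load credentials and region from the default chain.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Set SSE-KMS as the bucket default and enable S3 Bucket Keys.
	// Bucket name and KMS key ARN are hypothetical placeholders.
	_, err = client.PutBucketEncryption(context.TODO(), &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/example"),
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```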
+// +// The following operations are related to PutBucketEncryption : +// +// [GetBucketEncryption] +// +// [DeleteBucketEncryption] +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html +// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html +// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job +// [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { if params == nil { params = &PutBucketEncryptionInput{} @@ -57,13 +143,18 @@ func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncry type PutBucketEncryptionInput struct { // Specifies default encryption for a bucket using server-side encryption with - // different key options. By default, all buckets have a default encryption - // configuration that uses server-side encryption with Amazon S3 managed keys - // (SSE-S3). You can optionally configure default encryption for a bucket by using - // server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a - // customer-provided key (SSE-C). For information about the bucket default - // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon S3 User Guide. 
+ // different key options. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string @@ -73,31 +164,45 @@ type PutBucketEncryptionInput struct { // This member is required. ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the server-side encryption - // configuration. For requests made using the Amazon Web Services Command Line - // Interface (CLI) or Amazon Web Services SDKs, this field is calculated - // automatically. + // The Base64 encoded 128-bit MD5 digest of the server-side encryption + // configuration. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -131,25 +236,28 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -167,6 +275,21 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -176,7 +299,7 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil { @@ -206,6 +329,18 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -235,9 +370,10 @@ func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bo } func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketEncryptionRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, 
EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go index a5f8fc7a..c032b403 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go @@ -14,37 +14,58 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Puts a S3 -// Intelligent-Tiering configuration to the specified bucket. You can have up to -// 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can +// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to PutBucketIntelligentTieringConfiguration include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
+// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] // // You only need S3 Intelligent-Tiering enabled on a bucket if you want to // automatically move objects stored in the S3 Intelligent-Tiering storage class to // the Archive Access or Deep Archive Access tier. -// PutBucketIntelligentTieringConfiguration has the following special errors: HTTP -// 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument HTTP 400 Bad -// Request Error Code: TooManyConfigurations Cause: You are attempting to create a -// new configuration but have already reached the 1,000-configuration limit. HTTP -// 403 Forbidden Error Cause: You are not the owner of the specified bucket, or you -// do not have the s3:PutIntelligentTieringConfiguration bucket permission to set -// the configuration on the bucket. +// +// PutBucketIntelligentTieringConfiguration has the following special errors: +// +// HTTP 400 Bad Request Error Code: InvalidArgument +// +// Cause: Invalid Argument +// +// HTTP 400 Bad Request Error Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, +// or you do not have the s3:PutIntelligentTieringConfiguration bucket permission +// to set the configuration on the bucket. +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &PutBucketIntelligentTieringConfigurationInput{} @@ -82,6 +103,7 @@ type PutBucketIntelligentTieringConfigurationInput struct { } func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -115,25 +137,28 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); 
err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -151,6 +176,18 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -160,7 +197,7 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { @@ -184,6 +221,18 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go index e2d06796..773ba76e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -14,48 +14,76 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the PUT action adds an inventory configuration (identified by the inventory ID) -// to the bucket. You can have up to 1,000 inventory configurations per bucket. +// This operation is not supported for directory buckets. +// +// This implementation of the PUT action adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// // Amazon S3 inventory generates inventories of the objects in the bucket on a // daily or weekly basis, and the results are published to a flat file. The bucket // that is inventoried is called the source bucket, and the bucket where the // inventory flat file is stored is called the destination bucket. The destination -// bucket must be in the same Amazon Web Services Region as the source bucket. When -// you configure an inventory for a source bucket, you specify the destination -// bucket where you want the inventory to be stored, and whether to generate the -// inventory daily or weekly. 
You can also configure what object metadata to -// include and whether to inventory all object versions or only current versions. -// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon S3 User Guide. You must create a bucket policy on the destination -// bucket to grant permissions to Amazon S3 to write objects to the bucket in the -// defined location. For an example policy, see Granting Permissions for Amazon S3 -// Inventory and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// . Permissions To use this operation, you must have permission to perform the +// bucket must be in the same Amazon Web Services Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the +// destination bucket where you want the inventory to be stored, and whether to +// generate the inventory daily or weekly. You can also configure what object +// metadata to include and whether to inventory all object versions or only current +// versions. For more information, see [Amazon S3 Inventory]in the Amazon S3 User Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For an +// example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. +// +// Permissions To use this operation, you must have permission to perform the // s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. The -// s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) -// report that includes all object metadata fields available and to specify the +// default and can grant this permission to others. +// +// The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report +// that includes all object metadata fields available and to specify the // destination bucket to store the inventory. A user with read access to objects in // the destination bucket can also access all object metadata fields that are -// available in the inventory report. To restrict access to an inventory report, -// see Restricting access to an Amazon S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) -// in the Amazon S3 User Guide. For more information about the metadata fields -// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) -// in the Amazon S3 User Guide. For more information about permissions, see -// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. 
PutBucketInventoryConfiguration has the following -// special errors: HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid -// Argument HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. HTTP 403 Forbidden Error Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket. The following -// operations are related to PutBucketInventoryConfiguration : -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// available in the inventory report. +// +// To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide. +// For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists] +// in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations]and [Identity and access management in Amazon S3] +// in the Amazon S3 User Guide. +// +// PutBucketInventoryConfiguration has the following special errors: +// +// HTTP 400 Bad Request Error Code: InvalidArgument +// +// Cause: Invalid Argument +// +// HTTP 400 Bad Request Error Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, +// or you do not have the s3:PutInventoryConfiguration bucket permission to set +// the configuration on the bucket. 
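
Since the inventory doc comment above is the main description of this operation in the updated SDK, here is a minimal, hedged sketch of creating a daily CSV inventory with the new client. It is illustrative only and not part of the vendored code; the bucket names and inventory ID are hypothetical, and the enum constants are assumed from the generated types package.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Daily CSV inventory of current object versions, delivered to a
	// hypothetical destination bucket (the destination Bucket field takes an ARN).
	_, err = client.PutBucketInventoryConfiguration(context.TODO(), &s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("example-source-bucket"),
		Id:     aws.String("example-inventory"),
		InventoryConfiguration: &types.InventoryConfiguration{
			Id:                     aws.String("example-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: types.InventoryIncludedObjectVersionsCurrent,
			Schedule:               &types.InventorySchedule{Frequency: types.InventoryFrequencyDaily},
			Destination: &types.InventoryDestination{
				S3BucketDestination: &types.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
					Format: types.InventoryFormatCsv,
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```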
+// +// The following operations are related to PutBucketInventoryConfiguration : +// +// [GetBucketInventoryConfiguration] +// +// [DeleteBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html +// [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10 +// [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { if params == nil { params = &PutBucketInventoryConfigurationInput{} @@ -97,6 +125,7 @@ type PutBucketInventoryConfigurationInput struct { } func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -130,25 +159,28 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -166,6 +198,18 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = 
addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -175,7 +219,7 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { @@ -199,6 +243,18 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go index ac2b63eb..567b8077 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -15,25 +15,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates a new lifecycle -// configuration for the bucket or replaces an existing lifecycle configuration. -// Keep in mind that this will overwrite an existing lifecycle configuration, so if -// you want to retain any configuration details, they must be included in the new -// lifecycle configuration. For information about lifecycle configuration, see -// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// . Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The previous version of the -// API supported filtering based only on an object key name prefix, which is -// supported for backward compatibility. For the related API description, see -// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// . Rules You specify the lifecycle configuration in your request body. The -// lifecycle configuration is specified as XML consisting of one or more rules. An -// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not -// adjustable. Each rule consists of the following: +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information about +// lifecycle configuration, see [Managing your storage lifecycle]. 
+// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any +// combination of these. Accordingly, this section describes the latest API. The +// previous version of the API supported filtering based only on an object key name +// prefix, which is supported for backward compatibility. For the related API +// description, see [PutBucketLifecycle]. +// +// Rules Permissions HTTP Host header syntax You specify the lifecycle +// configuration in your request body. The lifecycle configuration is specified as +// XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can +// have up to 1,000 rules. This limit is not adjustable. +// +// Bucket lifecycle configuration supports specifying a lifecycle rule using an +// object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility for general purpose buckets. For +// the related API description, see [PutBucketLifecycle]. +// +// Lifecyle configurations for directory buckets only support expiring objects and +// cancelling multipart uploads. Expiring of versioned objects,transitions and tag +// filters are not supported. +// +// A lifecycle rule consists of the following: // // - A filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, or a combination of both. +// filter can be based on a key name prefix, object tags, object size, or any +// combination of these. // // - A status indicating whether the rule is in effect. // @@ -44,28 +59,69 @@ import ( // versions). Amazon S3 provides predefined actions that you can specify for // current and noncurrent object versions. // -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html) -// . Permissions By default, all Amazon S3 resources are private, including -// buckets, objects, and related subresources (for example, lifecycle configuration -// and website configuration). Only the resource owner (that is, the Amazon Web -// Services account that created it) can access the resource. The resource owner -// can optionally grant access permissions to others by writing an access policy. -// For this operation, a user must get the s3:PutLifecycleConfiguration -// permission. You can also explicitly deny permissions. An explicit deny also -// supersedes any other permissions. If you want to block users or accounts from -// removing or deleting objects from your bucket, you must deny them permissions -// for the following actions: -// - s3:DeleteObject -// - s3:DeleteObjectVersion -// - s3:PutLifecycleConfiguration +// For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements]. +// +// - General purpose bucket permissions - By default, all Amazon S3 resources +// are private, including buckets, objects, and related subresources (for example, +// lifecycle configuration and website configuration). Only the resource owner +// (that is, the Amazon Web Services account that created it) can access the +// resource. 
The resource owner can optionally grant access permissions to others +// by writing an access policy. For this operation, a user must have the +// s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. An explicit deny also supersedes any +// +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// - s3:DeleteObject +// +// - s3:DeleteObjectVersion +// +// - s3:PutLifecycleConfiguration +// +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - You must have the +// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy +// to use this operation. Cross-account access to this API operation isn't +// supported. The resource owner can optionally grant access permissions to others +// by creating a role or user for them as long as they are within the same account +// as the owner and resource. +// +// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in +// +// the Amazon S3 User Guide. // -// For more information about permissions, see Managing Access Permissions to Your -// Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to PutBucketLifecycleConfiguration : -// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// Directory buckets - For directory buckets, you must make requests for this API +// +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . 
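
The lifecycle description above, together with the new TransitionDefaultMinimumObjectSize member added later in this file, is easier to follow with a concrete call. A minimal sketch follows, assuming a hypothetical bucket and prefix and the struct form of types.LifecycleRuleFilter used by recent SDK releases; the new knob is set via its raw enum string rather than a guessed constant name (illustrative only, not vendored code).

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// One rule: expire objects under the hypothetical "tmp/" prefix after 30 days.
	out, err := client.PutBucketLifecycleConfiguration(context.TODO(), &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
			Rules: []types.LifecycleRule{{
				ID:         aws.String("expire-tmp"),
				Status:     types.ExpirationStatusEnabled,
				Filter:     &types.LifecycleRuleFilter{Prefix: aws.String("tmp/")},
				Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
			}},
		},
		// New in this SDK version; applies to general purpose buckets only.
		TransitionDefaultMinimumObjectSize: types.TransitionDefaultMinimumObjectSize("varies_by_storage_class"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied; default minimum object size: %s", out.TransitionDefaultMinimumObjectSize)
}
```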
+// +// The following operations are related to PutBucketLifecycleConfiguration : +// +// [GetBucketLifecycleConfiguration] +// +// [DeleteBucketLifecycle] +// +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { if params == nil { params = &PutBucketLifecycleConfigurationInput{} @@ -88,33 +144,79 @@ type PutBucketLifecycleConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string // Container for lifecycle rules. 
You can add as many as 1,000 rules. LifecycleConfiguration *types.BucketLifecycleConfiguration + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. + // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. + TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + noSmithyDocumentSerde } func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketLifecycleConfigurationOutput struct { + + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. + // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. + TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -143,25 +245,28 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -179,6 +284,21 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -188,7 +308,7 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil { @@ -218,6 +338,18 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -247,9 +379,10 @@ func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) } func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketLifecycleConfigurationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go index e69fa24c..53342aca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go @@ -15,39 +15,68 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Set the logging -// parameters for a bucket and to specify permissions for who can view and modify -// the logging parameters. All logs are saved to buckets in the same Amazon Web -// Services Region as the source bucket. To set the logging status of a bucket, you -// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL -// to all logs. You use the Grantee request element to grant access to other -// people. The Permissions request element specifies the kind of access the -// grantee has to the logs. If the target bucket for log delivery uses the bucket -// owner enforced setting for S3 Object Ownership, you can't use the Grantee -// request element to grant access to others. Permissions can only be granted using -// policies. For more information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) -// to whom you're assigning access rights (by using request elements) in the -// following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. -// - By Email address: <>Grantees@email.com<> The grantee is resolved to the -// CanonicalUser and, in a response to a GETObjectAcl request, appears as the -// CanonicalUser. -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// This operation is not supported for directory buckets. +// +// Set the logging parameters for a bucket and to specify permissions for who can +// view and modify the logging parameters. All logs are saved to buckets in the +// same Amazon Web Services Region as the source bucket. To set the logging status +// of a bucket, you must be the bucket owner. +// +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use the +// Grantee request element to grant access to other people. The Permissions +// request element specifies the kind of access the grantee has to the logs. +// +// If the target bucket for log delivery uses the bucket owner enforced setting +// for S3 Object Ownership, you can't use the Grantee request element to grant +// access to others. Permissions can only be granted using policies. For more +// information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (by using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request. +// +// - By Email address: +// +// <>Grantees@email.com<> +// +// The grantee is resolved to the CanonicalUser and, in a response to a +// +// GETObjectAcl request, appears as the CanonicalUser. 
+// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // // To enable logging, you use LoggingEnabled and its children request elements. To -// disable logging, you use an empty BucketLoggingStatus request element: For -// more information about server access logging, see Server Access Logging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) -// in the Amazon S3 User Guide. For more information about creating a bucket, see -// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . For more information about returning the logging status of a bucket, see -// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) -// . The following operations are related to PutBucketLogging : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +// disable logging, you use an empty BucketLoggingStatus request element: +// +// For more information about server access logging, see [Server Access Logging] in the Amazon S3 User +// Guide. +// +// For more information about creating a bucket, see [CreateBucket]. For more information about +// returning the logging status of a bucket, see [GetBucketLogging]. +// +// The following operations are related to PutBucketLogging : +// +// [PutObject] +// +// [DeleteBucket] +// +// [CreateBucket] +// +// [GetBucketLogging] +// +// [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { if params == nil { params = &PutBucketLoggingInput{} @@ -75,19 +104,23 @@ type PutBucketLoggingInput struct { // This member is required. BucketLoggingStatus *types.BucketLoggingStatus - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash of the PutBucketLogging request body. For requests made using the - // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, - // this field is calculated automatically. + // The MD5 hash of the PutBucketLogging request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -99,6 +132,7 @@ type PutBucketLoggingInput struct { } func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -132,25 +166,28 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -168,6 +205,21 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -177,7 +229,7 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil { @@ -207,6 +259,18 @@ 
func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +300,10 @@ func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketLoggingRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go index 099736c1..e386b1e9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -14,29 +14,44 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets a metrics -// configuration (specified by the metrics configuration ID) for the bucket. You -// can have up to 1,000 metrics configurations per bucket. If you're updating an -// existing metrics configuration, note that this is a full replacement of the -// existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. To use this operation, you must have permissions to -// perform the s3:PutMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to PutBucketMetricsConfiguration : -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// This operation is not supported for directory buckets. +// +// Sets a metrics configuration (specified by the metrics configuration ID) for +// the bucket. You can have up to 1,000 metrics configurations per bucket. 
If +// you're updating an existing metrics configuration, note that this is a full +// replacement of the existing metrics configuration. If you don't include the +// elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the +// s3:PutMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to PutBucketMetricsConfiguration : +// +// [DeleteBucketMetricsConfiguration] +// +// [GetBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] // // PutBucketMetricsConfiguration has the following special error: +// // - Error code: TooManyConfigurations +// // - Description: You are attempting to create a new configuration but have // already reached the 1,000-configuration limit. +// // - HTTP Status Code: HTTP 400 Bad Request +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { if params == nil { params = &PutBucketMetricsConfigurationInput{} @@ -79,6 +94,7 @@ type PutBucketMetricsConfigurationInput struct { } func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,25 +128,28 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { 
+ if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -148,6 +167,18 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -157,7 +188,7 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { @@ -181,6 +212,18 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go index 7139f6ea..9100fb19 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -14,41 +14,59 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Enables notifications of -// specified events for a bucket. For more information about event notifications, -// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . Using this API, you can replace an existing notification configuration. The +// This operation is not supported for directory buckets. +// +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see [Configuring Event Notifications]. +// +// Using this API, you can replace an existing notification configuration. The // configuration is an XML file that defines the event types that you want Amazon // S3 to publish and the destination where you want Amazon S3 to publish an event -// notification when it detects an event of the specified type. By default, your -// bucket has no event notifications configured. That is, the notification -// configuration will be an empty NotificationConfiguration . This action -// replaces the existing notification configuration with the configuration you -// include in the request body. 
After Amazon S3 receives this request, it first -// verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon -// Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner -// has permission to publish to it by sending a test notification. In the case of -// Lambda destinations, Amazon S3 verifies that the Lambda function permissions -// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For -// more information, see Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . You can disable notifications by adding the empty NotificationConfiguration -// element. For more information about the number of event notification -// configurations that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) -// in Amazon Web Services General Reference. By default, only the bucket owner can -// configure notifications on a bucket. However, bucket owners can use a bucket -// policy to grant permission to other users to set this configuration with the -// required s3:PutBucketNotification permission. The PUT notification is an atomic -// operation. For example, suppose your notification configuration includes SNS -// topic, SQS queue, and Lambda function configurations. When you send a PUT -// request with this configuration, Amazon S3 sends test messages to your SNS -// topic. If the message fails, the entire PUT action will fail, and Amazon S3 will -// not add the configuration to your bucket. If the configuration in the request -// body includes only one TopicConfiguration specifying only the -// s3:ReducedRedundancyLostObject event type, the response will also include the -// x-amz-sns-test-message-id header containing the message ID of the test -// notification sent to the topic. The following action is related to -// PutBucketNotificationConfiguration : -// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. That is, the +// notification configuration will be an empty NotificationConfiguration . +// +// This action replaces the existing notification configuration with the +// configuration you include in the request body. +// +// After Amazon S3 receives this request, it first verifies that any Amazon Simple +// Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) +// destination exists, and that the bucket owner has permission to publish to it by +// sending a test notification. In the case of Lambda destinations, Amazon S3 +// verifies that the Lambda function permissions grant Amazon S3 permission to +// invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events]. +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// For more information about the number of event notification configurations that +// you can create per bucket, see [Amazon S3 service quotas]in Amazon Web Services General Reference. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with the required s3:PutBucketNotification +// permission. 
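Purely as a hedged sketch (editor addition, not part of the vendored file) of the "disable notifications by adding the empty NotificationConfiguration element" note above; the client is assumed to be an *s3.Client built with s3.NewFromConfig, and the bucket name is a placeholder.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// disableBucketNotifications clears all event notification configuration on the
// bucket by sending an empty NotificationConfiguration, as described above.
func disableBucketNotifications(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket:                    aws.String(bucket),
		NotificationConfiguration: &types.NotificationConfiguration{},
	})
	return err
}
```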
+// +// The PUT notification is an atomic operation. For example, suppose your +// notification configuration includes SNS topic, SQS queue, and Lambda function +// configurations. When you send a PUT request with this configuration, Amazon S3 +// sends test messages to your SNS topic. If the message fails, the entire PUT +// action will fail, and Amazon S3 will not add the configuration to your bucket. +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. +// +// The following action is related to PutBucketNotificationConfiguration : +// +// [GetBucketNotificationConfiguration] +// +// [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3 +// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html +// [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { if params == nil { params = &PutBucketNotificationConfigurationInput{} @@ -90,6 +108,7 @@ type PutBucketNotificationConfigurationInput struct { } func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -123,25 +142,28 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -159,6 +181,18 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } @@ -168,7 +202,7 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { @@ -192,6 +226,18 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go index f89f86d6..96f27ccd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go @@ -15,14 +15,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates or modifies -// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -// the s3:PutBucketOwnershipControls permission. For more information about Amazon -// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using object ownership (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html) -// . The following operations are related to PutBucketOwnershipControls : -// - GetBucketOwnershipControls -// - DeleteBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For more +// information about Amazon S3 permissions, see [Specifying permissions in a policy]. +// +// For information about Amazon S3 Object Ownership, see [Using object ownership]. +// +// The following operations are related to PutBucketOwnershipControls : +// +// # GetBucketOwnershipControls +// +// # DeleteBucketOwnershipControls +// +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html +// [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { if params == nil { params = &PutBucketOwnershipControlsInput{} @@ -51,9 +59,10 @@ type PutBucketOwnershipControlsInput struct { // This member is required. OwnershipControls *types.OwnershipControls - // The MD5 hash of the OwnershipControls request body. 
For requests made using the - // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, - // this field is calculated automatically. + // The MD5 hash of the OwnershipControls request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -65,6 +74,7 @@ type PutBucketOwnershipControlsInput struct { } func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -98,25 +108,28 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -134,6 +147,21 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -143,7 +171,7 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil { @@ -173,6 +201,18 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -192,9 +232,10 @@ func 
newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *a } func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: nil, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go index b3da186a..1764f492 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go @@ -15,48 +15,66 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets - -// For directory buckets, you must make requests for this API operation to the -// Regional endpoint. These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the PutBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns a -// 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. 
+// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the PutBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:PutBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:PutBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] +// in the Amazon S3 User Guide. // -// Example bucket policies General purpose buckets example bucket policies - See -// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// in the Amazon S3 User Guide. Directory bucket example bucket policies - See -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . The -// following operations are related to PutBucketPolicy : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . 
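A minimal sketch of attaching a bucket policy through this operation (editor addition, not part of the vendored file), assuming a general purpose bucket and a caller with s3:PutBucketPolicy; the bucket name and the deny-insecure-transport policy are illustrative only.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putDenyInsecureTransportPolicy attaches a bucket policy that denies any
// request made without TLS. The policy document is a common example pattern,
// not a recommendation for any particular bucket.
func putDenyInsecureTransportPolicy(ctx context.Context, client *s3.Client, bucket string) error {
	policy := fmt.Sprintf(`{
  "Version": "2012-10-17",
  "Statement": [{
    "Sid": "DenyInsecureTransport",
    "Effect": "Deny",
    "Principal": "*",
    "Action": "s3:*",
    "Resource": ["arn:aws:s3:::%[1]s", "arn:aws:s3:::%[1]s/*"],
    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
  }]
}`, bucket)

	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String(bucket),
		Policy: aws.String(policy),
	})
	return err
}
```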
+// +// The following operations are related to PutBucketPolicy : +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { if params == nil { params = &PutBucketPolicyInput{} @@ -74,68 +92,90 @@ func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInp type PutBucketPolicyInput struct { - // The name of the bucket. Directory buckets - When you use this operation with a - // directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The name of the bucket. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string - // The bucket policy as a JSON document. For directory buckets, the only IAM - // action supported in the bucket policy is s3express:CreateSession . + // The bucket policy as a JSON document. + // + // For directory buckets, the only IAM action supported in the bucket policy is + // s3express:CreateSession . // // This member is required. Policy *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. 
When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . + // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . For directory buckets, when you - // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's - // used for performance. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. This functionality is not supported - // for directory buckets. + // to change this bucket policy in the future. + // + // This functionality is not supported for directory buckets. ConfirmRemoveSelfBucketAccess *bool - // The MD5 hash of the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. This functionality is not supported for directory - // buckets. + // The MD5 hash of the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). 
+ // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -169,25 +209,28 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -205,6 +248,21 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -214,7 +272,7 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil { @@ -244,6 +302,18 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -273,9 +343,10 @@ func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: 
getPutBucketPolicyRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go index ddf58ad8..1b11ef39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go @@ -15,47 +15,71 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates a replication -// configuration or replaces an existing one. For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. Specify the replication configuration in the -// request body. In the replication configuration, you provide the name of the -// destination bucket or buckets where you want Amazon S3 to replicate objects, the -// IAM role that Amazon S3 can assume to replicate objects on your behalf, and -// other relevant information. You can invoke this request for a specific Amazon -// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) -// condition key. A replication configuration must include at least one rule, and -// can contain a maximum of 1,000. Each rule identifies a subset of objects to -// replicate by filtering the objects in the source bucket. To choose additional -// subsets of objects to replicate, add a rule for each subset. To specify a subset -// of the objects in the source bucket to apply a replication rule to, add the -// Filter element as a child of the Rule element. You can filter objects based on -// an object key prefix, one or more object tags, or both. When you add the Filter -// element in the configuration, you must also add the following elements: -// DeleteMarkerReplication , Status , and Priority . If you are using an earlier -// version of the replication configuration, Amazon S3 handles replication of -// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) -// . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) -// . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't +// This operation is not supported for directory buckets. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see [Replication]in the Amazon S3 User Guide. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets where +// you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume +// to replicate objects on your behalf, and other relevant information. You can +// invoke this request for a specific Amazon Web Services Region by using the [aws:RequestedRegion] +// aws:RequestedRegion condition key. +// +// A replication configuration must include at least one rule, and can contain a +// maximum of 1,000. 
Each rule identifies a subset of objects to replicate by +// filtering the objects in the source bucket. To choose additional subsets of +// objects to replicate, add a rule for each subset. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. When +// you add the Filter element in the configuration, you must also add the following +// elements: DeleteMarkerReplication , Status , and Priority . +// +// If you are using an earlier version of the replication configuration, Amazon S3 +// handles replication of delete markers differently. For more information, see [Backward Compatibility]. +// +// For information about enabling versioning on a bucket, see [Using Versioning]. +// +// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't // replicate objects that are stored at rest using server-side encryption with KMS // keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: // SourceSelectionCriteria , SseKmsEncryptedObjects , Status , // EncryptionConfiguration , and ReplicaKmsKeyID . For information about -// replication configuration, see Replicating Objects Created with SSE Using KMS -// keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html) -// . For information on PutBucketReplication errors, see List of -// replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// replication configuration, see [Replicating Objects Created with SSE Using KMS keys]. +// +// For information on PutBucketReplication errors, see [List of replication-related error codes] +// // Permissions To create a PutBucketReplication request, you must have -// s3:PutReplicationConfiguration permissions for the bucket. By default, a -// resource owner, in this case the Amazon Web Services account that created the -// bucket, can perform this operation. The resource owner can also grant others -// permissions to perform the operation. For more information about permissions, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . To perform this operation, the user or role performing the action must have -// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) -// permission. The following operations are related to PutBucketReplication : -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// s3:PutReplicationConfiguration permissions for the bucket. +// +// By default, a resource owner, in this case the Amazon Web Services account that +// created the bucket, can perform this operation. The resource owner can also +// grant others permissions to perform the operation. For more information about +// permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// To perform this operation, the user or role performing the action must have the [iam:PassRole] +// permission. 
+// +// The following operations are related to PutBucketReplication : +// +// [GetBucketReplication] +// +// [DeleteBucketReplication] +// +// [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html +// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html +// [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion +// [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html +// [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList +// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations +// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { if params == nil { params = &PutBucketReplicationInput{} @@ -84,21 +108,27 @@ type PutBucketReplicationInput struct { // This member is required. ReplicationConfiguration *types.ReplicationConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . 
For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -113,6 +143,7 @@ type PutBucketReplicationInput struct { } func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -146,25 +177,28 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -182,6 +216,21 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -191,7 +240,7 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil { @@ -221,6 +270,18 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -250,9 +311,10 @@ func 
getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, b } func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketReplicationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go index d1dc5a76..a6d19c60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go @@ -15,14 +15,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the request payment -// configuration for a bucket. By default, the bucket owner pays for downloads from -// the bucket. This configuration parameter enables the bucket owner (only) to -// specify that the person requesting the download will be charged for the -// download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) -// . The following operations are related to PutBucketRequestPayment : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// This operation is not supported for directory buckets. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download will +// be charged for the download. For more information, see [Requester Pays Buckets]. +// +// The following operations are related to PutBucketRequestPayment : +// +// [CreateBucket] +// +// [GetBucketRequestPayment] +// +// [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html +// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { if params == nil { params = &PutBucketRequestPaymentInput{} @@ -50,21 +58,27 @@ type PutBucketRequestPaymentInput struct { // This member is required. RequestPaymentConfiguration *types.RequestPaymentConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. 
When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -76,6 +90,7 @@ type PutBucketRequestPaymentInput struct { } func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -109,25 +124,28 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -145,6 +163,21 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -154,7 +187,7 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil { @@ -184,6 +217,18 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -213,9 +258,10 @@ func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string } func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketRequestPaymentRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go index 725facc1..db5e62df 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go @@ -15,39 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the tags for a -// bucket. Use tags to organize your Amazon Web Services bill to reflect your own -// cost structure. To do this, sign up to get your Amazon Web Services account bill -// with tag key values included. Then, to see the cost of combined resources, -// organize your billing information according to resources with the same tag key -// values. For example, you can tag several resources with a specific application -// name, and then organize your billing information to see the total cost of that -// application across several services. For more information, see Cost Allocation -// and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) -// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) -// . When this operation sets the tags for a bucket, it will overwrite any current +// This operation is not supported for directory buckets. +// +// Sets the tags for a bucket. +// +// Use tags to organize your Amazon Web Services bill to reflect your own cost +// structure. 
To do this, sign up to get your Amazon Web Services account bill with +// tag key values included. Then, to see the cost of combined resources, organize +// your billing information according to resources with the same tag key values. +// For example, you can tag several resources with a specific application name, and +// then organize your billing information to see the total cost of that application +// across several services. For more information, see [Cost Allocation and Tagging]and [Using Cost Allocation in Amazon S3 Bucket Tags]. +// +// When this operation sets the tags for a bucket, it will overwrite any current // tags the bucket already has. You cannot use this operation to add tags to an -// existing list of tags. To use this operation, you must have permissions to -// perform the s3:PutBucketTagging action. The bucket owner has this permission by -// default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketTagging has the following special errors. For more Amazon S3 errors -// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) -// . +// existing list of tags. +// +// To use this operation, you must have permissions to perform the +// s3:PutBucketTagging action. The bucket owner has this permission by default and +// can grant this permission to others. For more information about permissions, see +// [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// PutBucketTagging has the following special errors. For more Amazon S3 errors +// see, [Error Responses]. +// // - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see Using Cost -// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) -// . +// the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags]. +// // - MalformedXML - The XML provided does not match the schema. +// // - OperationAborted - A conflicting conditional action is currently in progress // against this resource. Please try again. +// // - InternalError - The service was unable to apply the provided tag to the // bucket. 
// // The following operations are related to PutBucketTagging : -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// [GetBucketTagging] +// +// [DeleteBucketTagging] +// +// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html +// [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html +// [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { if params == nil { params = &PutBucketTaggingInput{} @@ -75,21 +90,27 @@ type PutBucketTaggingInput struct { // This member is required. Tagging *types.Tagging - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. 
+ // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -101,6 +122,7 @@ type PutBucketTaggingInput struct { } func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -134,25 +156,28 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -170,6 +195,21 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -179,7 +219,7 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil { @@ -209,6 +249,18 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -238,9 +290,10 @@ func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: 
getPutBucketTaggingRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go index c2b751ab..aa30d5fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go @@ -15,28 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the versioning state -// of an existing bucket. You can set the versioning state with one of the -// following values: Enabled—Enables versioning for the objects in the bucket. All -// objects added to the bucket receive a unique version ID. Suspended—Disables -// versioning for the objects in the bucket. All objects added to the bucket -// receive the version ID null. If the versioning state has never been set on a -// bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// request does not return a versioning state value. In order to enable MFA Delete, -// you must be the bucket owner. If you are the bucket owner and want to enable MFA -// Delete in the bucket versioning configuration, you must include the x-amz-mfa -// request header and the Status and the MfaDelete request elements in a request -// to set the versioning state of the bucket. If you have an object expiration -// lifecycle configuration in your non-versioned bucket and you want to maintain -// the same permanent delete behavior when you enable versioning, you must add a -// noncurrent expiration policy. The noncurrent expiration lifecycle configuration -// will manage the deletes of the noncurrent object versions in the version-enabled -// bucket. (A version-enabled bucket maintains one current and zero or more -// noncurrent object versions.) For more information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config) -// . The following operations are related to PutBucketVersioning : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// This operation is not supported for directory buckets. +// +// When you enable versioning on a bucket for the first time, it might take a +// short amount of time for the change to be fully propagated. While this change is +// propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for +// requests to objects created or updated after enabling versioning. We recommend +// that you wait for 15 minutes after enabling versioning before issuing write +// operations ( PUT or DELETE ) on objects in the bucket. +// +// Sets the versioning state of an existing bucket. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added to +// the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. 
All objects added +// to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a [GetBucketVersioning]request does not return a versioning state value. +// +// In order to enable MFA Delete, you must be the bucket owner. If you are the +// bucket owner and want to enable MFA Delete in the bucket versioning +// configuration, you must include the x-amz-mfa request header and the Status and +// the MfaDelete request elements in a request to set the versioning state of the +// bucket. +// +// If you have an object expiration lifecycle configuration in your non-versioned +// bucket and you want to maintain the same permanent delete behavior when you +// enable versioning, you must add a noncurrent expiration policy. The noncurrent +// expiration lifecycle configuration will manage the deletes of the noncurrent +// object versions in the version-enabled bucket. (A version-enabled bucket +// maintains one current and zero or more noncurrent object versions.) For more +// information, see [Lifecycle and Versioning]. +// +// The following operations are related to PutBucketVersioning : +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [GetBucketVersioning] +// +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config +// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { if params == nil { params = &PutBucketVersioningInput{} @@ -64,21 +90,27 @@ type PutBucketVersioningInput struct { // This member is required. VersioningConfiguration *types.VersioningConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // >The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // >The Base64 encoded 128-bit MD5 digest of the data. 
You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -94,6 +126,7 @@ type PutBucketVersioningInput struct { } func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -127,25 +160,28 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -163,6 +199,21 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -172,7 +223,7 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil { @@ -202,6 +253,18 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -231,9 +294,10 @@ func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bo } func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketVersioningRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go index 27555453..169b24bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go @@ -15,21 +15,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the configuration of -// the website that is specified in the website subresource. To configure a bucket -// as a website, you can add this subresource on the bucket with website -// configuration information such as the file name of the index document and any -// redirect rules. For more information, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . This PUT action requires the S3:PutBucketWebsite permission. By default, only +// This operation is not supported for directory buckets. +// +// Sets the configuration of the website that is specified in the website +// subresource. To configure a bucket as a website, you can add this subresource on +// the bucket with website configuration information such as the file name of the +// index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3]. +// +// This PUT action requires the S3:PutBucketWebsite permission. By default, only // the bucket owner can configure the website attached to a bucket; however, bucket // owners can allow other users to set the website configuration by writing a -// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect -// all website requests sent to the bucket's website endpoint, you add a website -// configuration with the following elements. Because all requests are sent to -// another website, you don't need to provide index document name for the bucket. +// bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you add +// a website configuration with the following elements. Because all requests are +// sent to another website, you don't need to provide index document name for the +// bucket. +// // - WebsiteConfiguration +// // - RedirectAllRequestsTo +// // - HostName +// // - Protocol // // If you want granular control over redirects, you can use the following elements @@ -37,27 +45,47 @@ import ( // information about the redirect destination. In this case, the website // configuration must provide an index document for the bucket, because some // requests might not be redirected. 
+// // - WebsiteConfiguration +// // - IndexDocument +// // - Suffix +// // - ErrorDocument +// // - Key +// // - RoutingRules +// // - RoutingRule +// // - Condition +// // - HttpErrorCodeReturnedEquals +// // - KeyPrefixEquals +// // - Redirect +// // - Protocol +// // - HostName +// // - ReplaceKeyPrefixWith +// // - ReplaceKeyWith +// // - HttpRedirectCode // // Amazon S3 has a limitation of 50 routing rules per website configuration. If // you require more than 50 routing rules, you can use object redirect. For more -// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB. +// information, see [Configuring an Object Redirect]in the Amazon S3 User Guide. +// +// The maximum request length is limited to 128 KB. +// +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html +// [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { if params == nil { params = &PutBucketWebsiteInput{} @@ -85,21 +113,27 @@ type PutBucketWebsiteInput struct { // This member is required. WebsiteConfiguration *types.WebsiteConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. 
+ // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -111,6 +145,7 @@ type PutBucketWebsiteInput struct { } func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -144,25 +179,28 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -180,6 +218,21 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -189,7 +242,7 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil { @@ -219,6 +272,18 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -248,9 +313,10 @@ func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: 
getPutBucketWebsiteRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go index 1bade82e..1865d978 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go @@ -18,51 +18,73 @@ import ( ) // Adds an object to a bucket. +// // - Amazon S3 never adds partial objects; if you receive a success response, // Amazon S3 added the entire object to the bucket. You cannot use PutObject to // only update a single piece of metadata for an existing object. You must put the // entire object with updated metadata if you want to update some values. +// // - If your bucket uses the bucket owner enforced setting for Object Ownership, // ACLs are disabled and no longer affect permissions. All objects written to the // bucket by any account will be owned by the bucket owner. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Amazon S3 is a distributed system. If it receives multiple write requests for // the same object simultaneously, it overwrites all but the last object written. // However, Amazon S3 provides features that can modify this behavior: +// // - S3 Object Lock - To prevent objects from being deleted or overwritten, you -// can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) -// in the Amazon S3 User Guide. This functionality is not supported for directory -// buckets. +// can use [Amazon S3 Object Lock]in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. +// +// - If-None-Match - Uploads the object only if the object key name does not +// already exist in the specified bucket. Otherwise, Amazon S3 returns a 412 +// Precondition Failed error. If a conflicting operation occurs during the +// upload, S3 returns a 409 ConditionalRequestConflict response. On a 409 +// failure, retry the upload. +// +// Expects the * character (asterisk). +// +// For more information, see [Add preconditions to S3 operations with conditional requests]in the Amazon S3 User Guide or [RFC 7232]. +// +// This functionality is not supported for S3 on Outposts. +// // - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 // receives multiple write requests for the same object simultaneously, it stores // all versions of the objects. 
For each write request that is made to the same // object, Amazon S3 automatically generates a unique version ID of that object // being stored in Amazon S3. You can retrieve, replace, or delete any version of -// the object. For more information about versioning, see Adding Objects to -// Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) -// in the Amazon S3 User Guide. For information about returning the versioning -// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// . This functionality is not supported for directory buckets. +// the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User +// Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning] +// . +// +// This functionality is not supported for directory buckets. // // Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your PutObject request includes specific headers. +// // - s3:PutObject - To successfully complete the PutObject request, you must // always have the s3:PutObject permission on a bucket to add an object to it. +// // - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject // request, you must have the s3:PutObjectAcl . +// // - s3:PutObjectTagging - To successfully set the tag-set with your PutObject // request, you must have the s3:PutObjectTagging . +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -70,24 +92,44 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Data integrity with Content-MD5 +// // - General purpose bucket - To ensure that data is not corrupted traversing // the network, use the Content-MD5 header. When you use this header, Amazon S3 // checks the object against the provided MD5 value and, if they do not match, // Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 // digest, you can calculate the MD5 while putting the object to Amazon S3 and // compare the returned ETag to the calculated MD5 value. 
+// // - Directory bucket - This functionality is not supported for directory // buckets. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . For more information about -// related Amazon S3 APIs, see the following: -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// For more information about related Amazon S3 APIs, see the following: +// +// [CopyObject] +// +// [DeleteObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html +// [Add preconditions to S3 operations with conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [RFC 7232]: https://datatracker.ietf.org/doc/rfc7232/ +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { if params == nil { params = &PutObjectInput{} @@ -105,31 +147,40 @@ func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns . type PutObjectInput struct { - // The bucket name to which the PUT action was initiated. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. 
For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name to which the PUT action was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -139,26 +190,33 @@ type PutObjectInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. 
For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // in the Amazon S3 User Guide. When adding a new object, you can use headers to - // grant ACL-based permissions to individual Amazon Web Services accounts or to - // predefined groups defined by Amazon S3. These permissions are then added to the - // ACL on the object. By default, all objects are private. Only the owner has full - // access control. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) - // in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses - // the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and - // no longer affect permissions. Buckets that use this setting only accept PUT - // requests that don't specify an ACL or PUT requests that specify bucket owner - // full control ACLs, such as the bucket-owner-full-control canned ACL or an - // equivalent form of this ACL expressed in the XML format. PUT requests that - // contain other ACLs (for example, custom grants to certain Amazon Web Services - // accounts) fail and return a 400 error with the error code - // AccessControlListNotSupported . For more information, see Controlling - // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. + // The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon + // S3 User Guide. + // + // When adding a new object, you can use headers to grant ACL-based permissions to + // individual Amazon Web Services accounts or to predefined groups defined by + // Amazon S3. These permissions are then added to the ACL on the object. By + // default, all objects are private. Only the owner has full access control. For + // more information, see [Access Control List (ACL) Overview]and [Managing ACLs Using the REST API] in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect + // permissions. Buckets that use this setting only accept PUT requests that don't + // specify an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this ACL + // expressed in the XML format. PUT requests that contain other ACLs (for example, + // custom grants to certain Amazon Web Services accounts) fail and return a 400 + // error with the error code AccessControlListNotSupported . For more information, + // see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 User Guide. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. 
+ // + // [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ACL types.ObjectCannedACL // Object data. @@ -166,103 +224,148 @@ type PutObjectInput struct { // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect - // bucket-level settings for S3 Bucket Key. This functionality is not supported for - // directory buckets. + // + // General purpose buckets - Setting this header to true causes Amazon S3 to use + // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this + // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. + // + // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't + // supported, when you copy SSE-KMS encrypted objects from general purpose buckets + // to directory buckets, from directory buckets to general purpose buckets, or + // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + // call to KMS every time a copy request is made for a KMS-encrypted object. + // + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops BucketKeyEnabled *bool // Can be used to specify caching behavior along the request/reply chain. For more - // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) - // . + // information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]. + // + // [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 CacheControl *string // Indicates the algorithm used to create the checksum for the object when you use // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . 
+ // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . For directory buckets, when you - // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's - // used for performance. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any + // request to upload an object with a retention period configured using Amazon S3 + // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. 
For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string - // Specifies presentational information for the object. For more information, see - // https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4) - // . + // Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4]. + // + // [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4 ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For more information, see - // https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding) - // . + // Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding ContentEncoding *string // The language the content is in. ContentLanguage *string // Size of the body in bytes. This parameter is useful when the size of the body - // cannot be determined automatically. For more information, see - // https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length) - // . + // cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]. 
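// ---------------------------------------------------------------------------
// Editor's example (not part of the vendored diff). The regenerated checksum
// middleware above now threads options.RequestChecksumCalculation, and
// PutObjectInput/PutObjectOutput gain CRC64NVME fields. The sketch below shows
// one plausible way a caller could use both; it is illustrative only. The
// bucket/key values are placeholders, and the constant names
// aws.RequestChecksumCalculationWhenRequired and
// types.ChecksumAlgorithmCrc64nvme should be verified against the vendored
// aws and types packages before relying on them.
package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Opt out of the new "checksum on every supported request" default and
	// only compute one when the operation requires it.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
	})

	out, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"), // placeholder
		Key:               aws.String("example-key"),    // placeholder
		Body:              strings.NewReader("hello"),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc64nvme,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The response now reports the CRC64NVME value and the checksum type
	// (FULL_OBJECT for PutObject uploads, per the docs above).
	fmt.Println(aws.ToString(out.ChecksumCRC64NVME), out.ChecksumType)
}
// ---------------------------------------------------------------------------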
+ // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length ContentLength *int64 - // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // The Base64 encoded 128-bit MD5 digest of the message (without the headers) // according to RFC 1864. This header can be used as a message integrity check to // verify that the data is the same data that was originally sent. Although it is // optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, see - // REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // . The Content-MD5 header is required for any request to upload an object with a - // retention period configured using Amazon S3 Object Lock. For more information - // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // integrity check. For more information about REST request authentication, see [REST Authentication]. + // + // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any + // request to upload an object with a retention period configured using Amazon S3 + // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object ContentMD5 *string // A standard MIME type describing the format of the contents. For more - // information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type) - // . + // information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type ContentType *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -271,146 +374,281 @@ type PutObjectInput struct { ExpectedBucketOwner *string // The date and time at which the object is no longer cacheable. For more - // information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3) - // . + // information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]. + // + // [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 Expires *time.Time // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string // Allows grantee to read the object data and its metadata. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string // Allows grantee to read the object ACL. + // // - This functionality is not supported for directory buckets. 
+ // // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to write the ACL for the applicable object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string + // Uploads the object only if the ETag (entity tag) value provided during the + // WRITE operation matches the ETag of the object in S3. If the ETag values do not + // match, the operation returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure you should fetch the + // object's ETag and retry the upload. + // + // Expects the ETag value as a string. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfMatch *string + + // Uploads the object only if the object key name does not already exist in the + // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure you should retry the + // upload. + // + // Expects the '*' (asterisk) character. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfNoneMatch *string + // A map of metadata to store with the object in S3. Metadata map[string]string // Specifies whether a legal hold will be applied to this object. For more - // information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information about S3 Object Lock, see [Object Lock]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to this object. This functionality - // is not supported for directory buckets. + // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode // The date and time when you want this object's Object Lock to expire. Must be - // formatted as a timestamp parameter. This functionality is not supported for - // directory buckets. + // formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. 
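// ---------------------------------------------------------------------------
// Editor's example (not part of the vendored diff). A minimal sketch of the
// conditional-write fields added to PutObjectInput above: IfNoneMatch set to
// "*" asks S3 to create the object only if the key does not already exist, and
// the caller handles the 412 / 409 outcomes the field docs describe. It reuses
// the import set from the checksum sketch above plus "errors", "fmt", "io",
// and "github.com/aws/smithy-go"; the function name and error-code strings are
// assumptions, not part of the vendored API.
func putIfAbsent(ctx context.Context, client *s3.Client, bucket, key string, body io.Reader) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		Body:        body,
		IfNoneMatch: aws.String("*"),
	})
	if err == nil {
		return nil
	}
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		switch apiErr.ErrorCode() {
		case "PreconditionFailed":
			// 412: the key already exists; treat as a no-op here.
			return nil
		case "ConditionalRequestConflict":
			// 409: a concurrent write raced this upload; the docs above
			// recommend retrying.
			return fmt.Errorf("conditional request conflict, retry: %w", err)
		}
	}
	return err
}
// ---------------------------------------------------------------------------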
For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256 - // ). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256 ). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This value is stored as object - // metadata and automatically gets passed on to Amazon Web Services KMS for future - // GetObject or CopyObject operations on this object. This value must be - // explicitly added during CopyObject operations. This functionality is not - // supported for directory buckets. + // Specifies the Amazon Web Services KMS Encryption Context as an additional + // encryption context to use for object encryption. The value of this header is a + // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption + // context as key-value pairs. This value is stored as object metadata and + // automatically gets passed on to Amazon Web Services KMS for future GetObject + // operations on this object. + // + // General purpose buckets - This value must be explicitly added during CopyObject + // operations if you want an additional encryption context for your object. For + // more information, see [Encryption context]in the Amazon S3 User Guide. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. 
+ // + // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context SSEKMSEncryptionContext *string - // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , - // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key - // Management Service (KMS) symmetric encryption customer managed key that was used - // for the object. If you specify x-amz-server-side-encryption:aws:kms or + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. If the KMS key doesn't exist in the same account that's issuing the + // command, you must use the full Key ARN not the Key ID. + // + // General purpose buckets - If you specify x-amz-server-side-encryption with + // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key + // Alias) of the KMS key to use. If you specify + // x-amz-server-side-encryption:aws:kms or // x-amz-server-side-encryption:aws:kms:dsse , but do not provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web - // Services managed key ( aws/s3 ) to protect the data. If the KMS key does not - // exist in the same account that's issuing the command, you must use the full ARN - // and not just the ID. This functionality is not supported for directory buckets. + // Services managed key ( aws/s3 ) to protect the data. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string // The server-side encryption algorithm that was used when you store this object - // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). General purpose - // buckets - You have four mutually exclusive options to protect data using - // server-side encryption in Amazon S3, depending on how you choose to manage the - // encryption keys. Specifically, the encryption key options are Amazon S3 managed - // keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and - // customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side - // encryption by using Amazon S3 managed keys (SSE-S3) by default. You can - // optionally tell Amazon S3 to encrypt data at rest by using server-side - // encryption with other key options. For more information, see Using Server-Side - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) - // in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) value is - // supported. 
+ // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + // + // - General purpose buckets - You have four mutually exclusive options to + // protect data using server-side encryption in Amazon S3, depending on how you + // choose to manage the encryption keys. Specifically, the encryption key options + // are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with + // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You + // can optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see [Using Server-Side Encryption]in the Amazon S3 + // User Guide. + // + // - Directory buckets - For directory buckets, there are only two supported + // options for server-side encryption: server-side encryption with Amazon S3 + // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses + // the desired encryption configuration and you don't override the bucket default + // encryption in your CreateSession requests or PUT object requests. Then, new + // objects are automatically encrypted with the desired encryption settings. For + // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + // encryption request headers must match the encryption settings that are specified + // in the CreateSession request. You can't override the values of the encryption + // settings ( x-amz-server-side-encryption , + // x-amz-server-side-encryption-aws-kms-key-id , + // x-amz-server-side-encryption-context , and + // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + // CreateSession request. You don't need to explicitly specify these encryption + // settings values in Zonal endpoint API calls, and Amazon S3 will use the + // encryption settings values from the CreateSession request to protect new + // objects in the directory bucket. + // + // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + // session token refreshes automatically to avoid service interruptions when a + // session expires. The CLI or the Amazon Web Services SDKs use the bucket's + // default encryption configuration for the CreateSession request. It's not + // supported to override the encryption settings values in the CreateSession + // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + // request headers must match the default encryption configuration of the directory + // bucket. 
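// ---------------------------------------------------------------------------
// Editor's example (not part of the vendored diff). A short sketch of the
// SSE-KMS options documented above: an explicit customer managed key plus an
// S3 Bucket Key for general purpose buckets (directory buckets always enable
// Bucket Keys and take the bucket's default key, per the docs). Imports are
// the same as the checksum sketch above; the KMS key ARN is a placeholder.
func putWithKMS(ctx context.Context, client *s3.Client, bucket, key string) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		Body:                 strings.NewReader("sensitive payload"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		// Placeholder ARN; use the full Key ARN when the key lives in
		// another account.
		SSEKMSKeyId:      aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
		BucketKeyEnabled: aws.Bool(true),
	})
	return err
}
// ---------------------------------------------------------------------------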
+ // + // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. - // - For directory buckets, only the S3 Express One Zone storage class is - // supported to store newly created objects. + // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + // + // - Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone + // storage class) in Availability Zones and ONEZONE_IA (the S3 One + // Zone-Infrequent Access storage class) in Dedicated Local Zones. + // // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. (For example, "Key1=Value1") This functionality is not supported for - // directory buckets. + // parameters. (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the // value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) - // in the Amazon S3 User Guide. In the following example, the request header sets - // the redirect to an object (anotherPage.html) in the same bucket: - // x-amz-website-redirect-location: /anotherPage.html In the following example, the - // request header sets the object redirect to another website: - // x-amz-website-redirect-location: http://www.example.com/ For more information - // about website hosting in Amazon S3, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // metadata, see [Object Key and Metadata]in the Amazon S3 User Guide. 
+ // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the + // Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + // [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + // [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html WebsiteRedirectLocation *string + // Specifies the offset for appending data to existing objects in bytes. The + // offset must be equal to the size of the existing object being appended to. If no + // object exists, setting this header to 0 will create a new object. + // + // This functionality is only supported for objects in the Amazon S3 Express One + // Zone storage class in directory buckets. + WriteOffsetBytes *int64 + noSmithyDocumentSerde } func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -419,107 +657,149 @@ func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { type PutObjectOutput struct { // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. 
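// ---------------------------------------------------------------------------
// Editor's example (not part of the vendored diff). WriteOffsetBytes is new in
// this SDK version and, per the field docs above, only applies to S3 Express
// One Zone objects in directory buckets. The sketch appends a chunk at the
// current end of an existing object by first reading its size with HeadObject;
// it reuses the imports from the checksum sketch above, and the function name
// is a placeholder.
func appendChunk(ctx context.Context, client *s3.Client, bucket, key, chunk string) error {
	head, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		// If the object does not exist yet, WriteOffsetBytes of 0 would
		// create it; that path is omitted here for brevity.
		return err
	}
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:           aws.String(bucket),
		Key:              aws.String(key),
		Body:             strings.NewReader(chunk),
		WriteOffsetBytes: head.ContentLength, // must equal the existing object size
	})
	return err
}
// ---------------------------------------------------------------------------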
When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This header is + // present if the object was uploaded with the CRC64NVME checksum algorithm, or if + // it was uploaded without a checksum (and Amazon S3 added the default checksum, + // CRC64NVME , to the uploaded object). For more information about how checksums + // are calculated with multipart uploads, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. 
Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string - // Entity tag for the uploaded object. General purpose buckets - To ensure that - // data is not corrupted traversing the network, for objects where the ETag is the - // MD5 digest of the object, you can calculate the MD5 while putting an object to - // Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory - // buckets - The ETag for the object in a directory bucket isn't the MD5 digest of - // the object. + // This header specifies the checksum type of the object, which determines how + // part-level checksums are combined to create an object-level checksum for + // multipart objects. For PutObject uploads, the checksum type is always + // FULL_OBJECT . You can use this header as a data integrity check to verify that + // the checksum type that is received is the same checksum that was specified. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + + // Entity tag for the uploaded object. + // + // General purpose buckets - To ensure that data is not corrupted traversing the + // network, for objects where the ETag is the MD5 digest of the object, you can + // calculate the MD5 while putting an object to Amazon S3 and compare the returned + // ETag to the calculated MD5 value. + // + // Directory buckets - The ETag for the object in a directory bucket isn't the MD5 + // digest of the object. ETag *string - // If the expiration is configured for the object (see - // PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ) in the Amazon S3 User Guide, the response includes this header. It includes - // the expiry-date and rule-id key-value pairs that provide information about - // object expiration. The value of the rule-id is URL-encoded. This functionality - // is not supported for directory buckets. + // If the expiration is configured for the object (see [PutBucketLifecycleConfiguration]) in the Amazon S3 User + // Guide, the response includes this header. It includes the expiry-date and + // rule-id key-value pairs that provide information about object expiration. The + // value of the rule-id is URL-encoded. 
+ // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory + // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This value is stored - // as object metadata and automatically gets passed on to Amazon Web Services KMS - // for future GetObject or CopyObject operations on this object. This - // functionality is not supported for directory buckets. + // object encryption. The value of this header is a Base64 encoded string of a + // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + // This value is stored as object metadata and automatically gets passed on to + // Amazon Web Services KMS for future GetObject operations on this object. SSEKMSEncryptionContext *string - // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , - // this header indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3. ServerSideEncryption types.ServerSideEncryption - // Version ID of the object. If you enable versioning for a bucket, Amazon S3 - // automatically generates a unique version ID for the object being stored. Amazon - // S3 returns this ID in the response. When you enable versioning for a bucket, if - // Amazon S3 receives multiple write requests for the same object simultaneously, - // it stores all of the objects. 
For more information about versioning, see Adding - // Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) - // in the Amazon S3 User Guide. For information about returning the versioning - // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) - // . This functionality is not supported for directory buckets. + // The size of the object in bytes. This value is only be present if you append + // to an object. + // + // This functionality is only supported for objects in the Amazon S3 Express One + // Zone storage class in directory buckets. + Size *int64 + + // Version ID of the object. + // + // If you enable versioning for a bucket, Amazon S3 automatically generates a + // unique version ID for the object being stored. Amazon S3 returns this ID in the + // response. When you enable versioning for a bucket, if Amazon S3 receives + // multiple write requests for the same object simultaneously, it stores all of the + // objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User + // Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning]. + // + // This functionality is not supported for directory buckets. + // + // [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html + // [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html VersionId *string // Metadata pertaining to the operation's result. @@ -550,25 +830,28 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -586,6 +869,21 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectValidationMiddleware(stack); err != nil { 
return err } @@ -598,7 +896,7 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = add100Continue(stack, options); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil { @@ -628,6 +926,18 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -657,9 +967,10 @@ func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: true, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, @@ -703,6 +1014,8 @@ func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectI } clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) + result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns, c.client.addOperationPutObjectMiddlewares, presignConverter(options).convertToPresignMiddleware, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go index 5716f550..82a5ceb4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go @@ -14,87 +14,152 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Uses the acl subresource -// to set the access control list (ACL) permissions for a new or existing object in -// an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an -// object. For more information, see What permissions can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3 -// on Outposts. Depending on your application needs, you can choose to set the ACL -// on an object using either the request body or the headers. For example, if you -// have an existing application that updates a bucket ACL using the request body, -// you can continue to use that approach. For more information, see Access Control -// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. You must use policies to grant access to your bucket and the -// objects in it. 
Requests to set ACLs or update ACLs fail and return the -// AccessControlListNotSupported error code. Requests to read ACLs are still -// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Permissions You can set access permissions using -// one of the following methods: +// This operation is not supported for directory buckets. +// +// Uses the acl subresource to set the access control list (ACL) permissions for a +// new or existing object in an S3 bucket. You must have the WRITE_ACP permission +// to set the ACL of an object. For more information, see [What permissions can I grant?]in the Amazon S3 User +// Guide. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// Depending on your application needs, you can choose to set the ACL on an object +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, you can +// continue to use that approach. For more information, see [Access Control List (ACL) Overview]in the Amazon S3 User +// Guide. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies to +// grant access to your bucket and the objects in it. Requests to set ACLs or +// update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the +// Amazon S3 User Guide. +// +// Permissions You can set access permissions using one of the following methods: +// // - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a // set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined // set of grantees and permissions. Specify the canned ACL name as the value of // x-amz-ac l. If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . +// control-specific headers in your request. For more information, see [Canned ACL]. +// // - Specify access permissions explicitly with the x-amz-grant-read , // x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the // permission. If you use these ACL-specific headers, you cannot use x-amz-acl // header to set a canned ACL. These parameters map to the set of permissions that -// Amazon S3 supports in an ACL. For more information, see Access Control List -// (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// . You specify each grantee as a type=value pair, where the type is one of the -// following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. 
California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants list objects permission to the two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-read: -// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. +// +// You specify each grantee as a type=value pair, where the type is one of the +// +// following: +// +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// +// - uri – if you are granting permissions to a predefined group +// +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. +// +// For example, the following x-amz-grant-read header grants list objects +// +// permission to the two Amazon Web Services accounts identified by their email +// addresses. +// +// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" // // You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// - By Email address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved -// to the CanonicalUser and, in a response to a GET Object acl request, appears as -// the CanonicalUser. Using email addresses to specify a grantee is only supported -// in the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. +// cannot do both. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request. 
+// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// - By Email address: +// +// <>Grantees@email.com<>lt;/Grantee> +// +// The grantee is resolved to the CanonicalUser and, in a response to a GET Object +// +// acl request, appears as the CanonicalUser. +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. // // Versioning The ACL of an object is set at the object version level. By default, // PUT sets the ACL of the current version of an object. To set the ACL of a -// different version, use the versionId subresource. The following operations are -// related to PutObjectAcl : -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// different version, use the versionId subresource. +// +// The following operations are related to PutObjectAcl : +// +// [CopyObject] +// +// [GetObject] +// +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { if params == nil { params = &PutObjectAclInput{} @@ -113,22 +178,27 @@ func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, op type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the ACL. - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
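The PutObjectAcl documentation rewritten above distinguishes canned ACLs from the explicit x-amz-grant-* headers. The following sketch is not part of the vendored diff; it shows the canned-ACL path only, under the assumption that the target bucket still has ACLs enabled (bucket owner enforced would reject the call), with bucket and key as placeholders.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putCannedACL applies a canned ACL to an existing object.
func putCannedACL(ctx context.Context, client *s3.Client, bucket, key string) error {
    _, err := client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
        Bucket: aws.String(bucket),
        Key:    aws.String(key),
        // Canned ACL; mutually exclusive with the x-amz-grant-* headers.
        ACL: types.ObjectCannedACLBucketOwnerFullControl,
    })
    return err
}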
When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -138,8 +208,9 @@ type PutObjectAclInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // . + // The canned ACL to apply to the object. For more information, see [Canned ACL]. + // + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL ACL types.ObjectCannedACL // Contains the elements that set the ACL permissions for an object per grantee. @@ -149,17 +220,23 @@ type PutObjectAclInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // The Base64 encoded 128-bit MD5 digest of the data. 
This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864.> (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.>] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.>]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -168,44 +245,54 @@ type PutObjectAclInput struct { ExpectedBucketOwner *string // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This functionality is not supported for Amazon S3 on Outposts. + // bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to list the objects in the bucket. This functionality is not - // supported for Amazon S3 on Outposts. + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the bucket ACL. This functionality is not supported for - // Amazon S3 on Outposts. + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This functionality - // is not supported for Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. This - // functionality is not supported for directory buckets. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. 
VersionId *string noSmithyDocumentSerde } func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -214,7 +301,9 @@ func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { type PutObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -245,25 +334,28 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -281,6 +373,21 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { return err } @@ -290,7 +397,7 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil { @@ -320,6 +427,18 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -349,9 +468,10 @@ func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return 
internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectAclRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go index 3fa38af3..949a84a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -14,9 +14,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Applies a legal hold -// configuration to the specified object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. +// This operation is not supported for directory buckets. +// +// Applies a legal hold configuration to the specified object. For more +// information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { if params == nil { params = &PutObjectLegalHoldInput{} @@ -35,15 +40,19 @@ func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalH type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a legal hold on. - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -57,15 +66,19 @@ type PutObjectLegalHoldInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. 
When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -81,10 +94,12 @@ type PutObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID of the object that you want to place a legal hold on. @@ -94,6 +109,7 @@ type PutObjectLegalHoldInput struct { } func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -101,7 +117,9 @@ func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { type PutObjectLegalHoldOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
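The PutObjectLegalHold input and output documented above reduce to a small call. A minimal sketch, not part of the vendored diff, assuming Object Lock is already enabled on the bucket; bucket and key are placeholders.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// setLegalHold places (or lifts) a legal hold on one object.
func setLegalHold(ctx context.Context, client *s3.Client, bucket, key string, on bool) error {
    status := types.ObjectLockLegalHoldStatusOff
    if on {
        status = types.ObjectLockLegalHoldStatusOn
    }
    _, err := client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{
        Bucket:    aws.String(bucket),
        Key:       aws.String(key),
        LegalHold: &types.ObjectLockLegalHold{Status: status},
    })
    return err
}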
@@ -132,25 +150,28 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -168,6 +189,21 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -177,7 +213,7 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil { @@ -207,6 +243,18 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +284,10 @@ func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, boo } func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go index 49425c8f..e1d6a188 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -14,17 +14,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Places an Object Lock -// configuration on the specified bucket. The rule specified in the Object Lock -// configuration will be applied by default to every new object placed in the -// specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . +// This operation is not supported for directory buckets. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see [Locking Objects]. +// // - The DefaultRetention settings require both a mode and a period. +// // - The DefaultRetention period can be either Days or Years but you must select // one. You cannot specify Days and Years at the same time. +// // - You can enable Object Lock for new or existing buckets. For more -// information, see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html) -// . +// information, see [Configuring Object Lock]. +// +// [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { if params == nil { params = &PutObjectLockConfigurationInput{} @@ -51,15 +56,19 @@ type PutObjectLockConfigurationInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
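Per the notes above, DefaultRetention needs a mode plus exactly one of Days or Years. The sketch below is illustrative only and not part of the vendored diff; the 30-day GOVERNANCE default and the bucket name are placeholders.

package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableDefaultRetention enables Object Lock with a 30-day GOVERNANCE default retention.
func enableDefaultRetention(ctx context.Context, client *s3.Client, bucket string) error {
    _, err := client.PutObjectLockConfiguration(ctx, &s3.PutObjectLockConfigurationInput{
        Bucket: aws.String(bucket),
        ObjectLockConfiguration: &types.ObjectLockConfiguration{
            ObjectLockEnabled: types.ObjectLockEnabledEnabled,
            Rule: &types.ObjectLockRule{
                // A mode and exactly one of Days or Years must be set.
                DefaultRetention: &types.DefaultRetention{
                    Mode: types.ObjectLockRetentionModeGovernance,
                    Days: aws.Int32(30),
                },
            },
        },
    })
    return err
}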
If the account ID that you provide @@ -74,10 +83,12 @@ type PutObjectLockConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // A token to allow Object Lock to be enabled for an existing bucket. @@ -87,6 +98,7 @@ type PutObjectLockConfigurationInput struct { } func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -94,7 +106,9 @@ func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParamet type PutObjectLockConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -125,25 +139,28 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -161,6 +178,21 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -170,7 +202,7 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil { @@ -200,6 +232,18 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -229,9 +273,10 @@ func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (str } func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go index 5dfb98f3..1a697087 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -14,12 +14,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Places an Object -// Retention configuration on an object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . Users or accounts require the s3:PutObjectRetention permission in order to -// place an Object Retention configuration on objects. Bypassing a Governance +// This operation is not supported for directory buckets. +// +// Places an Object Retention configuration on an object. For more information, +// see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order +// to place an Object Retention configuration on objects. Bypassing a Governance // Retention configuration requires the s3:BypassGovernanceRetention permission. +// // This functionality is not supported for Amazon S3 on Outposts. +// +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { if params == nil { params = &PutObjectRetentionInput{} @@ -38,15 +42,20 @@ func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetent type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object - // Retention configuration to. 
Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // Retention configuration to. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -64,15 +73,19 @@ type PutObjectRetentionInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -84,10 +97,12 @@ type PutObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. 
If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The container element for the Object Retention configuration. @@ -101,6 +116,7 @@ type PutObjectRetentionInput struct { } func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -108,7 +124,9 @@ func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { type PutObjectRetentionOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -139,25 +157,28 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -175,6 +196,21 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -184,7 +220,7 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); 
err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil { @@ -214,6 +250,18 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -243,9 +291,10 @@ func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, boo } func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go index 8b42d43d..bd680b21 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -14,35 +14,50 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the supplied tag-set -// to an object that already exists in a bucket. A tag is a key-value pair. For -// more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) -// . You can associate tags with an object by sending a PUT request against the +// This operation is not supported for directory buckets. +// +// Sets the supplied tag-set to an object that already exists in a bucket. A tag +// is a key-value pair. For more information, see [Object Tagging]. +// +// You can associate tags with an object by sending a PUT request against the // tagging subresource that is associated with the object. You can retrieve tags by -// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// . For tagging-related restrictions related to characters and encodings, see Tag -// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// . Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// sending a GET request. For more information, see [GetObjectTagging]. +// +// For tagging-related restrictions related to characters and encodings, see [Tag Restrictions]. +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// // To use this operation, you must have permission to perform the // s3:PutObjectTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. To put tags of any other version, use the -// versionId query parameter. 
You also need permission for the -// s3:PutObjectVersionTagging action. PutObjectTagging has the following special -// errors. For more Amazon S3 errors see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) -// . +// can grant this permission to others. +// +// To put tags of any other version, use the versionId query parameter. You also +// need permission for the s3:PutObjectVersionTagging action. +// +// PutObjectTagging has the following special errors. For more Amazon S3 errors +// see, [Error Responses]. +// // - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see Object -// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) -// . +// the tag did not pass input validation. For more information, see [Object Tagging]. +// // - MalformedXML - The XML provided does not match the schema. +// // - OperationAborted - A conflicting conditional action is currently in progress // against this resource. Please try again. +// // - InternalError - The service was unable to apply the provided tag to the // object. // // The following operations are related to PutObjectTagging : -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// [GetObjectTagging] +// +// [DeleteObjectTagging] +// +// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html +// [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { if params == nil { params = &PutObjectTaggingInput{} @@ -60,23 +75,28 @@ func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingI type PutObjectTaggingInput struct { - // The bucket name containing the object. Access points - When you use this action - // with an access point, you must provide the alias of the access point in place of - // the bucket name or specify the access point ARN. When using the access point - // ARN, you must direct requests to the access point hostname. The access point - // hostname takes the form + // The bucket name containing the object. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -95,15 +115,19 @@ type PutObjectTaggingInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -115,10 +139,12 @@ type PutObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. 
For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The versionId of the object that the tag-set will be added to. @@ -128,6 +154,7 @@ type PutObjectTaggingInput struct { } func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -165,25 +192,28 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -201,6 +231,21 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -210,7 +255,7 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil { @@ -240,6 +285,18 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -269,9 +326,10 @@ func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go index ab0b5405..114bcc14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go @@ -15,22 +15,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates or modifies the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketPublicAccessBlock permission. For more -// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or -// an object, it checks the PublicAccessBlock configuration for both the bucket -// (or the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported for directory buckets. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock configurations are different between the bucket and the // account, Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to PutPublicAccessBlock : -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// account-level settings. 
+// +// For more information about when Amazon S3 considers a bucket or an object +// public, see [The Meaning of "Public"]. +// +// The following operations are related to PutPublicAccessBlock : +// +// [GetPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetBucketPolicyStatus] +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { if params == nil { params = &PutPublicAccessBlockInput{} @@ -56,9 +72,10 @@ type PutPublicAccessBlockInput struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. For more - // information about when Amazon S3 considers a bucket or object public, see The - // Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon S3 User Guide. + // information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in + // the Amazon S3 User Guide. + // + // [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status // // This member is required. PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration @@ -67,15 +84,19 @@ type PutPublicAccessBlockInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash of the PutPublicAccessBlock request body. For requests made using - // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services - // SDKs, this field is calculated automatically. + // The MD5 hash of the PutPublicAccessBlock request body. 
+ // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -87,6 +108,7 @@ type PutPublicAccessBlockInput struct { } func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -120,25 +142,28 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -156,6 +181,21 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -165,7 +205,7 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil { @@ -195,6 +235,18 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -224,9 +276,10 @@ func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, b } func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return 
addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go index a84b5326..6c4ec54b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go @@ -14,60 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Restores an archived copy -// of an object back into Amazon S3 This functionality is not supported for Amazon -// S3 on Outposts. This action performs the following types of requests: -// - select - Perform a select query on an archived object +// This operation is not supported for directory buckets. +// +// # Restores an archived copy of an object back into Amazon S3 +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// This action performs the following types of requests: +// // - restore an archive - Restore an archived object // // For more information about the S3 structure in the request body, see the // following: -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon S3 User Guide -// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide -// -// Define the SQL expression for the SELECT type of restoration for your query in -// the request body's SelectParameters structure. You can use expressions like the -// following examples. -// - The following expression returns all records from the specified object. -// SELECT * FROM Object -// - Assuming that you are not using any headers for data stored in the object, -// you can specify columns with positional headers. SELECT s._1, s._2 FROM -// Object s WHERE s._3 > 100 -// - If you have headers and you set the fileHeaderInfo in the CSV structure in -// the request body to USE , you can specify headers in the query. (If you set -// the fileHeaderInfo field to IGNORE , the first row is skipped for the query.) -// You cannot mix ordinal positions with header column names. SELECT s.Id, -// s.FirstName, s.SSN FROM S3Object s -// -// When making a select request, you can also do the following: -// - To expedite your queries, specify the Expedited tier. For more information -// about tiers, see "Restoring Archives," later in this topic. -// - Specify details about the data serialization format of both the input -// object that is being queried and the serialization of the CSV-encoded query -// results. -// -// The following are additional important facts about the select feature: -// - The output results are new Amazon S3 objects. Unlike archive retrievals, -// they are stored until explicitly deleted-manually or through a lifecycle -// configuration. -// - You can issue more than one select request on the same Amazon S3 object. -// Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests. 
-// - Amazon S3 accepts a select request even if the object has already been -// restored. A select request doesn’t return error response 409 . +// +// [PutObject] +// +// [Managing Access with ACLs] +// - in the Amazon S3 User Guide +// +// [Protecting Data Using Server-Side Encryption] +// - in the Amazon S3 User Guide // // Permissions To use this operation, you must have permissions to perform the // s3:RestoreObject action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. Restoring objects Objects that you archive to the -// S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive -// storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep -// Archive tiers, are not accessible in real time. For objects in the S3 Glacier -// Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage +// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// +// Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval +// or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or +// S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For +// objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage // classes, you must first initiate a restore request, and then wait until a // temporary copy of the object is available. If you want a permanent copy of the // object, create a copy of it in the Amazon S3 Standard storage class in your S3 @@ -75,61 +51,70 @@ import ( // duration (number of days) that you specify. For objects in the Archive Access or // Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a // restore request, and then wait until the object is moved into the Frequent -// Access tier. To restore a specific object version, you can provide a version ID. -// If you don't provide a version ID, Amazon S3 restores the current version. When -// restoring an archived object, you can specify one of the following data access -// tier options in the Tier element of the request body: +// Access tier. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// When restoring an archived object, you can specify one of the following data +// access tier options in the Tier element of the request body: +// // - Expedited - Expedited retrievals allow you to quickly access your data -// stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or -// S3 Intelligent-Tiering Archive tier when occasional urgent requests for -// restoring archives are required. For all but the largest archived objects (250 -// MB+), data accessed using Expedited retrievals is typically made available -// within 1–5 minutes. 
Provisioned capacity ensures that retrieval capacity for -// Expedited retrievals is available when you need it. Expedited retrievals and -// provisioned capacity are not available for objects stored in the S3 Glacier Deep -// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. +// stored in the S3 Glacier Flexible Retrieval storage class or S3 +// Intelligent-Tiering Archive tier when occasional urgent requests for restoring +// archives are required. For all but the largest archived objects (250 MB+), data +// accessed using Expedited retrievals is typically made available within 1–5 +// minutes. Provisioned capacity ensures that retrieval capacity for Expedited +// retrievals is available when you need it. Expedited retrievals and provisioned +// capacity are not available for objects stored in the S3 Glacier Deep Archive +// storage class or S3 Intelligent-Tiering Deep Archive tier. +// // - Standard - Standard retrievals allow you to access any of your archived // objects within several hours. This is the default option for retrieval requests // that do not specify the retrieval option. Standard retrievals typically finish -// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval -// Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They -// typically finish within 12 hours for objects stored in the S3 Glacier Deep -// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard -// retrievals are free for objects stored in S3 Intelligent-Tiering. +// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage +// class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 +// hours for objects stored in the S3 Glacier Deep Archive storage class or S3 +// Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects +// stored in S3 Intelligent-Tiering. +// // - Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible // Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve // large amounts, even petabytes, of data at no cost. Bulk retrievals typically // finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval -// Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk -// retrievals are also the lowest-cost retrieval option when restoring objects from -// S3 Glacier Deep Archive. They typically finish within 48 hours for objects -// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering -// Deep Archive tier. +// storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also +// the lowest-cost retrieval option when restoring objects from S3 Glacier Deep +// Archive. They typically finish within 48 hours for objects stored in the S3 +// Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. // // For more information about archive retrieval options and provisioned capacity -// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to -// change the restore speed to a faster speed while it is in progress. For more -// information, see Upgrading the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon S3 User Guide. 
To get the status of object restoration, you can -// send a HEAD request. Operations return the x-amz-restore header, which provides -// information about the restoration status, in the response. You can use Amazon S3 -// event notifications to notify you when a restore is initiated or completed. For -// more information, see Configuring Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon S3 User Guide. After restoring an archived object, you can update -// the restoration period by reissuing the request with a new period. Amazon S3 -// updates the restoration period relative to the current time and charges only for -// the request-there are no data transfer charges. You cannot update the -// restoration period when Amazon S3 is actively processing your current restore -// request for the object. If your bucket has a lifecycle configuration with a rule -// that includes an expiration action, the object expiration overrides the life -// span that you specify in a restore request. For example, if you restore an -// object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon -// S3 deletes the object in 3 days. For more information about lifecycle -// configuration, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon S3 User Guide. Responses A successful action returns either the 200 OK -// or 202 Accepted status code. +// for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to a +// faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore]in the Amazon +// S3 User Guide. +// +// To get the status of object restoration, you can send a HEAD request. +// Operations return the x-amz-restore header, which provides information about +// the restoration status, in the response. You can use Amazon S3 event +// notifications to notify you when a restore is initiated or completed. For more +// information, see [Configuring Amazon S3 Event Notifications]in the Amazon S3 User Guide. +// +// After restoring an archived object, you can update the restoration period by +// reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request-there are +// no data transfer charges. You cannot update the restoration period when Amazon +// S3 is actively processing your current restore request for the object. +// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy for 10 +// days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the +// object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration]and [Object Lifecycle Management] +// in Amazon S3 User Guide. +// +// Responses A successful action returns either the 200 OK or 202 Accepted status +// code. // // - If the object is not previously restored, then Amazon S3 returns 202 // Accepted in the response. 
@@ -141,8 +126,7 @@ import ( // // - Code: RestoreAlreadyInProgress // -// - Cause: Object restore is already in progress. (This error does not apply to -// SELECT type requests.) +// - Cause: Object restore is already in progress. // // - HTTP Status Code: 409 Conflict // @@ -160,8 +144,22 @@ import ( // - SOAP Fault Code Prefix: N/A // // The following operations are related to RestoreObject : -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// [PutBucketLifecycleConfiguration] +// +// [GetBucketNotificationConfiguration] +// +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html +// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html +// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { if params == nil { params = &RestoreObjectInput{} @@ -179,23 +177,28 @@ func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, type RestoreObjectInput struct { - // The bucket name containing the object to restore. Access points - When you use - // this action with an access point, you must provide the alias of the access point - // in place of the bucket name or specify the access point ARN. When using the - // access point ARN, you must direct requests to the access point hostname. The - // access point hostname takes the form + // The bucket name containing the object to restore. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -209,10 +212,13 @@ type RestoreObjectInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide @@ -224,10 +230,12 @@ type RestoreObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. 
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Container for restore job parameters. @@ -240,6 +248,7 @@ type RestoreObjectInput struct { } func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -247,7 +256,9 @@ func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Indicates the path in the provided S3 output location where Select results will @@ -282,25 +293,28 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -318,6 +332,21 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { return err } @@ -327,7 +356,7 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil { @@ -354,6 +383,18 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = 
addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -383,9 +424,10 @@ func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) { } func addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getRestoreObjectRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go index 888ec9c2..2dfa63c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go @@ -11,71 +11,97 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" smithysync "github.com/aws/smithy-go/sync" - smithyhttp "github.com/aws/smithy-go/transport/http" "sync" ) -// This operation is not supported by directory buckets. This action filters the -// contents of an Amazon S3 object based on a simple structured query language -// (SQL) statement. In the request, along with the SQL expression, you must also -// specify a data serialization format (JSON, CSV, or Apache Parquet) of the -// object. Amazon S3 uses this format to parse object data into records, and -// returns only records that match the specified SQL expression. You must also -// specify the data serialization format for the response. This functionality is -// not supported for Amazon S3 on Outposts. For more information about Amazon S3 -// Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) -// in the Amazon S3 User Guide. Permissions You must have the s3:GetObject -// permission for this operation. Amazon S3 Select does not support anonymous -// access. For more information about permissions, see Specifying Permissions in a -// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to -// query objects that have the following format properties: +// This operation is not supported for directory buckets. +// +// This action filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the SQL +// expression, you must also specify a data serialization format (JSON, CSV, or +// Apache Parquet) of the object. Amazon S3 uses this format to parse object data +// into records, and returns only records that match the specified SQL expression. +// You must also specify the data serialization format for the response. +// +// This functionality is not supported for Amazon S3 on Outposts. 
+// +// For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User +// Guide. +// +// Permissions You must have the s3:GetObject permission for this operation. +// Amazon S3 Select does not support anonymous access. For more information about +// permissions, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. +// +// Object Data Formats You can use Amazon S3 Select to query objects that have the +// following format properties: +// // - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// // - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// // - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. // GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports // for CSV and JSON files. Amazon S3 Select supports columnar compression for // Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object // compression for Parquet objects. +// // - Server-side encryption - Amazon S3 Select supports querying objects that -// are protected with server-side encryption. For objects that are encrypted with -// customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use -// the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// . For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 -// managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side -// encryption is handled transparently, so you don't need to specify anything. For -// more information about server-side encryption, including SSE-S3 and SSE-KMS, see -// Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide. +// are protected with server-side encryption. +// +// For objects that are encrypted with customer-provided encryption keys (SSE-C), +// +// you must use HTTPS, and you must use the headers that are documented in the [GetObject]. +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)]in the Amazon S3 User Guide. +// +// For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon +// +// Web Services KMS keys (SSE-KMS), server-side encryption is handled +// transparently, so you don't need to specify anything. For more information about +// server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption]in the Amazon S3 +// User Guide. // // Working with the Response Body Given the response size is unknown, Amazon S3 // Select streams the response as a series of messages and includes a // Transfer-Encoding header with chunked as its value in the response. For more -// information, see Appendix: SelectObjectContent Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) -// . GetObject Support The SelectObjectContent action does not support the -// following GetObject functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// . +// information, see [Appendix: SelectObjectContent Response]. 
+// +// GetObject Support The SelectObjectContent action does not support the following +// GetObject functionality. For more information, see [GetObject]. +// // - Range : Although you can specify a scan range for an Amazon S3 Select -// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) -// in the request parameters), you cannot specify the range of bytes of an object -// to return. +// request (see [SelectObjectContentRequest - ScanRange]in the request parameters), you cannot specify the range of +// bytes of an object to return. +// // - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the // ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING // storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or // REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or // DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For -// more information about storage classes, see Using Amazon S3 storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) -// in the Amazon S3 User Guide. +// more information about storage classes, see [Using Amazon S3 storage classes]in the Amazon S3 User Guide. +// +// Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes] // -// Special Errors For a list of special errors for this operation, see List of -// SELECT Object Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) // The following operations are related to SelectObjectContent : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// [GetObject] +// +// [GetBucketLifecycleConfiguration] +// +// [PutBucketLifecycleConfiguration] +// +// [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html +// [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange +// [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html +// [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// +// [Server-Side Encryption (Using Customer-Provided 
Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { if params == nil { params = &SelectObjectContentInput{} @@ -91,14 +117,18 @@ func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectCo return out, nil } +// Learn Amazon S3 Select is no longer available to new customers. Existing +// customers of Amazon S3 Select can continue to use the feature as usual. [Learn more] +// // Request to filter the contents of an Amazon S3 object based on a simple // Structured Query Language (SQL) statement. In the request, along with the SQL // expression, you must specify a data serialization format (JSON or CSV) of the // object. Amazon S3 uses this to parse object data into records. It returns only // records that match the specified SQL expression. You must also specify the data -// serialization format for the response. For more information, see S3Select API -// Documentation (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html) -// . +// serialization format for the response. For more information, see [S3Select API Documentation]. +// +// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html type SelectObjectContentInput struct { // The S3 bucket. @@ -141,30 +171,37 @@ type SelectObjectContentInput struct { // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. 
+ // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string // Specifies the byte range of the object to get the records from. A record is // processed when its first byte is contained by the range. This parameter is // optional, but when specified, it must not be empty. See RFC 2616, Section - // 14.35.1 about how to specify the start and end of the range. ScanRange may be - // used in the following ways: + // 14.35.1 about how to specify the start and end of the range. + // + // ScanRange may be used in the following ways: + // // - 50100 - process only the records starting between the bytes 50 and 100 // (inclusive, counting from zero) + // // - 50 - process only the records starting after the byte 50 + // // - 50 - process only the records within the last 50 bytes of the file. ScanRange *types.ScanRange @@ -172,6 +209,7 @@ type SelectObjectContentInput struct { } func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -215,25 +253,28 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -245,6 +286,18 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { return err } @@ -254,7 +307,7 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil { @@ -278,6 +331,18 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = 
addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go index 53507fba..5d1fc2d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go @@ -16,58 +16,84 @@ import ( "io" ) -// Uploads a part in a multipart upload. In this operation, you provide new data -// as a part of an object in your request. However, you have an option to specify -// your existing Amazon S3 object as a data source for the part you are uploading. -// To upload a part from an existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// ) before you can upload any part. In response to your initiate request, Amazon -// S3 returns an upload ID, a unique identifier that you must include in your -// upload part request. Part numbers can be any number from 1 to 10,000, inclusive. -// A part number uniquely identifies a part and also defines its position within -// the object being created. If you upload a new part using the same part number -// that was used with a previous part, the previously uploaded part is overwritten. +// Uploads a part in a multipart upload. +// +// In this operation, you provide new data as a part of an object in your request. +// However, you have an option to specify your existing Amazon S3 object as a data +// source for the part you are uploading. To upload a part from an existing object, +// you use the [UploadPartCopy]operation. +// +// You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. In +// response to your initiate request, Amazon S3 returns an upload ID, a unique +// identifier that you must include in your upload part request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object being +// created. If you upload a new part using the same part number that was used with +// a previous part, the previously uploaded part is overwritten. +// // For information about maximum and minimum part sizes and other multipart upload -// specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. After you initiate multipart upload and upload one -// or more parts, you must either complete or abort multipart upload in order to -// stop getting charged for storage of the uploaded parts. Only after you either -// complete or abort multipart upload, Amazon S3 frees up the parts storage and -// stops charging you for the parts storage. For more information on multipart -// uploads, go to Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide . Directory buckets - For directory buckets, you -// must make requests for this API operation to the Zonal endpoint. 
These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions -// - General purpose bucket permissions - For information on the permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . -// -// Data integrity General purpose bucket - To ensure that data is not corrupted +// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. +// +// After you initiate multipart upload and upload one or more parts, you must +// either complete or abort multipart upload in order to stop getting charged for +// storage of the uploaded parts. Only after you either complete or abort multipart +// upload, Amazon S3 frees up the parts storage and stops charging you for the +// parts storage. +// +// For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide . +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// - General purpose bucket permissions - To perform a multipart upload with +// encryption using an Key Management Service key, the requester must have +// permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The +// requester must also have permissions for the kms:GenerateDataKey action for +// the CreateMultipartUpload API. Then, the requester needs permissions for the +// kms:Decrypt action on the UploadPart and UploadPartCopy APIs. 
+// +// These permissions are required because Amazon S3 must decrypt and read data +// +// from the encrypted file parts before it completes the multipart upload. For more +// information about KMS permissions, see [Protecting data using server-side encryption with KMS]in the Amazon S3 User Guide. For +// information about the permissions required to use the multipart upload API, see [Multipart upload and permissions] +// and [Multipart upload API and permissions]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. +// +// Data integrity General purpose bucket - To ensure that data is not corrupted // traversing the network, specify the Content-MD5 header in the upload part // request. Amazon S3 checks the part data against the provided MD5 value. If they // do not match, Amazon S3 returns an error. If the upload request is signed with // Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 -// header as a checksum instead of Content-MD5 . For more information see -// Authenticating Requests: Using the Authorization Header (Amazon Web Services -// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) -// . Directory buckets - MD5 is not supported by directory buckets. You can use -// checksum algorithms to check object integrity. Encryption +// header as a checksum instead of Content-MD5 . For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]. +// +// Directory buckets - MD5 is not supported by directory buckets. You can use +// checksum algorithms to check object integrity. +// +// Encryption // - General purpose bucket - Server-side encryption is for data encryption at // rest. Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. You have mutually exclusive options to @@ -78,37 +104,76 @@ import ( // encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally // tell Amazon S3 to encrypt data at rest using server-side encryption with other // key options. The option you use depends on whether you want to use KMS keys -// (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is -// supported by the S3 Multipart Upload operations. 
Unless you are using a -// customer-provided encryption key (SSE-C), you don't need to specify the -// encryption parameters in each UploadPart request. Instead, you only need to -// specify the server-side encryption parameters in the initial Initiate Multipart -// request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . If you request server-side encryption using a customer-provided encryption key -// (SSE-C) in your initiate multipart upload request, you must provide identical -// encryption information in each part upload using the following request headers. -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 -// - Directory bucket - For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. -// -// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon S3 User Guide. Special errors +// (SSE-KMS) or provide your own encryption key (SSE-C). +// +// Server-side encryption is supported by the S3 Multipart Upload operations. +// +// Unless you are using a customer-provided encryption key (SSE-C), you don't need +// to specify the encryption parameters in each UploadPart request. Instead, you +// only need to specify the server-side encryption parameters in the initial +// Initiate Multipart request. For more information, see [CreateMultipartUpload]. +// +// If you request server-side encryption using a customer-provided encryption key +// +// (SSE-C) in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following request headers. +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information, see [Using Server-Side Encryption]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). +// +// Special errors +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found +// // - SOAP Fault Code Prefix: Client // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are -// related to UploadPart : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to UploadPart : +// +// [CreateMultipartUpload] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html +// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { if params == nil { params = &UploadPartInput{} @@ -126,31 +191,40 @@ func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns type UploadPartInput struct { - // The name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. 
Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -178,50 +252,67 @@ type UploadPartInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must - // be the same for all parts and it match the checksum value supplied in the - // CreateMultipartUpload request. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // This checksum algorithm must be the same for all parts and it match the + // checksum value supplied in the CreateMultipartUpload request. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. 
This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Size of the body in bytes. This parameter is useful when the size of the body // cannot be determined automatically. ContentLength *int64 - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // The Base64 encoded 128-bit MD5 digest of the part data. This parameter is // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. This functionality is not supported for - // directory buckets. + // if object lock parameters are specified. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -233,14 +324,17 @@ type UploadPartInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). 
+ // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -248,20 +342,23 @@ type UploadPartInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header . This must be the same - // encryption key specified in the initiate multipart upload request. This - // functionality is not supported for directory buckets. + // encryption key specified in the initiate multipart upload request. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -270,73 +367,89 @@ func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. 
When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // Entity tag for the uploaded object. ETag *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. 
@@ -367,25 +480,28 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -403,6 +519,21 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadPartValidationMiddleware(stack); err != nil { return err } @@ -415,7 +546,7 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = add100Continue(stack, options); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil { @@ -445,6 +576,18 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -474,9 +617,10 @@ func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) { } func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getUploadPartRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: true, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, @@ -521,6 +665,8 @@ func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPar } clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + 
clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) + result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns, c.client.addOperationUploadPartMiddlewares, presignConverter(options).convertToPresignMiddleware, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go index 1d48a7be..9cd55c70 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go @@ -18,90 +18,166 @@ import ( // Uploads a part by copying data from an existing object as data source. To // specify the data source, you add the request header x-amz-copy-source in your // request. To specify a byte range, you add the request header -// x-amz-copy-source-range in your request. For information about maximum and -// minimum part sizes and other multipart upload specifications, see Multipart -// upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. Instead of copying data from an existing object as -// part data, you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// action to upload new data as a part of an object in your request. You must -// initiate a multipart upload before you can upload any part. In response to your -// initiate request, Amazon S3 returns the upload ID, a unique identifier that you -// must include in your upload part request. For conceptual information about -// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. For information about copying objects using a -// single atomic action vs. a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy -// requests must be authenticated and signed by using IAM credentials (access key -// ID and secret access key for the IAM identities). All headers with the x-amz- -// prefix, including x-amz-copy-source , must be signed. For more information, see -// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory buckets - You must use IAM credentials to authenticate and authorize +// x-amz-copy-source-range in your request. +// +// For information about maximum and minimum part sizes and other multipart upload +// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. +// +// Instead of copying data from an existing object as part data, you might use the [UploadPart] +// action to upload new data as a part of an object in your request. +// +// You must initiate a multipart upload before you can upload any part. 
In +// response to your initiate request, Amazon S3 returns the upload ID, a unique +// identifier that you must include in your upload part request. +// +// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User +// Guide. For information about copying objects using a single atomic action vs. a +// multipart upload, see [Operations on Objects]in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Authentication and authorization All UploadPartCopy requests must be +// authenticated and signed by using IAM credentials (access key ID and secret +// access key for the IAM identities). All headers with the x-amz- prefix, +// including x-amz-copy-source , must be signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use IAM credentials to authenticate and authorize // your access to the UploadPartCopy API operation, instead of using the temporary -// security credentials through the CreateSession API operation. Amazon Web -// Services CLI or SDKs handles authentication and authorization on your behalf. +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// // Permissions You must have READ access to the source object and WRITE access to // the destination bucket. +// // - General purpose bucket permissions - You must have the permissions in a // policy based on the bucket types of your source bucket and destination bucket in // an UploadPartCopy operation. +// // - If the source object is in a general purpose bucket, you must have the // s3:GetObject permission to read the source object that is being copied. +// // - If the destination bucket is a general purpose bucket, you must have the -// s3:PubObject permission to write the object copy to the destination bucket. -// For information about permissions required to use the multipart upload API, see -// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// s3:PutObject permission to write the object copy to the destination bucket. +// +// - To perform a multipart upload with encryption using an Key Management +// Service key, the requester must have permission to the kms:Decrypt and +// kms:GenerateDataKey actions on the key. The requester must also have +// permissions for the kms:GenerateDataKey action for the CreateMultipartUpload +// API. Then, the requester needs permissions for the kms:Decrypt action on the +// UploadPart and UploadPartCopy APIs. These permissions are required because +// Amazon S3 must decrypt and read data from the encrypted file parts before it +// completes the multipart upload. 
For more information about KMS permissions, see [Protecting data using server-side encryption with KMS] +// in the Amazon S3 User Guide. For information about the permissions required to +// use the multipart upload API, see [Multipart upload and permissions]and [Multipart upload API and permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - You must have permissions in a bucket policy // or an IAM identity-based policy based on the source and destination bucket types // in an UploadPartCopy operation. +// // - If the source object that you want to copy is in a directory bucket, you // must have the s3express:CreateSession permission in the Action element of a -// policy to read the object . By default, the session is in the ReadWrite mode. +// policy to read the object. By default, the session is in the ReadWrite mode. // If you want to restrict the access, you can explicitly set the // s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// // - If the copy destination is a directory bucket, you must have the // s3express:CreateSession permission in the Action element of a policy to write // the object to the destination. The s3express:SessionMode condition key cannot -// be set to ReadOnly on the copy destination. For example policies, see Example -// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// be set to ReadOnly on the copy destination. +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. +// +// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. // // Encryption +// // - General purpose buckets - For information about using server-side // encryption with customer-provided encryption keys with the UploadPartCopy -// operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// . -// - Directory buckets - For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// operation, see [CopyObject]and [UploadPart]. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). For more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. +// +// For directory buckets, when you perform a CreateMultipartUpload operation and an +// +// UploadPartCopy operation, the request headers you provide in the +// CreateMultipartUpload request must match the default encryption configuration +// of the destination bucket. 
+// +// S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from +// +// general purpose buckets to directory buckets, from directory buckets to general +// purpose buckets, or between directory buckets, through [UploadPartCopy]. In this case, Amazon +// S3 makes a call to KMS every time a copy request is made for a KMS-encrypted +// object. // // Special errors +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found +// // - Error Code: InvalidRequest +// // - Description: The specified copy source is not supported as a byte-range // copy source. +// // - HTTP Status Code: 400 Bad Request // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to UploadPartCopy : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to UploadPartCopy : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [REST Authentication]: 
https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { if params == nil { params = &UploadPartCopyInput{} @@ -119,43 +195,60 @@ func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput type UploadPartCopyInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. 
Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Copying objects across different Amazon Web Services Regions isn't supported + // when the source or destination bucket is in Amazon Web Services Local Zones. The + // source and destination buckets must have the same parent Amazon Web Services + // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code + // InvalidRequest . + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string // Specifies the source object for the copy operation. You specify the value in // one of two formats, depending on whether you want to access the source object - // through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) - // : + // through an [access point]: + // // - For objects not accessed through an access point, specify the name of the // source bucket and key of the source object, separated by a slash (/). For // example, to copy the object reports/january.pdf from the bucket // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must // be URL-encoded. + // // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . 
For example, to copy the object @@ -163,28 +256,39 @@ type UploadPartCopyInput struct { // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf // . The value must be URL encoded. + // // - Amazon S3 supports copy operations using Access points only when the source // and destination buckets are in the same Amazon Web Services Region. - // - Access points are not supported by directory buckets. Alternatively, for - // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as - // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, - // to copy the object reports/january.pdf through outpost my-outpost owned by - // account 123456789012 in Region us-west-2 , use the URL encoding of + // + // - Access points are not supported by directory buckets. + // + // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + // ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 + // in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. + // // If your bucket has versioning enabled, you could have multiple versions of the // same object. By default, x-amz-copy-source identifies the current version of // the source object to copy. To copy a specific version of the source object to // copy, append ?versionId= to the x-amz-copy-source request header (for example, // x-amz-copy-source: // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). If the current version is a delete marker and you don't specify a versionId - // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found - // error, because the object does not exist. If you specify versionId in the + // ). + // + // If the current version is a delete marker and you don't specify a versionId in + // the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error, + // because the object does not exist. If you specify versionId in the // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an // HTTP 400 Bad Request error, because you are not allowed to specify a delete - // marker as a version for the x-amz-copy-source . Directory buckets - S3 - // Versioning isn't enabled and supported for directory buckets. + // marker as a version for the x-amz-copy-source . + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // + // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html // // This member is required. CopySource *string @@ -205,34 +309,56 @@ type UploadPartCopyInput struct { // This member is required. UploadId *string - // Copies the object if its entity tag (ETag) matches the specified tag. If both - // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request as follows: x-amz-copy-source-if-match - // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since - // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. + // Copies the object if its entity tag (ETag) matches the specified tag. 
+ // + // If both of the x-amz-copy-source-if-match and + // x-amz-copy-source-if-unmodified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-match condition evaluates to true , and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false ; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. If both of - // the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since - // headers are present in the request as follows: x-amz-copy-source-if-none-match - // condition evaluates to false , and; x-amz-copy-source-if-modified-since - // condition evaluates to true ; Amazon S3 returns 412 Precondition Failed - // response code. + // Copies the object if it has been modified since the specified time. + // + // If both of the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false , and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true ; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. If both of the x-amz-copy-source-if-none-match and + // Copies the object if its entity tag (ETag) is different than the specified ETag. + // + // If both of the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request as - // follows: x-amz-copy-source-if-none-match condition evaluates to false , and; - // x-amz-copy-source-if-modified-since condition evaluates to true ; Amazon S3 - // returns 412 Precondition Failed response code. + // follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false , and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true ; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. If both - // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request as follows: x-amz-copy-source-if-match - // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since - // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. + // Copies the object if it hasn't been modified since the specified time. + // + // If both of the x-amz-copy-source-if-match and + // x-amz-copy-source-if-unmodified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-match condition evaluates to true , and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false ; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfUnmodifiedSince *time.Time // The range of bytes to copy from the source object. The range value must use the @@ -243,20 +369,26 @@ type UploadPartCopyInput struct { CopySourceRange *string // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). This functionality is not supported when the source object is in a - // directory bucket. + // AES256 ). + // + // This functionality is not supported when the source object is in a directory + // bucket. 
CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. This functionality is not supported - // when the source object is in a directory bucket. + // was used when the source object was created. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the source object is in a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string // The account ID of the expected destination bucket owner. If the account ID that @@ -273,15 +405,18 @@ type UploadPartCopyInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported when the destination bucket is a - // directory bucket. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -289,21 +424,25 @@ type UploadPartCopyInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. This - // functionality is not supported when the destination bucket is a directory + // encryption key specified in the initiate multipart upload request. + // + // This functionality is not supported when the destination bucket is a directory // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the destination bucket is a directory bucket. + // encryption key was transmitted without error. 
+ // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.DisableS3ExpressSessionAuth = ptr.Bool(true) } @@ -311,42 +450,44 @@ func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Container for all response elements. CopyPartResult *types.CopyPartResult // The version of the source object that was copied, if you have enabled - // versioning on the source bucket. This functionality is not supported when the - // source object is in a directory bucket. + // versioning on the source bucket. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. 
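The UploadPartCopy documentation above is written entirely in terms of REST headers and bucket policies, so a caller-side sketch may help. This is not part of the vendored change: it is a minimal example of driving a single-range multipart copy with this SDK, assuming the companion `config` package for credential loading; the bucket names, keys, and the 5 MiB range are placeholders, and error handling is abbreviated.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder names; replace with real buckets and keys.
	const (
		dstBucket = "amzn-s3-demo-dest-bucket"
		dstKey    = "reports/january-copy.pdf"
		copySrc   = "amzn-s3-demo-source-bucket/reports/january.pdf" // "<bucket>/<key>", URL-encoded
	)

	// 1. Start a multipart upload on the destination object.
	mpu, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String(dstBucket),
		Key:    aws.String(dstKey),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Copy a byte range of the source object as part 1 (parts other than
	//    the last must be at least 5 MiB).
	part, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:          aws.String(dstBucket),
		Key:             aws.String(dstKey),
		UploadId:        mpu.UploadId,
		PartNumber:      aws.Int32(1),
		CopySource:      aws.String(copySrc),
		CopySourceRange: aws.String("bytes=0-5242879"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 3. Complete the upload using the ETag returned for each copied part.
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(dstBucket),
		Key:      aws.String(dstKey),
		UploadId: mpu.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{
				ETag:       part.CopyPartResult.ETag,
				PartNumber: aws.Int32(1),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("multipart copy completed")
}
```

CopySource uses the `<bucket>/<key>` form documented for UploadPartCopyInput.CopySource; for a versioned source, `?versionId=` can be appended as the field docs above describe.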
@@ -377,25 +518,28 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -413,6 +557,18 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { return err } @@ -422,7 +578,7 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil { @@ -449,6 +605,18 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go index ac90ff70..9a9bbced 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go @@ -18,42 +18,54 @@ import ( "time" ) -// This operation is not supported by directory buckets. Passes transformed -// objects to a GetObject operation when using Object Lambda access points. For -// information about Object Lambda access points, see Transforming objects with -// Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) -// in the Amazon S3 User Guide. 
This operation supports metadata that can be -// returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// , in addition to RequestRoute , RequestToken , StatusCode , ErrorCode , and -// ErrorMessage . The GetObject response metadata is supported so that the -// WriteGetObjectResponse caller, typically an Lambda function, can provide the -// same metadata when it internally invokes GetObject . When WriteGetObjectResponse -// is called by a customer-owned Lambda function, the metadata returned to the end -// user GetObject call might differ from what Amazon S3 would normally return. You -// can include any number of metadata headers. When including a metadata header, it -// should be prefaced with x-amz-meta . For example, x-amz-meta-my-custom-header: -// MyCustomValue . The primary use case for this is to forward GetObject metadata. +// This operation is not supported for directory buckets. +// +// Passes transformed objects to a GetObject operation when using Object Lambda +// access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points]in the +// Amazon S3 User Guide. +// +// This operation supports metadata that can be returned by [GetObject], in addition to +// RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . The +// GetObject response metadata is supported so that the WriteGetObjectResponse +// caller, typically an Lambda function, can provide the same metadata when it +// internally invokes GetObject . When WriteGetObjectResponse is called by a +// customer-owned Lambda function, the metadata returned to the end user GetObject +// call might differ from what Amazon S3 would normally return. +// +// You can include any number of metadata headers. When including a metadata +// header, it should be prefaced with x-amz-meta . For example, +// x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to +// forward GetObject metadata. +// // Amazon Web Services provides some prebuilt Lambda functions that you can use // with S3 Object Lambda to detect and redact personally identifiable information // (PII) and decompress S3 objects. These Lambda functions are available in the // Amazon Web Services Serverless Application Repository, and can be selected // through the Amazon Web Services Management Console when you create your Object -// Lambda access point. Example 1: PII Access Control - This Lambda function uses -// Amazon Comprehend, a natural language processing (NLP) service using machine -// learning to find insights and relationships in text. It automatically detects -// personally identifiable information (PII) such as names, addresses, dates, -// credit card numbers, and social security numbers from documents in your Amazon -// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon -// Comprehend, a natural language processing (NLP) service using machine learning -// to find insights and relationships in text. It automatically redacts personally +// Lambda access point. +// +// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically detects personally // identifiable information (PII) such as names, addresses, dates, credit card // numbers, and social security numbers from documents in your Amazon S3 bucket. 
+// +// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically redacts personally +// identifiable information (PII) such as names, addresses, dates, credit card +// numbers, and social security numbers from documents in your Amazon S3 bucket. +// // Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is // equipped to decompress objects stored in S3 in one of six compressed file -// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For information -// on how to view and use these functions, see Using Amazon Web Services built -// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) -// in the Amazon S3 User Guide. +// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. +// +// For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3 +// User Guide. +// +// [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html +// [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) { if params == nil { params = &WriteGetObjectResponseInput{} @@ -88,7 +100,7 @@ type WriteGetObjectResponseInput struct { // The object data. Body io.Reader - // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for + // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for // server-side encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled *bool @@ -96,47 +108,67 @@ type WriteGetObjectResponseInput struct { CacheControl *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 32-bit CRC32 checksum of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 32-bit CRC32C checksum of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 160-bit SHA1 digest of the object returned by the Object Lambda // function. This may not match the checksum for the object stored in Amazon S3. // Amazon S3 will perform validation of the checksum values only when the original // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 256-bit SHA-256 digest of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 256-bit SHA256 digest of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Specifies presentational information for the object. @@ -160,7 +192,9 @@ type WriteGetObjectResponseInput struct { ContentType *string // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false ) - // a delete marker. + // a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // An opaque identifier assigned by a web server to a specific version of a @@ -205,8 +239,9 @@ type WriteGetObjectResponseInput struct { ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For - // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) - // . + // more information about S3 Object Lock, see [Object Lock]. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html ObjectLockMode types.ObjectLockMode // The date and time when Object Lock is configured to expire. @@ -216,12 +251,15 @@ type WriteGetObjectResponseInput struct { PartsCount *int32 // Indicates if request involves bucket that is either a source or destination in - // a Replication rule. For more information about S3 Replication, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html) - // . + // a Replication rule. For more information about S3 Replication, see [Replication]. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. 
+ // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides information about object restoration operation and expiration time of @@ -232,43 +270,59 @@ type WriteGetObjectResponseInput struct { // encryption key was specified for object stored in Amazon S3. SSECustomerAlgorithm *string - // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to - // encrypt data stored in S3. For more information, see Protecting data using - // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) - // . + // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to + // encrypt data stored in S3. For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]. + // + // [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string - // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web + // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web // Services Key Management Service (Amazon Web Services KMS) symmetric encryption // customer managed key that was used for stored in Amazon S3 object. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing requested object in + // The server-side encryption algorithm used when storing requested object in // Amazon S3 (for example, AES256, aws:kms ). ServerSideEncryption types.ServerSideEncryption // The integer status code for an HTTP response of a corresponding GetObject // request. The following is a list of status codes. + // // - 200 - OK + // // - 206 - Partial Content + // // - 304 - Not Modified + // // - 400 - Bad Request + // // - 401 - Unauthorized + // // - 403 - Forbidden + // // - 404 - Not Found + // // - 405 - Method Not Allowed + // // - 409 - Conflict + // // - 411 - Length Required + // // - 412 - Precondition Failed + // // - 416 - Range Not Satisfiable + // // - 500 - Internal Server Error + // // - 503 - Service Unavailable StatusCode *int32 // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . + // for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass // The number of tags, if any, on the object. 
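WriteGetObjectResponse is normally called from inside an S3 Object Lambda function, which is hard to see from the header-level documentation above. The sketch below is illustrative only and not part of the vendored change: the route and token values, the trivially "transformed" payload, and the metadata key are placeholders standing in for what the Object Lambda event would supply, and it also shows the Base64-encoded CRC32 format expected by the ChecksumCRC32 field.

```go
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/binary"
	"hash/crc32"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// respondTransformed returns a transformed object body to the waiting
// GetObject caller. route and token would come from the Object Lambda
// event (getObjectContext.outputRoute / outputToken).
func respondTransformed(ctx context.Context, client *s3.Client, route, token string, body []byte) error {
	// Base64-encoded, big-endian CRC32 of the payload, matching the
	// x-amz-checksum-crc32 wire format described in the field docs above.
	sum := make([]byte, 4)
	binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE(body))

	_, err := client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
		RequestRoute:  aws.String(route),
		RequestToken:  aws.String(token),
		StatusCode:    aws.Int32(200),
		ContentLength: aws.Int64(int64(len(body))),
		ChecksumCRC32: aws.String(base64.StdEncoding.EncodeToString(sum)),
		Body:          bytes.NewReader(body),
		Metadata: map[string]string{
			// Surfaced to the caller as x-amz-meta-my-custom-header.
			"my-custom-header": "MyCustomValue",
		},
	})
	return err
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder route/token and payload; real values arrive in the Lambda event.
	body := []byte(strings.ToUpper("hello from object lambda"))
	if err := respondTransformed(ctx, s3.NewFromConfig(cfg), "io-example-route", "example-request-token", body); err != nil {
		log.Fatal(err)
	}
}
```

As the field documentation notes, only one checksum header may be set per call; supplying several makes the request fail.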
@@ -314,28 +368,31 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddUnsignedPayloadMiddleware(stack); err != nil { + if err = addUnsignedPayload(stack); err != nil { return err } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + if err = addContentSHA256Header(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -353,6 +410,18 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { return err } @@ -365,7 +434,7 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil { @@ -389,6 +458,18 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go index 6ef631bd..6faedcc0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go @@ -8,16 +8,18 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, 
params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } -func bindAuthEndpointParams(params *AuthResolverParameters, input interface{}, options Options) { - params.endpointParams = bindEndpointParams(input, options) +func bindAuthEndpointParams(ctx context.Context, params *AuthResolverParameters, input interface{}, options Options) { + params.endpointParams = bindEndpointParams(ctx, input, options) } type setLegacyContextSigningOptionsMiddleware struct { @@ -98,13 +100,13 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthEndpointParams(params, input, options) - bindAuthParamsRegion(params, input, options) + bindAuthEndpointParams(ctx, params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -179,7 +181,10 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) @@ -191,6 +196,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -250,7 +258,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -260,12 +271,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -281,6 +300,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -290,6 +310,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) 
HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -310,9 +333,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go index 2be5df30..f1e5ac02 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go @@ -20,13 +20,23 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestxml_deserializeOpAbortMultipartUpload struct { } @@ -42,6 +52,10 @@ func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -58,6 +72,7 @@ func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -132,6 +147,10 @@ func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -176,6 +195,7 @@ func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -321,6 +341,19 @@ func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteM sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + 
sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -347,6 +380,19 @@ func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteM sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -415,6 +461,10 @@ func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -459,6 +509,7 @@ func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Conte } } + span.End() return out, metadata, err } @@ -623,6 +674,10 @@ func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -639,6 +694,7 @@ func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -701,6 +757,86 @@ func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutpu return nil } +type awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response, &metadata) + } + output := &CreateBucketMetadataTableConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + 
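One behavioral change in this deserializer file that is easy to miss is the new deserializeS3Expires helper added earlier in these hunks: an Expires response header that is not a valid HTTP date is now dropped (nil, nil) rather than failing deserialization of the whole response. The standalone sketch below only mirrors that lenient behavior for illustration; the real helper uses smithytime.ParseHTTPDate, which accepts more date layouts than the single net/http format used here.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// parseExpiresLeniently mimics deserializeS3Expires: parse failures are
// swallowed so a malformed Expires header becomes a nil timestamp rather
// than an error for the whole response.
func parseExpiresLeniently(v string) (*time.Time, error) {
	t, err := time.Parse(http.TimeFormat, v) // the SDK uses smithytime.ParseHTTPDate here
	if err != nil {
		return nil, nil
	}
	return &t, nil
}

func main() {
	for _, header := range []string{
		"Wed, 21 Oct 2015 07:28:00 GMT", // valid HTTP date
		"0",                             // malformed value (not an HTTP date)
	} {
		t, err := parseExpiresLeniently(header)
		fmt.Printf("%q -> %v (err: %v)\n", header, t, err)
	}
}
```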
+func awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestxml_deserializeOpCreateMultipartUpload struct { } @@ -716,6 +852,10 @@ func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -760,6 +900,7 @@ func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -836,6 +977,11 @@ func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMu v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.RequestCharged = types.RequestCharged(headerValues[0]) @@ -958,6 +1104,10 @@ func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -969,6 +1119,11 @@ func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co output := &CreateSessionOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -997,6 +1152,7 @@ func (m 
*awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -1043,6 +1199,37 @@ func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, m } } +func awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(v *CreateSessionOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -1100,6 +1287,10 @@ func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1117,6 +1308,7 @@ func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -1175,6 +1367,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1192,6 +1388,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -1250,6 +1447,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1267,6 +1468,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -1325,6 +1527,10 @@ func 
(m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1342,6 +1548,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx c } } + span.End() return out, metadata, err } @@ -1400,6 +1607,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) Ha return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1417,6 +1628,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) Ha } } + span.End() return out, metadata, err } @@ -1475,6 +1687,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1492,6 +1708,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -1550,6 +1767,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1567,6 +1788,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -1610,6 +1832,86 @@ func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Res } } +type awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } 
+ + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response, &metadata) + } + output := &DeleteBucketMetadataTableConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct { } @@ -1625,6 +1927,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1642,6 +1948,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -1700,6 +2007,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1717,6 +2028,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserializ } } + span.End() return out, metadata, err } @@ -1775,6 +2087,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", 
out.RawResponse)} @@ -1792,6 +2108,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -1850,6 +2167,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1867,6 +2188,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -1925,6 +2247,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1942,6 +2268,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -2000,6 +2327,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2017,6 +2348,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -2075,6 +2407,10 @@ func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2091,6 +2427,7 @@ func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -2176,6 +2513,10 @@ func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2220,6 +2561,7 @@ func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -2338,6 +2680,10 @@ func (m *awsRestxml_deserializeOpDeleteObjectTagging) 
HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2354,6 +2700,7 @@ func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx cont return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -2425,6 +2772,10 @@ func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2442,6 +2793,7 @@ func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -2500,6 +2852,10 @@ func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2544,6 +2900,7 @@ func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -2663,6 +3020,10 @@ func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2702,6 +3063,7 @@ func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -2808,6 +3170,10 @@ func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2847,6 +3213,7 @@ func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -2947,6 +3314,10 @@ func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := 
out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2986,6 +3357,7 @@ func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -3086,6 +3458,10 @@ func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3125,6 +3501,7 @@ func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -3225,6 +3602,10 @@ func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) Handl return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3264,6 +3645,7 @@ func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) Handl } } + span.End() return out, metadata, err } @@ -3364,6 +3746,10 @@ func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3403,6 +3789,7 @@ func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -3503,6 +3890,10 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3514,6 +3905,11 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial output := &GetBucketLifecycleConfigurationOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -3542,6 +3938,7 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -3585,6 +3982,18 @@ func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smit } } +func 
awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(v *GetBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3642,6 +4051,10 @@ func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3681,6 +4094,7 @@ func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx contex } } + span.End() return out, metadata, err } @@ -3788,6 +4202,10 @@ func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3827,6 +4245,7 @@ func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -3912,14 +4331,14 @@ func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLogging return nil } -type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { +type awsRestxml_deserializeOpGetBucketMetadataTableConfiguration struct { } -func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { +func (*awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3927,15 +4346,19 @@ func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) + return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response, &metadata) } - output := &GetBucketMetricsConfigurationOutput{} + output := &GetBucketMetadataTableConfigurationOutput{} out.Result = output var buff [1024]byte @@ -3956,7 +4379,7 @@ func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) + err = awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&output.GetBucketMetadataTableConfigurationResult, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3966,10 +4389,11 @@ func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserializ } } + span.End() return out, metadata, err } -func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4009,13 +4433,13 @@ func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithy } } -func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeOpDocumentGetBucketMetadataTableConfigurationOutput(v **GetBucketMetadataTableConfigurationOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *GetBucketMetricsConfigurationOutput + var sv *GetBucketMetadataTableConfigurationOutput if *v == nil { - sv = &GetBucketMetricsConfigurationOutput{} + sv = &GetBucketMetadataTableConfigurationOutput{} } else { sv = *v } @@ -4031,9 +4455,9 @@ func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **Get originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("MetricsConfiguration", t.Name.Local): + case strings.EqualFold("GetBucketMetadataTableConfigurationResult", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&sv.GetBucketMetadataTableConfigurationResult, nodeDecoder); err != nil { return err } @@ -4051,14 +4475,14 @@ func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **Get return nil } -type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { +type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { } -func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { +func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4066,15 +4490,19 @@ func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata) + return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) } - output := &GetBucketNotificationConfigurationOutput{} + output := &GetBucketMetricsConfigurationOutput{} out.Result = output var buff [1024]byte @@ -4095,7 +4523,7 @@ func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder) + err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4105,10 +4533,11 @@ func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeser } } + span.End() return out, metadata, err } -func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4148,13 +4577,13 @@ func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *s } } -func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *GetBucketNotificationConfigurationOutput + var sv *GetBucketMetricsConfigurationOutput if *v == nil { - sv = &GetBucketNotificationConfigurationOutput{} + sv = &GetBucketMetricsConfigurationOutput{} } else { sv = *v } @@ -4170,15 +4599,159 @@ func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("EventBridgeConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local): + case strings.EqualFold("MetricsConfiguration", t.Name.Local): nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata) + } + output := &GetBucketNotificationConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if 
len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketNotificationConfigurationOutput + if *v == nil { + sv = &GetBucketNotificationConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("EventBridgeConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil { return err } @@ -4223,6 +4796,10 @@ func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4262,6 +4839,7 @@ func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -4362,6 +4940,10 @@ func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context. return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4378,6 +4960,7 @@ func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context. 
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -4457,6 +5040,10 @@ func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4496,6 +5083,7 @@ func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -4596,6 +5184,10 @@ func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4635,6 +5227,7 @@ func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -4735,6 +5328,10 @@ func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4774,6 +5371,7 @@ func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -4881,6 +5479,10 @@ func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4920,6 +5522,7 @@ func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -5020,6 +5623,10 @@ func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5059,6 +5666,7 @@ func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -5179,6 +5787,10 @@ func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, 
"client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5218,6 +5830,7 @@ func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -5336,6 +5949,10 @@ func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5357,6 +5974,7 @@ func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Contex return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -5440,6 +6058,11 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -5450,6 +6073,11 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ContentDisposition = ptr.String(headerValues[0]) @@ -5504,12 +6132,17 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res } if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) + deserOverride, err := deserializeS3Expires(headerValues[0]) if err != nil { return err } - v.Expires = ptr.Time(t) + v.Expires = deserOverride + + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ExpiresString = ptr.String(headerValues[0]) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -5652,6 +6285,10 @@ func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5696,6 +6333,7 @@ func (m *awsRestxml_deserializeOpGetObjectAcl) 
HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -5817,6 +6455,10 @@ func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5861,6 +6503,7 @@ func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -6048,6 +6691,10 @@ func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6087,6 +6734,7 @@ func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -6187,6 +6835,10 @@ func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6226,6 +6878,7 @@ func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -6326,6 +6979,10 @@ func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6365,6 +7022,7 @@ func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -6465,6 +7123,10 @@ func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6509,6 +7171,7 @@ func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -6621,6 +7284,10 @@ func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() 
response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6642,6 +7309,7 @@ func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -6720,6 +7388,10 @@ func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6759,6 +7431,7 @@ func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -6859,6 +7532,10 @@ func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6875,6 +7552,7 @@ func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -6968,6 +7646,10 @@ func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6984,6 +7666,7 @@ func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -7069,6 +7752,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -7079,6 +7767,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := 
response.Header.Values("Content-Disposition"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ContentDisposition = ptr.String(headerValues[0]) @@ -7103,6 +7796,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ContentLength = ptr.Int64(vv) } + if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentRange = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ContentType = ptr.String(headerValues[0]) @@ -7128,12 +7826,17 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r } if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) + deserOverride, err := deserializeS3Expires(headerValues[0]) if err != nil { return err } - v.Expires = ptr.Time(t) + v.Expires = deserOverride + + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ExpiresString = ptr.String(headerValues[0]) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -7260,6 +7963,10 @@ func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeseri return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7299,6 +8006,7 @@ func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeseri } } + span.End() return out, metadata, err } @@ -7441,6 +8149,10 @@ func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) Han return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7480,6 +8192,7 @@ func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) Han } } + span.End() return out, metadata, err } @@ -7622,6 +8335,10 @@ func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeseri return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7661,6 +8378,7 @@ func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeseri } } + span.End() return out, metadata, err } @@ -7803,6 +8521,10 @@ func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, 
"client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7842,6 +8564,7 @@ func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserial } } + span.End() return out, metadata, err } @@ -7984,6 +8707,10 @@ func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8023,6 +8750,7 @@ func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Cont } } + span.End() return out, metadata, err } @@ -8094,12 +8822,38 @@ func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, de return err } + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + case strings.EqualFold("Owner", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { return err } + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -8129,6 +8883,10 @@ func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8168,6 +8926,7 @@ func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -8281,6 +9040,10 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8325,6 +9088,7 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -8580,6 +9344,10 @@ func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8624,6 +9392,7 @@ func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Cont } } + span.End() return out, metadata, err } @@ -8856,6 +9625,10 @@ func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8900,6 +9673,7 @@ func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -9162,6 +9936,10 @@ func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9206,6 +9984,7 @@ func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -9467,6 +10246,10 @@ func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9511,6 +10294,7 @@ func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Contex } } + span.End() return out, metadata, err } @@ -9628,6 +10412,19 @@ func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decode sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("Initiator", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { @@ -9773,6 +10570,10 @@ func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9790,6 +10591,7 @@ func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -9848,6 +10650,10 @@ func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, 
"OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9865,6 +10671,7 @@ func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -9923,6 +10730,10 @@ func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9940,6 +10751,7 @@ func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -9998,6 +10810,10 @@ func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10015,6 +10831,7 @@ func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -10073,6 +10890,10 @@ func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10090,6 +10911,7 @@ func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -10148,6 +10970,10 @@ func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) Handl return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10165,6 +10991,7 @@ func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) Handl } } + span.End() return out, metadata, err } @@ -10223,6 +11050,10 @@ func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10240,6 +11071,7 @@ func (m 
*awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -10298,6 +11130,10 @@ func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10309,12 +11145,12 @@ func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserial output := &PutBucketLifecycleConfigurationOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } + err = awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -10358,6 +11194,19 @@ func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smit } } +func awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(v *PutBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) + } + + return nil +} + type awsRestxml_deserializeOpPutBucketLogging struct { } @@ -10373,6 +11222,10 @@ func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10390,6 +11243,7 @@ func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -10448,6 +11302,10 @@ func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10465,6 +11323,7 @@ func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserializ } } + span.End() return out, metadata, err } @@ -10523,6 +11382,10 @@ func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer 
span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10540,6 +11403,7 @@ func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -10598,6 +11462,10 @@ func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10615,6 +11483,7 @@ func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -10673,6 +11542,10 @@ func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context. return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10690,6 +11563,7 @@ func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context. } } + span.End() return out, metadata, err } @@ -10748,6 +11622,10 @@ func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10765,6 +11643,7 @@ func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -10823,6 +11702,10 @@ func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10840,6 +11723,7 @@ func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -10898,6 +11782,10 @@ func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10915,6 +11803,7 @@ func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -10973,6 +11862,10 @@ func (m 
*awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10990,6 +11883,7 @@ func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -11048,6 +11942,10 @@ func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11065,6 +11963,7 @@ func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -11123,6 +12022,10 @@ func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11139,6 +12042,7 @@ func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Contex return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11172,6 +12076,18 @@ func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metad } errorBody.Seek(0, io.SeekStart) switch { + case strings.EqualFold("EncryptionTypeMismatch", errorCode): + return awsRestxml_deserializeErrorEncryptionTypeMismatch(response, errorBody) + + case strings.EqualFold("InvalidRequest", errorCode): + return awsRestxml_deserializeErrorInvalidRequest(response, errorBody) + + case strings.EqualFold("InvalidWriteOffset", errorCode): + return awsRestxml_deserializeErrorInvalidWriteOffset(response, errorBody) + + case strings.EqualFold("TooManyParts", errorCode): + return awsRestxml_deserializeErrorTooManyParts(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -11206,6 +12122,11 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -11216,6 +12137,11 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); 
len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ETag = ptr.String(headerValues[0]) @@ -11236,6 +12162,15 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.Size = ptr.Int64(vv) + } + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.SSECustomerAlgorithm = ptr.String(headerValues[0]) @@ -11279,6 +12214,10 @@ func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11295,6 +12234,7 @@ func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11369,6 +12309,10 @@ func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11385,6 +12329,7 @@ func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11456,6 +12401,10 @@ func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11472,6 +12421,7 @@ func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(c return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11543,6 +12493,10 @@ func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + 
defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11559,6 +12513,7 @@ func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11630,6 +12585,10 @@ func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11646,6 +12605,7 @@ func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11717,6 +12677,10 @@ func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11734,6 +12698,7 @@ func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -11792,6 +12757,10 @@ func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11808,6 +12777,7 @@ func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Co return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11887,6 +12857,10 @@ func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11898,6 +12872,7 @@ func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx cont output := &SelectObjectContentOutput{} out.Result = output + span.End() return out, metadata, err } @@ -11956,6 +12931,10 @@ func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") 
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11972,6 +12951,7 @@ func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -12039,6 +13019,11 @@ func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, r v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -12097,6 +13082,10 @@ func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -12141,6 +13130,7 @@ func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.C } } + span.End() return out, metadata, err } @@ -12287,6 +13277,10 @@ func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -12304,6 +13298,7 @@ func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx c } } + span.End() return out, metadata, err } @@ -12865,6 +13860,11 @@ func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Res return output } +func awsRestxml_deserializeErrorEncryptionTypeMismatch(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.EncryptionTypeMismatch{} + return output +} + func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidObjectState{} var buff [1024]byte @@ -12898,13 +13898,23 @@ func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response return output } -func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchBucket{} +func awsRestxml_deserializeErrorInvalidRequest(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequest{} return output } -func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchKey{} +func awsRestxml_deserializeErrorInvalidWriteOffset(response *smithyhttp.Response, errorBody 
*bytes.Reader) error { + output := &types.InvalidWriteOffset{} + return output +} + +func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchBucket{} + return output +} + +func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchKey{} return output } @@ -12928,6 +13938,11 @@ func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp. return output } +func awsRestxml_deserializeErrorTooManyParts(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyParts{} + return output +} + func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13677,6 +14692,19 @@ func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.No originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("BucketRegion", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.BucketRegion = ptr.String(xtv) + } + case strings.EqualFold("CreationDate", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -13909,6 +14937,19 @@ func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxm sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -13935,6 +14976,19 @@ func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxm sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -14256,6 +15310,19 @@ func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -14282,6 +15349,19 @@ func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -14374,6 +15454,19 @@ func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, deco sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + 
if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -15210,6 +16303,42 @@ func awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionC return nil } +func awsRestxml_deserializeDocumentEncryptionTypeMismatch(v **types.EncryptionTypeMismatch, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EncryptionTypeMismatch + if *v == nil { + sv = &types.EncryptionTypeMismatch{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15298,6 +16427,68 @@ func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.Node return nil } +func awsRestxml_deserializeDocumentErrorDetails(v **types.ErrorDetails, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ErrorDetails + if *v == nil { + sv = &types.ErrorDetails{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ErrorCode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ErrorCode = ptr.String(xtv) + } + + case strings.EqualFold("ErrorMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ErrorMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15790,6 +16981,67 @@ func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule *v = sv return nil } +func awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(v **types.GetBucketMetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.GetBucketMetadataTableConfigurationResult + if *v == nil { + sv = &types.GetBucketMetadataTableConfigurationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Error", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorDetails(&sv.Error, nodeDecoder); 
err != nil { + return err + } + + case strings.EqualFold("MetadataTableConfigurationResult", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetadataTableConfigurationResult(&sv.MetadataTableConfigurationResult, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectAttributesParts, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -16565,6 +17817,78 @@ func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectSta return nil } +func awsRestxml_deserializeDocumentInvalidRequest(v **types.InvalidRequest, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidRequest + if *v == nil { + sv = &types.InvalidRequest{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInvalidWriteOffset(v **types.InvalidWriteOffset, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidWriteOffset + if *v == nil { + sv = &types.InvalidWriteOffset{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -17525,12 +18849,17 @@ func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleR return nil } -func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeDocumentLifecycleRuleFilter(v **types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var uv types.LifecycleRuleFilter - var memberFound bool + var sv *types.LifecycleRuleFilter + if *v == nil { + sv = &types.LifecycleRuleFilter{} + } else { + sv = *v + } + for { t, done, err := decoder.Token() if err != nil { @@ -17539,27 +18868,16 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if done { break } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } originalDecoder := 
decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { case strings.EqualFold("And", t.Name.Local): - var mv types.LifecycleRuleAndOperator nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&sv.And, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.LifecycleRuleFilterMemberAnd{Value: mv} - memberFound = true case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): - var mv int64 val, err := decoder.Value() if err != nil { return err @@ -17573,13 +18891,10 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if err != nil { return err } - mv = i64 + sv.ObjectSizeGreaterThan = ptr.Int64(i64) } - uv = &types.LifecycleRuleFilterMemberObjectSizeGreaterThan{Value: mv} - memberFound = true case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): - var mv int64 val, err := decoder.Value() if err != nil { return err @@ -17593,13 +18908,10 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if err != nil { return err } - mv = i64 + sv.ObjectSizeLessThan = ptr.Int64(i64) } - uv = &types.LifecycleRuleFilterMemberObjectSizeLessThan{Value: mv} - memberFound = true case strings.EqualFold("Prefix", t.Name.Local): - var mv string val, err := decoder.Value() if err != nil { return err @@ -17609,30 +18921,26 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil } { xtv := string(val) - mv = xtv + sv.Prefix = ptr.String(xtv) } - uv = &types.LifecycleRuleFilterMemberPrefix{Value: mv} - memberFound = true case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.LifecycleRuleFilterMemberTag{Value: mv} - memberFound = true default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } } decoder = originalDecoder } - *v = uv + *v = sv return nil } @@ -17778,6 +19086,48 @@ func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, deco return nil } +func awsRestxml_deserializeDocumentMetadataTableConfigurationResult(v **types.MetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetadataTableConfigurationResult + if *v == nil { + sv = &types.MetadataTableConfigurationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3TablesDestinationResult", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentS3TablesDestinationResult(&sv.S3TablesDestinationResult, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = 
originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -18146,6 +19496,19 @@ func awsRestxml_deserializeDocumentMultipartUpload(v **types.MultipartUpload, de sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("Initiated", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -18731,6 +20094,19 @@ func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.No return err } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19230,6 +20606,19 @@ func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smit sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19332,6 +20721,19 @@ func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decode return err } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19797,6 +21199,19 @@ func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDe sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -20824,12 +22239,17 @@ func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v **types.Replicat return nil } -func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeDocumentReplicationRuleFilter(v **types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var uv types.ReplicationRuleFilter - var memberFound bool + var sv *types.ReplicationRuleFilter + if *v == nil { + sv = &types.ReplicationRuleFilter{} + } else { + sv = *v + } + for { t, done, err := decoder.Token() if err != nil { @@ -20838,27 +22258,16 @@ func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRul if done { break } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { case strings.EqualFold("And", t.Name.Local): - var mv 
types.ReplicationRuleAndOperator nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&sv.And, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.ReplicationRuleFilterMemberAnd{Value: mv} - memberFound = true case strings.EqualFold("Prefix", t.Name.Local): - var mv string val, err := decoder.Value() if err != nil { return err @@ -20868,30 +22277,26 @@ func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRul } { xtv := string(val) - mv = xtv + sv.Prefix = ptr.String(xtv) } - uv = &types.ReplicationRuleFilterMemberPrefix{Value: mv} - memberFound = true case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.ReplicationRuleFilterMemberTag{Value: mv} - memberFound = true default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } } decoder = originalDecoder } - *v = uv + *v = sv return nil } @@ -21298,6 +22703,94 @@ func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder sm return nil } +func awsRestxml_deserializeDocumentS3TablesDestinationResult(v **types.S3TablesDestinationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.S3TablesDestinationResult + if *v == nil { + sv = &types.S3TablesDestinationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TableArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableArn = ptr.String(xtv) + } + + case strings.EqualFold("TableBucketArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableBucketArn = ptr.String(xtv) + } + + case strings.EqualFold("TableName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableName = ptr.String(xtv) + } + + case strings.EqualFold("TableNamespace", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableNamespace = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -22370,6 +23863,42 @@ func awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, deco *v = sv return nil } +func 
awsRestxml_deserializeDocumentTooManyParts(v **types.TooManyParts, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TooManyParts + if *v == nil { + sv = &types.TooManyParts{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go index 91af48fc..bb5f4cf4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/aws/aws-sdk-go-v2/aws" smithyauth "github.com/aws/smithy-go/auth" ) @@ -18,6 +19,16 @@ func (r *endpointAuthResolver) ResolveAuthSchemes( ) ( []*smithyauth.Option, error, ) { + if params.endpointParams.Region == nil { + // #2502: We're correcting the endpoint binding behavior to treat empty + // Region as "unset" (nil), but auth resolution technically doesn't + // care and someone could be using V1 or non-default V2 endpoint + // resolution, both of which would bypass the required-region check. + // They shouldn't be broken because the region is technically required + // by this service's endpoint-based auth resolver, so we stub it here. + params.endpointParams.Region = aws.String("") + } + opts, err := r.resolveAuthSchemes(ctx, params) if err != nil { return nil, err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go index a1f2e36d..9734372c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -20,6 +20,7 @@ import ( "github.com/aws/smithy-go/endpoints/private/rulesfn" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -228,6 +229,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + // EndpointParameters provides the parameters that influence how endpoints are // resolved. type EndpointParameters struct { @@ -319,6 +327,13 @@ type EndpointParameters struct { // is required. Prefix *string + // The Copy Source used for Copy Object request. This is an optional parameter that + // will be set automatically for operations that are scoped to Copy + // Source. + // + // Parameter is required. + CopySource *string + // Internal parameter to disable Access Point Buckets // // Parameter is required. 
@@ -412,6 +427,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -689,15 +715,61 @@ func (r *resolver) ResolveEndpoint( _UseS3ExpressControlEndpoint := *exprVal _ = _UseS3ExpressControlEndpoint if _UseS3ExpressControlEndpoint == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - if !(params.Endpoint != nil) { - if _UseFIPS == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + if !(params.Endpoint != nil) { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } uriString := func() string { var out strings.Builder - out.WriteString("https://s3express-control-fips.") + out.WriteString("https://s3express-control.") out.WriteString(_Region) - out.WriteString(".amazonaws.com/") + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") out.WriteString(_uri_encoded_bucket) return out.String() }() @@ -732,70 +804,80 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") } return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") } } if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } uriString := func() string { var out strings.Builder out.WriteString("https://") out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") + out.WriteString(".s3express-") out.WriteString(_s3expressAvailabilityZoneId) out.WriteString(".") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) return out.String() }() @@ -829,67 +911,69 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); 
exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } uriString := func() string { var out strings.Builder out.WriteString("https://") out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") + out.WriteString(".s3express-") out.WriteString(_s3expressAvailabilityZoneId) out.WriteString(".") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) return out.String() }() @@ -923,70 +1007,1828 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - 
SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil } } + if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + 
out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + 
out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + 
}, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: 
func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + 
return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = _Bucket + if exprVal := rulesfn.SubString(_Bucket, 0, 7, true); exprVal != nil { + _accessPointSuffix := *exprVal + _ = _accessPointSuffix + if _accessPointSuffix == "--xa-s3" { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support Dual-stack.") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support S3 Accelerate.") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + 
out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if exprVal := rulesfn.SubString(_Bucket, 7, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, 
[]*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 16, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 16, 18, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out 
smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 21, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 21, 23, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + 
out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 27, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 27, 29, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + 
var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + 
smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { + if exprVal := rulesfn.SubString(_Bucket, 7, 16, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 16, 18, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if 
_s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 21, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 21, 23, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + 
smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } uriString := func() string { var out strings.Builder out.WriteString("https://") out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") + out.WriteString(".s3express-") out.WriteString(_s3expressAvailabilityZoneId) out.WriteString(".") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) return out.String() }() @@ -1020,67 +2862,69 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 27, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 27, 29, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } uriString := func() 
string { var out strings.Builder out.WriteString("https://") out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") + out.WriteString(".s3express-") out.WriteString(_s3expressAvailabilityZoneId) out.WriteString(".") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) return out.String() }() @@ -1114,51 +2958,11 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil } } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") } return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") } @@ -1169,18 +2973,62 @@ func (r *resolver) ResolveEndpoint( _UseS3ExpressControlEndpoint := *exprVal _ = _UseS3ExpressControlEndpoint if _UseS3ExpressControlEndpoint == true { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == 
true { uriString := func() string { var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) return out.String() }() @@ -1214,13 +3062,12 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - } - if _UseFIPS == true { uriString := func() string { var out strings.Builder - out.WriteString("https://s3express-control-fips.") + out.WriteString("https://s3express-control.") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) return out.String() }() @@ -1254,43 +3101,7 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") } } } @@ -1345,6 +3156,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -1388,6 +3212,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -1439,6 +3276,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -1484,6 +3334,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -3677,8 +5540,8 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support S3 Accelerate") } if exprVal := _bucketArn.ResourceId.Get(4); exprVal != nil { - _var_275 := *exprVal - _ = _var_275 + _var_321 := *exprVal + _ = _var_321 return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Arn: Outpost Access Point ARN contains sub resources") } if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { @@ -3748,6 +5611,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -3793,6 
+5669,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -3895,8 +5784,8 @@ func (r *resolver) ResolveEndpoint( } if _ForcePathStyle == true { if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { - _var_288 := *exprVal - _ = _var_288 + _var_334 := *exprVal + _ = _var_334 return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with ARN buckets") } } @@ -5665,10 +7554,10 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} - params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.Region = bindRegion(options.Region) params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) params.Endpoint = options.BaseEndpoint @@ -5697,6 +7586,9 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } @@ -5710,12 +7602,17 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -5737,8 +7634,11 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + ctx = setS3ResolvedURI(ctx, endpt.URI.String()) + backend := s3cust.GetPropertiesBackend(&endpt.Properties) ctx = internalcontext.SetS3Backend(ctx, backend) + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go index 18d6c06a..7c7a7b42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go 
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go @@ -24,14 +24,9 @@ func finalizeExpressCredentials(o *Options, c *Client) { } // Operation config finalizer: update the sigv4 credentials on the default -// express provider if it changed to ensure different cache keys +// express provider in case it changed to ensure different cache keys func finalizeOperationExpressCredentials(o *Options, c Client) { - p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider) - if !ok { - return - } - - if c.options.Credentials != o.Credentials { + if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { o.ExpressCredentials = p.CloneWithBaseCredentials(o.Credentials) } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go new file mode 100644 index 00000000..a9b54535 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "context" + "strings" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" +) + +// isExpressUserAgent tracks whether the caller is using S3 Express +// +// we can only derive this at runtime, so the middleware needs to hold a handle +// to the underlying user-agent manipulator to set the feature flag as +// necessary +type isExpressUserAgent struct { + ua *awsmiddleware.RequestUserAgent +} + +func (*isExpressUserAgent) ID() string { + return "isExpressUserAgent" +} + +func (m *isExpressUserAgent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + const expressSuffix = "--x-s3" + + bucket, ok := bucketFromInput(in.Parameters) + if ok && strings.HasSuffix(bucket, expressSuffix) { + m.ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureS3ExpressBucket) + } + return next.HandleSerialize(ctx, in) +} + +func addIsExpressUserAgent(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Serialize.Add(&isExpressUserAgent{ua}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json index 4e666764..fa0119bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -9,8 +9,7 @@ "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4" + "github.com/aws/smithy-go": "v1.4.0" }, "files": [ "api_client.go", @@ -19,6 +18,7 @@ "api_op_CompleteMultipartUpload.go", "api_op_CopyObject.go", "api_op_CreateBucket.go", + "api_op_CreateBucketMetadataTableConfiguration.go", "api_op_CreateMultipartUpload.go", "api_op_CreateSession.go", "api_op_DeleteBucket.go", @@ -28,6 +28,7 @@ "api_op_DeleteBucketIntelligentTieringConfiguration.go", "api_op_DeleteBucketInventoryConfiguration.go", "api_op_DeleteBucketLifecycle.go", + "api_op_DeleteBucketMetadataTableConfiguration.go", "api_op_DeleteBucketMetricsConfiguration.go", "api_op_DeleteBucketOwnershipControls.go", "api_op_DeleteBucketPolicy.go", @@ 
-48,6 +49,7 @@ "api_op_GetBucketLifecycleConfiguration.go", "api_op_GetBucketLocation.go", "api_op_GetBucketLogging.go", + "api_op_GetBucketMetadataTableConfiguration.go", "api_op_GetBucketMetricsConfiguration.go", "api_op_GetBucketNotificationConfiguration.go", "api_op_GetBucketOwnershipControls.go", @@ -123,13 +125,15 @@ "options.go", "protocol_test.go", "serializers.go", + "snapshot_test.go", + "sra_operation_order_test.go", "types/enums.go", "types/errors.go", "types/types.go", "types/types_exported_test.go", "validators.go" ], - "go": "1.15", + "go": "1.22", "module": "github.com/aws/aws-sdk-go-v2/service/s3", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go index bff6ac9a..1cac5025 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -3,4 +3,4 @@ package s3 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.48.1" +const goModuleVersion = "1.79.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go index f3e6b075..38443a83 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go @@ -89,6 +89,7 @@ func New() *Resolver { var partitionRegexp = struct { Aws *regexp.Regexp AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp AwsIsoE *regexp.Regexp @@ -96,8 +97,9 @@ var partitionRegexp = struct { AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), @@ -252,6 +254,24 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-5.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-7.amazonaws.com", + }, endpoints.EndpointKey{ Region: "aws-global", }: endpoints.Endpoint{ @@ -460,6 +480,15 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "s3.dualstack.me-south-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.mx-central-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "s3-external-1", }: endpoints.Endpoint{ @@ -641,6 +670,27 @@ var defaultPartitions = endpoints.Partitions{ }, }, }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: 
"s3-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, { ID: "aws-iso", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ @@ -773,19 +823,24 @@ var defaultPartitions = endpoints.Partitions{ Variant: endpoints.FIPSVariant, }: { Hostname: "s3-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, { Variant: 0, }: { Hostname: "s3.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, }, RegionRegex: partitionRegexp.AwsIsoE, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "eu-isoe-west-1", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-iso-f", @@ -794,19 +849,27 @@ var defaultPartitions = endpoints.Partitions{ Variant: endpoints.FIPSVariant, }: { Hostname: "s3-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, { Variant: 0, }: { Hostname: "s3.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, }, RegionRegex: partitionRegexp.AwsIsoF, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isof-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isof-south-1", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-us-gov", @@ -947,6 +1010,19 @@ func GetDNSSuffix(id string, options Options) (string, error) { } + case strings.EqualFold(id, "aws-eusc"): + switch variant { + case endpoints.FIPSVariant: + return "amazonaws.eu", nil + + case 0: + return "amazonaws.eu", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + case strings.EqualFold(id, "aws-iso"): switch variant { case endpoints.FIPSVariant: @@ -1034,6 +1110,9 @@ func GetDNSSuffixFromRegion(region string, options Options) (string, error) { case partitionRegexp.AwsCn.MatchString(region): return GetDNSSuffix("aws-cn", options) + case partitionRegexp.AwsEusc.MatchString(region): + return GetDNSSuffix("aws-eusc", options) + case partitionRegexp.AwsIso.MatchString(region): return GetDNSSuffix("aws-iso", options) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go index 064bcefb..6f29b807 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go @@ -12,7 +12,9 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -50,6 +52,10 @@ type Options struct { // clients initial default settings. 
DefaultsMode aws.DefaultsMode + // Disables logging when the client skips output checksum validation due to lack + // of algorithm support. + DisableLogOutputChecksumValidationSkipped bool + // Allows you to disable S3 Multi-Region access points feature. DisableMultiRegionAccessPoints bool @@ -65,8 +71,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -82,23 +90,35 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string + // Indicates how user opt-in/out request checksum calculation + RequestChecksumCalculation aws.RequestChecksumCalculation + + // Indicates how user opt-in/out response checksum validation + ResponseChecksumValidation aws.ResponseChecksumValidation + // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -113,6 +133,9 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // Allows you to enable arn region support for the service. UseARNRegion bool @@ -141,8 +164,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. 
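The Options additions above (MeterProvider, TracerProvider, RequestChecksumCalculation, ResponseChecksumValidation, DisableLogOutputChecksumValidationSkipped) are easiest to see in a client-construction sketch. The snippet below assumes typical aws-sdk-go-v2 setup; the checksum-mode constant names in the aws package are my assumption and are not part of this diff.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		// Assumed constant names: only compute request checksums and validate
		// response checksums when the operation requires them.
		o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
		o.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired

		// Silence the log line emitted when output checksum validation is
		// skipped because the algorithm is unsupported (field shown above).
		o.DisableLogOutputChecksumValidationSkipped = true

		// o.MeterProvider and o.TracerProvider accept smithy-go metrics and
		// tracing implementations (for example OpenTelemetry adapters); the
		// defaults are no-op providers.
	})
	_ = client
}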
If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -193,6 +217,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go new file mode 100644 index 00000000..491ed2e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go @@ -0,0 +1,419 @@ +package s3 + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const ( + algorithmHeader = "X-Amz-Algorithm" + credentialHeader = "X-Amz-Credential" + dateHeader = "X-Amz-Date" + tokenHeader = "X-Amz-Security-Token" + signatureHeader = "X-Amz-Signature" + + algorithm = "AWS4-HMAC-SHA256" + aws4Request = "aws4_request" + bucketHeader = "bucket" + defaultExpiresIn = 15 * time.Minute + shortDateLayout = "20060102" +) + +// PresignPostObject is a special kind of [presigned request] used to send a request using +// form data, likely from an HTML form on a browser. +// Unlike other presigned operations, the return values of this function are not meant to be used directly +// to make an HTTP request but rather to be used as inputs to a form. 
See [the docs] for more information +// on how to use these values +// +// [presigned request] https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html +// [the docs] https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html +func (c *PresignClient) PresignPostObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignPostOptions)) (*PresignedPostRequest, error) { + if params == nil { + params = &PutObjectInput{} + } + clientOptions := c.options.copy() + options := PresignPostOptions{ + Expires: clientOptions.Expires, + PostPresigner: &postSignAdapter{}, + } + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(clientOptions.ClientOptions, withNopHTTPClientAPIOption) + cvt := presignPostConverter(options) + result, _, err := c.client.invokeOperation(ctx, "$type:L", params, clientOptFns, + c.client.addOperationPutObjectMiddlewares, + cvt.ConvertToPresignMiddleware, + func(stack *middleware.Stack, options Options) error { + return awshttp.RemoveContentTypeHeader(stack) + }, + ) + if err != nil { + return nil, err + } + + out := result.(*PresignedPostRequest) + return out, nil +} + +// PresignedPostRequest represents a presigned request to be sent using HTTP verb POST and FormData +type PresignedPostRequest struct { + // Represents the Base URL to make a request to + URL string + // Values is a key-value map of values to be sent as FormData + // these values are not encoded + Values map[string]string +} + +// postSignAdapter adapter to implement the presignPost interface +type postSignAdapter struct{} + +// PresignPost creates a special kind of [presigned request] +// to be used with HTTP verb POST. +// It differs from PUT request mostly on +// 1. It accepts a new set of parameters, `Conditions[]`, that are used to create a policy doc to limit where an object can be posted to +// 2. The return value needs to have more processing since it's meant to be sent via a form and not stand on its own +// 3. There's no body to be signed, since that will be attached when the actual request is made +// 4. 
The signature is made based on the policy document, not the whole request +// More information can be found at https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html +// +// [presigned request] https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html +func (s *postSignAdapter) PresignPost( + credentials aws.Credentials, + bucket string, key string, + region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, optFns ...func(*v4.SignerOptions), +) (fields map[string]string, err error) { + credentialScope := buildCredentialScope(signingTime, region, service) + credentialStr := credentials.AccessKeyID + "/" + credentialScope + + policyDoc, err := createPolicyDocument(expirationTime, signingTime, bucket, key, credentialStr, &credentials.SessionToken, conditions) + if err != nil { + return nil, err + } + + signature := buildSignature(policyDoc, credentials.SecretAccessKey, service, region, signingTime) + + fields = getPostSignRequiredFields(signingTime, credentialStr, credentials) + fields[signatureHeader] = signature + fields["key"] = key + fields["policy"] = policyDoc + + return fields, nil +} + +func getPostSignRequiredFields(t time.Time, credentialStr string, awsCredentials aws.Credentials) map[string]string { + fields := map[string]string{ + algorithmHeader: algorithm, + dateHeader: t.UTC().Format("20060102T150405Z"), + credentialHeader: credentialStr, + } + + sessionToken := awsCredentials.SessionToken + if len(sessionToken) > 0 { + fields[tokenHeader] = sessionToken + } + + return fields +} + +// PresignPost defines the interface to presign a POST request +type PresignPost interface { + PresignPost( + credentials aws.Credentials, + bucket string, key string, + region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (fields map[string]string, err error) +} + +// PresignPostOptions represent the options to be passed to a PresignPost sign request +type PresignPostOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // PostPresigner to use. One will be created if none is provided + PostPresigner PresignPost + + // Expires sets the expiration duration for the generated presign url. This should + // be the duration in seconds the presigned URL should be considered valid for. If + // not set or set to zero, presign url would default to expire after 900 seconds. + Expires time.Duration + + // Conditions a list of extra conditions to pass to the policy document + // Available conditions can be found [here] + // + // [here]https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions + Conditions []interface{} +} + +type presignPostConverter PresignPostOptions + +// presignPostRequestMiddlewareOptions is the options for the presignPostRequestMiddleware middleware. +type presignPostRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Presigner PresignPost + LogSigning bool + ExpiresIn time.Duration + Conditions []interface{} +} + +type presignPostRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + presigner PresignPost + logSigning bool + expiresIn time.Duration + conditions []interface{} +} + +// newPresignPostRequestMiddleware returns a new presignPostRequestMiddleware +// initialized with the presigner. 
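A minimal usage sketch for the PresignPostObject API defined in this file, under assumed placeholder bucket, key, and payload values: the presigned URL becomes the form action and the returned Values become multipart form fields, with the file content appended last, as a browser form would submit it.

package main

import (
	"bytes"
	"context"
	"log"
	"mime/multipart"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg))
	post, err := presigner.PresignPostObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"),   // placeholder
		Key:    aws.String("uploads/hello.txt"), // placeholder
	}, func(o *s3.PresignPostOptions) {
		o.Expires = 10 * time.Minute // defaults to 15 minutes when unset
	})
	if err != nil {
		log.Fatal(err)
	}

	// post.URL is the form action; post.Values are the unencoded form fields.
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	for k, v := range post.Values {
		_ = w.WriteField(k, v)
	}
	// S3 expects the file content as the last field of the form.
	part, _ := w.CreateFormFile("file", "hello.txt")
	_, _ = part.Write([]byte("hello world"))
	_ = w.Close()

	resp, err := http.Post(post.URL, w.FormDataContentType(), &body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("upload status:", resp.Status)
}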
+func newPresignPostRequestMiddleware(options presignPostRequestMiddlewareOptions) *presignPostRequestMiddleware { + return &presignPostRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + presigner: options.Presigner, + logSigning: options.LogSigning, + expiresIn: options.ExpiresIn, + conditions: options.Conditions, + } +} + +// ID provides the middleware ID. +func (*presignPostRequestMiddleware) ID() string { return "PresignPostRequestMiddleware" } + +// HandleFinalize will take the provided input and create a presigned url for +// the http request using the SigV4 presign authentication scheme. +// +// Since the signed request is not a valid HTTP request +func (s *presignPostRequestMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + + input := getOperationInput(ctx) + asS3Put, ok := input.(*PutObjectInput) + if !ok { + return out, metadata, fmt.Errorf("expected PutObjectInput") + } + bucketName, ok := asS3Put.bucket() + if !ok { + return out, metadata, fmt.Errorf("requested input bucketName not found on request") + } + uploadKey := asS3Put.Key + if uploadKey == nil { + return out, metadata, fmt.Errorf("PutObject input does not have a key input") + } + + uri := getS3ResolvedURI(ctx) + + signingName := awsmiddleware.GetSigningName(ctx) + signingRegion := awsmiddleware.GetSigningRegion(ctx) + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &v4.SigningError{ + Err: fmt.Errorf("failed to retrieve credentials: %w", err), + } + } + skew := internalcontext.GetAttemptSkewContext(ctx) + signingTime := sdk.NowTime().Add(skew) + expirationTime := signingTime.Add(s.expiresIn).UTC() + + fields, err := s.presigner.PresignPost( + credentials, + bucketName, + *uploadKey, + signingRegion, + signingName, + signingTime, + s.conditions, + expirationTime, + func(o *v4.SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }) + if err != nil { + return out, metadata, &v4.SigningError{ + Err: fmt.Errorf("failed to sign http request, %w", err), + } + } + + out.Result = &PresignedPostRequest{ + URL: uri, + Values: fields, + } + + return out, metadata, nil +} + +// Adapted from existing PresignConverter middleware +func (c presignPostConverter) ConvertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Build.Remove("UserAgent") + stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) + stack.Finalize.Remove((*retry.Attempt)(nil).ID()) + stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) + stack.Deserialize.Clear() + + if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { + return err + } + + // if no expiration is set, set one + expiresIn := c.Expires + if expiresIn == 0 { + expiresIn = defaultExpiresIn + } + + pmw := newPresignPostRequestMiddleware(presignPostRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.PostPresigner, + LogSigning: options.ClientLogMode.IsSigning(), + ExpiresIn: expiresIn, + Conditions: c.Conditions, + }) + if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { + return err + } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigningMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func 
createPolicyDocument(expirationTime time.Time, signingTime time.Time, bucket string, key string, credentialString string, securityToken *string, extraConditions []interface{}) (string, error) { + initialConditions := []interface{}{ + map[string]string{ + algorithmHeader: algorithm, + }, + map[string]string{ + bucketHeader: bucket, + }, + map[string]string{ + credentialHeader: credentialString, + }, + map[string]string{ + dateHeader: signingTime.UTC().Format("20060102T150405Z"), + }, + } + + var conditions []interface{} + for _, v := range initialConditions { + conditions = append(conditions, v) + } + + if securityToken != nil && *securityToken != "" { + conditions = append(conditions, map[string]string{ + tokenHeader: *securityToken, + }) + } + + // append user-defined conditions at the end + conditions = append(conditions, extraConditions...) + + // The policy allows you to set a "key" value to specify what's the name of the + // key to add. Customers can add one by specifying one in their conditions, + // so we're checking if one has already been set. + // If none is found, restrict this to just the key name passed on the request + // This can be disabled by adding a condition that explicitly allows + // everything + if !isAlreadyCheckingForKey(conditions) { + conditions = append(conditions, map[string]string{"key": key}) + } + + policyDoc := map[string]interface{}{ + "conditions": conditions, + "expiration": expirationTime.Format(time.RFC3339), + } + + jsonBytes, err := json.Marshal(policyDoc) + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(jsonBytes), nil +} + +func isAlreadyCheckingForKey(conditions []interface{}) bool { + // Need to check for two conditions: + // 1. A condition of the form ["starts-with", "$key", "mykey"] + // 2. 
A condition of the form {"key": "mykey"} + for _, c := range conditions { + slice, ok := c.([]interface{}) + if ok && len(slice) > 1 { + if slice[0] == "starts-with" && slice[1] == "$key" { + return true + } + } + m, ok := c.(map[string]interface{}) + if ok && len(m) > 0 { + for k := range m { + if k == "key" { + return true + } + } + } + // Repeat this but for map[string]string due to type constrains + ms, ok := c.(map[string]string) + if ok && len(ms) > 0 { + for k := range ms { + if k == "key" { + return true + } + } + } + } + return false +} + +// these methods have been copied from v4 implementation since they are not exported for public use +func hmacsha256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func buildSignature(strToSign, secret, service, region string, t time.Time) string { + key := deriveKey(secret, service, region, t) + return hex.EncodeToString(hmacsha256(key, []byte(strToSign))) +} + +func deriveKey(secret, service, region string, t time.Time) []byte { + hmacDate := hmacsha256([]byte("AWS4"+secret), []byte(t.UTC().Format(shortDateLayout))) + hmacRegion := hmacsha256(hmacDate, []byte(region)) + hmacService := hmacsha256(hmacRegion, []byte(service)) + return hmacsha256(hmacService, []byte(aws4Request)) +} + +func buildCredentialScope(signingTime time.Time, region, service string) string { + return strings.Join([]string{ + signingTime.UTC().Format(shortDateLayout), + region, + service, + aws4Request, + }, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go index 59524bdc..c37fab1e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go @@ -12,6 +12,7 @@ import ( smithyxml "github.com/aws/smithy-go/encoding/xml" "github.com/aws/smithy-go/middleware" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "strconv" @@ -28,6 +29,10 @@ func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string { func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -64,6 +69,8 @@ func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error { @@ -71,11 +78,16 @@ func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipa return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if v.IfMatchInitiatedTime != nil { + locationName := "X-Amz-If-Match-Initiated-Time" + 
encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchInitiatedTime)) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -107,6 +119,10 @@ func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string { func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -118,7 +134,7 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CompleteMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -167,6 +183,8 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error { @@ -174,31 +192,51 @@ func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteM return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if len(v.ChecksumType) > 0 { + locationName := "X-Amz-Checksum-Type" + encoder.SetHeader(locationName).String(string(v.ChecksumType)) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if v.IfMatch != nil { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfNoneMatch != nil { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -208,22 +246,27 @@ func 
awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteM } } + if v.MpuObjectSize != nil { + locationName := "X-Amz-Mp-Object-Size" + encoder.SetHeader(locationName).Long(*v.MpuObjectSize) + } + if len(v.RequestPayer) > 0 { locationName := "X-Amz-Request-Payer" encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -245,6 +288,10 @@ func (*awsRestxml_serializeOpCopyObject) ID() string { func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -281,6 +328,8 @@ func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error { @@ -298,7 +347,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } @@ -308,32 +357,32 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if v.ContentDisposition != nil { locationName := "Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentType != nil && len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } - if v.CopySource != nil && len(*v.CopySource) > 0 { + if v.CopySource != nil { locationName := "X-Amz-Copy-Source" encoder.SetHeader(locationName).String(*v.CopySource) } - if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + if v.CopySourceIfMatch != nil { locationName := "X-Amz-Copy-Source-If-Match" 
encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) } @@ -343,7 +392,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) } - if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + if v.CopySourceIfNoneMatch != nil { locationName := "X-Amz-Copy-Source-If-None-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) } @@ -353,27 +402,27 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) } - if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + if v.CopySourceSSECustomerAlgorithm != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) } - if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + if v.CopySourceSSECustomerKey != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) } - if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + if v.CopySourceSSECustomerKeyMD5 != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + if v.ExpectedSourceBucketOwner != nil { locationName := "X-Amz-Source-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) } @@ -383,22 +432,22 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -415,9 +464,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -451,27 +498,27 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if 
v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + if v.SSEKMSEncryptionContext != nil { locationName := "X-Amz-Server-Side-Encryption-Context" encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -481,7 +528,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.Tagging != nil && len(*v.Tagging) > 0 { + if v.Tagging != nil { locationName := "X-Amz-Tagging" encoder.SetHeader(locationName).String(*v.Tagging) } @@ -491,7 +538,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.TaggingDirective)) } - if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + if v.WebsiteRedirectLocation != nil { locationName := "X-Amz-Website-Redirect-Location" encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) } @@ -509,6 +556,10 @@ func (*awsRestxml_serializeOpCreateBucket) ID() string { func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -569,6 +620,8 @@ func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error { @@ -581,27 +634,27 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + if v.GrantWrite != nil { locationName := "X-Amz-Grant-Write" 
encoder.SetHeader(locationName).String(*v.GrantWrite) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -619,6 +672,107 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e return nil } +type awsRestxml_serializeOpCreateBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metadataTable") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.MetadataTableConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MetadataTableConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentMetadataTableConfiguration(input.MetadataTableConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func 
awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + type awsRestxml_serializeOpCreateMultipartUpload struct { } @@ -629,6 +783,10 @@ func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string { func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -640,7 +798,7 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads&x-id=CreateMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -665,6 +823,8 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error { @@ -682,7 +842,7 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } @@ -692,27 +852,32 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if len(v.ChecksumType) > 0 { + locationName := "X-Amz-Checksum-Type" + encoder.SetHeader(locationName).String(string(v.ChecksumType)) + } + + if v.ContentDisposition != nil { locationName := "Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentType != nil && 
len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -722,22 +887,22 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -754,9 +919,7 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -785,27 +948,27 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + if v.SSEKMSEncryptionContext != nil { locationName := "X-Amz-Server-Side-Encryption-Context" encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -815,12 +978,12 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.Tagging != nil && len(*v.Tagging) > 0 { + if v.Tagging != nil { locationName := "X-Amz-Tagging" encoder.SetHeader(locationName).String(*v.Tagging) } - if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + if v.WebsiteRedirectLocation != nil { locationName := "X-Amz-Website-Redirect-Location" encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) } @@ -838,6 +1001,10 @@ func 
(*awsRestxml_serializeOpCreateSession) ID() string { func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -874,6 +1041,8 @@ func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, encoder *httpbinding.Encoder) error { @@ -881,11 +1050,31 @@ func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, return fmt.Errorf("unsupported serialization of nil %T", v) } + if v.BucketKeyEnabled != nil { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + if len(v.SessionMode) > 0 { locationName := "X-Amz-Create-Session-Mode" encoder.SetHeader(locationName).String(string(v.SessionMode)) } + if v.SSEKMSEncryptionContext != nil { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + return nil } @@ -899,6 +1088,10 @@ func (*awsRestxml_serializeOpDeleteBucket) ID() string { func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -935,6 +1128,8 @@ func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error { @@ -942,7 +1137,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -960,6 +1155,10 @@ func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -996,6 +1195,8 @@ func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1003,7 +1204,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1025,6 +1226,10 @@ func (*awsRestxml_serializeOpDeleteBucketCors) ID() string { func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1061,6 +1266,8 @@ func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error { @@ -1068,7 +1275,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCors return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1086,6 +1293,10 @@ func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string { func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1122,6 +1333,8 @@ func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error { @@ -1129,7 +1342,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBuck return 
fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1147,6 +1360,10 @@ func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() s func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1183,6 +1400,8 @@ func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) Hand } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1207,6 +1426,10 @@ func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1243,6 +1466,8 @@ func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1250,7 +1475,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1272,6 +1497,10 @@ func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string { func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1308,6 +1537,8 @@ func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx contex } in.Request = request + endTimer() 
+ span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error { @@ -1315,7 +1546,74 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucke return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metadataTable") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1333,6 +1631,10 @@ func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, 
"OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1369,6 +1671,8 @@ func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1376,7 +1680,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v * return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1398,6 +1702,10 @@ func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string { func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1434,6 +1742,8 @@ func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ct } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { @@ -1441,7 +1751,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *Del return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1459,6 +1769,10 @@ func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string { func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1495,6 +1809,8 @@ func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error { @@ -1502,7 +1818,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPo return fmt.Errorf("unsupported serialization of 
nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1520,6 +1836,10 @@ func (*awsRestxml_serializeOpDeleteBucketReplication) ID() string { func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1556,6 +1876,8 @@ func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error { @@ -1563,7 +1885,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1581,6 +1903,10 @@ func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string { func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1617,6 +1943,8 @@ func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error { @@ -1624,7 +1952,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketT return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1642,6 +1970,10 @@ func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string { func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1678,6 +2010,8 @@ func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, encoder *httpbinding.Encoder) error { @@ -1685,7 +2019,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketW return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1703,6 +2037,10 @@ func (*awsRestxml_serializeOpDeleteObject) ID() string { func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1739,6 +2077,8 @@ func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error { @@ -1751,11 +2091,26 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, e encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if v.IfMatch != nil { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfMatchLastModifiedTime != nil { + locationName := "X-Amz-If-Match-Last-Modified-Time" + 
encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchLastModifiedTime)) + } + + if v.IfMatchSize != nil { + locationName := "X-Amz-If-Match-Size" + encoder.SetHeader(locationName).Long(*v.IfMatchSize) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -1765,7 +2120,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, e } } - if v.MFA != nil && len(*v.MFA) > 0 { + if v.MFA != nil { locationName := "X-Amz-Mfa" encoder.SetHeader(locationName).String(*v.MFA) } @@ -1792,6 +2147,10 @@ func (*awsRestxml_serializeOpDeleteObjects) ID() string { func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1803,7 +2162,7 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/?delete&x-id=DeleteObjects") + opPath, opQuery := httpbinding.SplitURI("/?delete") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -1852,6 +2211,8 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error { @@ -1869,12 +2230,12 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.MFA != nil && len(*v.MFA) > 0 { + if v.MFA != nil { locationName := "X-Amz-Mfa" encoder.SetHeader(locationName).String(*v.MFA) } @@ -1897,6 +2258,10 @@ func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string { func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1933,6 +2298,8 @@ func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error { @@ -1940,7 +2307,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectT return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1971,6 +2338,10 @@ func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string { func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2007,6 +2378,8 @@ func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error { @@ -2014,7 +2387,7 @@ func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePub return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2032,6 +2405,10 @@ func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2068,6 +2445,8 @@ func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2075,7 +2454,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v * return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2098,6 +2477,10 @@ func (*awsRestxml_serializeOpGetBucketAcl) ID() string { func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2134,6 +2517,8 @@ func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error { @@ -2141,7 +2526,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2159,6 +2544,10 @@ func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2195,6 +2584,8 @@ func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2202,7 +2593,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2224,6 +2615,10 @@ func (*awsRestxml_serializeOpGetBucketCors) ID() string { func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2260,6 +2655,8 @@ func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error { @@ -2267,7 +2664,7 @@ func 
awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2285,6 +2682,10 @@ func (*awsRestxml_serializeOpGetBucketEncryption) ID() string { func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2321,6 +2722,8 @@ func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error { @@ -2328,7 +2731,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncr return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2346,6 +2749,10 @@ func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() stri func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2382,6 +2789,8 @@ func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleS } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2406,6 +2815,10 @@ func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2442,6 +2855,8 @@ func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize( } 
in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2449,7 +2864,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2471,6 +2886,10 @@ func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2507,6 +2926,8 @@ func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2514,7 +2935,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2532,6 +2953,10 @@ func (*awsRestxml_serializeOpGetBucketLocation) ID() string { func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2568,6 +2993,8 @@ func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error { @@ -2575,7 +3002,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocati return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2593,6 +3020,10 @@ func (*awsRestxml_serializeOpGetBucketLogging) ID() string { func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2629,6 +3060,8 @@ func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error { @@ -2636,7 +3069,74 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLogging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketMetadataTableConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metadataTable") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return 
fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2654,6 +3154,10 @@ func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2690,6 +3194,8 @@ func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ct } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2697,7 +3203,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *Get return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2719,6 +3225,10 @@ func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2755,6 +3265,8 @@ func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2762,7 +3274,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2780,6 +3292,10 @@ func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string { func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2816,6 +3332,8 @@ func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { @@ -2823,7 +3341,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2841,6 +3359,10 @@ func (*awsRestxml_serializeOpGetBucketPolicy) ID() string { func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2877,6 +3399,8 @@ func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error { @@ -2884,7 +3408,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyIn return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2902,6 +3426,10 @@ func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string { func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2938,6 +3466,8 @@ func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error { @@ -2945,7 +3475,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPo return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2963,6 +3493,10 @@ func (*awsRestxml_serializeOpGetBucketReplication) ID() string { func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2999,6 +3533,8 @@ func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error { @@ -3006,7 +3542,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketRep return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3024,6 +3560,10 @@ func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string { func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3060,6 +3600,8 @@ func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { @@ -3067,7 +3609,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucket return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3085,6 +3627,10 @@ func (*awsRestxml_serializeOpGetBucketTagging) ID() string { func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3121,6 +3667,8 @@ func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Con 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error { @@ -3128,7 +3676,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTagging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3146,6 +3694,10 @@ func (*awsRestxml_serializeOpGetBucketVersioning) ID() string { func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3182,6 +3734,8 @@ func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error { @@ -3189,7 +3743,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVers return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3207,6 +3761,10 @@ func (*awsRestxml_serializeOpGetBucketWebsite) ID() string { func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3243,6 +3801,8 @@ func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error { @@ -3250,7 +3810,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsite return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3268,6 +3828,10 @@ func (*awsRestxml_serializeOpGetObject) ID() string { func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3304,6 +3868,8 @@ func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error { @@ -3316,12 +3882,12 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder encoder.SetHeader(locationName).String(string(v.ChecksumMode)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.IfMatch != nil && len(*v.IfMatch) > 0 { + if v.IfMatch != nil { locationName := "If-Match" encoder.SetHeader(locationName).String(*v.IfMatch) } @@ -3331,7 +3897,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) } - if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + if v.IfNoneMatch != nil { locationName := "If-None-Match" encoder.SetHeader(locationName).String(*v.IfNoneMatch) } @@ -3354,7 +3920,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder encoder.SetQuery("partNumber").Integer(*v.PartNumber) } - if v.Range != nil && len(*v.Range) > 0 { + if v.Range != nil { locationName := "Range" encoder.SetHeader(locationName).String(*v.Range) } @@ -3388,17 +3954,17 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -3420,6 +3986,10 @@ func (*awsRestxml_serializeOpGetObjectAcl) ID() string { func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3456,6 +4026,8 @@ func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context } in.Request = 
request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error { @@ -3463,7 +4035,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3499,6 +4071,10 @@ func (*awsRestxml_serializeOpGetObjectAttributes) ID() string { func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3535,6 +4111,8 @@ func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error { @@ -3542,7 +4120,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3563,6 +4141,9 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr if v.ObjectAttributes != nil { locationName := "X-Amz-Object-Attributes" + if len(v.ObjectAttributes) == 0 { + encoder.AddHeader(locationName).String("") + } for i := range v.ObjectAttributes { if len(v.ObjectAttributes[i]) > 0 { escaped := string(v.ObjectAttributes[i]) @@ -3575,7 +4156,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr } } - if v.PartNumberMarker != nil && len(*v.PartNumberMarker) > 0 { + if v.PartNumberMarker != nil { locationName := "X-Amz-Part-Number-Marker" encoder.SetHeader(locationName).String(*v.PartNumberMarker) } @@ -3585,17 +4166,17 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -3617,6 +4198,10 @@ func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string { 
func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3653,6 +4238,8 @@ func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error { @@ -3660,7 +4247,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegal return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3696,6 +4283,10 @@ func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string { func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3732,6 +4323,8 @@ func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { @@ -3739,7 +4332,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObj return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3757,6 +4350,10 @@ func (*awsRestxml_serializeOpGetObjectRetention) ID() string { func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3793,6 +4390,8 @@ func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func 
awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error { @@ -3800,7 +4399,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectReten return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3836,6 +4435,10 @@ func (*awsRestxml_serializeOpGetObjectTagging) ID() string { func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3872,6 +4475,8 @@ func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error { @@ -3879,7 +4484,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTagging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3915,6 +4520,10 @@ func (*awsRestxml_serializeOpGetObjectTorrent) ID() string { func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3951,6 +4560,8 @@ func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error { @@ -3958,7 +4569,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrent return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3990,6 +4601,10 @@ func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string { func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := 
tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4026,6 +4641,8 @@ func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error { @@ -4033,7 +4650,7 @@ func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAcc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4051,6 +4668,10 @@ func (*awsRestxml_serializeOpHeadBucket) ID() string { func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4087,6 +4708,8 @@ func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error { @@ -4094,7 +4717,7 @@ func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encod return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4112,6 +4735,10 @@ func (*awsRestxml_serializeOpHeadObject) ID() string { func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4148,6 +4775,8 @@ func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error { @@ -4160,12 +4789,12 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetHeader(locationName).String(string(v.ChecksumMode)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if 
v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.IfMatch != nil && len(*v.IfMatch) > 0 { + if v.IfMatch != nil { locationName := "If-Match" encoder.SetHeader(locationName).String(*v.IfMatch) } @@ -4175,7 +4804,7 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) } - if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + if v.IfNoneMatch != nil { locationName := "If-None-Match" encoder.SetHeader(locationName).String(*v.IfNoneMatch) } @@ -4198,7 +4827,7 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetQuery("partNumber").Integer(*v.PartNumber) } - if v.Range != nil && len(*v.Range) > 0 { + if v.Range != nil { locationName := "Range" encoder.SetHeader(locationName).String(*v.Range) } @@ -4208,17 +4837,41 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.ResponseCacheControl != nil { + encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) + } + + if v.ResponseContentDisposition != nil { + encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) + } + + if v.ResponseContentEncoding != nil { + encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) + } + + if v.ResponseContentLanguage != nil { + encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) + } + + if v.ResponseContentType != nil { + encoder.SetQuery("response-content-type").String(*v.ResponseContentType) + } + + if v.ResponseExpires != nil { + encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) + } + + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -4240,6 +4893,10 @@ func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string { func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4276,6 +4933,8 @@ func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerializ } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v 
*ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error { @@ -4287,7 +4946,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4305,6 +4964,10 @@ func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() st func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4341,6 +5004,8 @@ func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) Handl } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error { @@ -4365,6 +5030,10 @@ func (*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string { func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4401,6 +5070,8 @@ func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerializ } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error { @@ -4412,7 +5083,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4430,6 +5101,10 @@ func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string { func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4466,6 +5141,8 @@ func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error { @@ -4477,7 +5154,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *L encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4495,6 +5172,10 @@ func (*awsRestxml_serializeOpListBuckets) ID() string { func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4522,11 +5203,17 @@ func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: err} } + if err := awsRestxml_serializeOpHttpBindingsListBucketsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + if request.Request, err = restEncoder.Encode(request.Request); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error { @@ -4534,6 +5221,22 @@ func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, enc return fmt.Errorf("unsupported serialization of nil %T", v) } + if v.BucketRegion != nil { + encoder.SetQuery("bucket-region").String(*v.BucketRegion) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.MaxBuckets != nil { + encoder.SetQuery("max-buckets").Integer(*v.MaxBuckets) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + return nil } @@ -4547,6 +5250,10 @@ func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string { func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4583,6 +5290,8 @@ func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func 
awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(v *ListDirectoryBucketsInput, encoder *httpbinding.Encoder) error { @@ -4611,6 +5320,10 @@ func (*awsRestxml_serializeOpListMultipartUploads) ID() string { func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4647,6 +5360,8 @@ func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error { @@ -4662,7 +5377,7 @@ func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipar encoder.SetQuery("encoding-type").String(string(v.EncodingType)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4701,6 +5416,10 @@ func (*awsRestxml_serializeOpListObjects) ID() string { func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4737,6 +5456,8 @@ func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error { @@ -4752,7 +5473,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc encoder.SetQuery("encoding-type").String(string(v.EncodingType)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4767,6 +5488,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc if v.OptionalObjectAttributes != nil { locationName := "X-Amz-Optional-Object-Attributes" + if len(v.OptionalObjectAttributes) == 0 { + encoder.AddHeader(locationName).String("") + } for i := range v.OptionalObjectAttributes { if len(v.OptionalObjectAttributes[i]) > 0 { escaped := string(v.OptionalObjectAttributes[i]) @@ -4801,6 +5525,10 @@ func (*awsRestxml_serializeOpListObjectsV2) ID() string { func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, 
) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4837,6 +5565,8 @@ func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error { @@ -4856,7 +5586,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder.SetQuery("encoding-type").String(string(v.EncodingType)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4871,6 +5601,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, if v.OptionalObjectAttributes != nil { locationName := "X-Amz-Optional-Object-Attributes" + if len(v.OptionalObjectAttributes) == 0 { + encoder.AddHeader(locationName).String("") + } for i := range v.OptionalObjectAttributes { if len(v.OptionalObjectAttributes[i]) > 0 { escaped := string(v.OptionalObjectAttributes[i]) @@ -4909,6 +5642,10 @@ func (*awsRestxml_serializeOpListObjectVersions) ID() string { func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4945,6 +5682,8 @@ func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error { @@ -4960,7 +5699,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVers encoder.SetQuery("encoding-type").String(string(v.EncodingType)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4975,6 +5714,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVers if v.OptionalObjectAttributes != nil { locationName := "X-Amz-Optional-Object-Attributes" + if len(v.OptionalObjectAttributes) == 0 { + encoder.AddHeader(locationName).String("") + } for i := range v.OptionalObjectAttributes { if len(v.OptionalObjectAttributes[i]) > 0 { escaped := string(v.OptionalObjectAttributes[i]) @@ -5013,6 +5755,10 @@ func (*awsRestxml_serializeOpListParts) ID() string { func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := 
tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5049,6 +5795,8 @@ func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error { @@ -5056,7 +5804,7 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5083,17 +5831,17 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -5115,6 +5863,10 @@ func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5175,6 +5927,8 @@ func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5187,7 +5941,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v * encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5205,6 +5959,10 @@ func (*awsRestxml_serializeOpPutBucketAcl) ID() string { func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5265,6 +6023,8 @@ func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error { @@ -5282,37 +6042,37 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, e encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + if v.GrantWrite != nil { locationName := "X-Amz-Grant-Write" encoder.SetHeader(locationName).String(*v.GrantWrite) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -5330,6 +6090,10 @@ func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5390,6 +6154,8 @@ func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5397,7 +6163,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *P return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5419,6 +6185,10 @@ func (*awsRestxml_serializeOpPutBucketCors) ID() string { func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5479,6 +6249,8 @@ func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error { @@ -5491,12 +6263,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5514,6 +6286,10 @@ func (*awsRestxml_serializeOpPutBucketEncryption) ID() string { func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5574,6 +6350,8 @@ func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error { @@ -5586,12 +6364,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncr encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5609,6 +6387,10 @@ func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() stri func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5669,6 +6451,8 @@ func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleS } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5693,6 +6477,10 @@ func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5753,6 +6541,8 @@ func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5760,7 +6550,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *P return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5782,6 +6572,10 @@ func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5842,6 +6636,8 @@ func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5854,11 +6650,16 @@ func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *P encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if len(v.TransitionDefaultMinimumObjectSize) > 0 { + locationName := "X-Amz-Transition-Default-Minimum-Object-Size" + encoder.SetHeader(locationName).String(string(v.TransitionDefaultMinimumObjectSize)) + } + return nil } @@ -5872,6 +6673,10 @@ func (*awsRestxml_serializeOpPutBucketLogging) ID() string { func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5932,6 +6737,8 @@ func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error { @@ -5944,12 +6751,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLogging encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5967,6 +6774,10 @@ func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6027,6 +6838,8 @@ 
func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ct } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -6034,7 +6847,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *Put return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6056,6 +6869,10 @@ func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6116,6 +6933,8 @@ func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { @@ -6123,7 +6942,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6146,6 +6965,10 @@ func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string { func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6206,6 +7029,8 @@ func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { @@ -6213,12 +7038,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if 
v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6236,6 +7061,10 @@ func (*awsRestxml_serializeOpPutBucketPolicy) ID() string { func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6284,6 +7113,8 @@ func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error { @@ -6301,12 +7132,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyIn encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6324,6 +7155,10 @@ func (*awsRestxml_serializeOpPutBucketReplication) ID() string { func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6384,6 +7219,8 @@ func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error { @@ -6396,17 +7233,17 @@ func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketRep encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.Token != nil && len(*v.Token) > 0 { + if v.Token != nil { locationName := "X-Amz-Bucket-Object-Lock-Token" encoder.SetHeader(locationName).String(*v.Token) } @@ -6424,6 +7261,10 @@ func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string { func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, 
in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6484,6 +7325,8 @@ func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { @@ -6496,12 +7339,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucket encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6519,6 +7362,10 @@ func (*awsRestxml_serializeOpPutBucketTagging) ID() string { func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6579,6 +7426,8 @@ func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error { @@ -6591,12 +7440,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTagging encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6614,6 +7463,10 @@ func (*awsRestxml_serializeOpPutBucketVersioning) ID() string { func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6674,6 
+7527,8 @@ func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error { @@ -6686,17 +7541,17 @@ func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVers encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.MFA != nil && len(*v.MFA) > 0 { + if v.MFA != nil { locationName := "X-Amz-Mfa" encoder.SetHeader(locationName).String(*v.MFA) } @@ -6714,6 +7569,10 @@ func (*awsRestxml_serializeOpPutBucketWebsite) ID() string { func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6774,6 +7633,8 @@ func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error { @@ -6786,12 +7647,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsite encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6809,6 +7670,10 @@ func (*awsRestxml_serializeOpPutObject) ID() string { func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6857,6 +7722,8 @@ func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error { @@ -6874,7 +7741,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v 
*PutObjectInput, encoder encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } @@ -6884,37 +7751,42 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if v.ContentDisposition != nil { locationName := "Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } @@ -6924,17 +7796,17 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).Long(*v.ContentLength) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ContentType != nil && len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6944,26 +7816,36 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" 
encoder.SetHeader(locationName).String(*v.GrantWriteACP) } + if v.IfMatch != nil { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfNoneMatch != nil { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -6976,9 +7858,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -7007,27 +7887,27 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + if v.SSEKMSEncryptionContext != nil { locationName := "X-Amz-Server-Side-Encryption-Context" encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -7037,16 +7917,21 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.Tagging != nil && len(*v.Tagging) > 0 { + if v.Tagging != nil { locationName := "X-Amz-Tagging" encoder.SetHeader(locationName).String(*v.Tagging) } - if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + if v.WebsiteRedirectLocation != nil { locationName := "X-Amz-Website-Redirect-Location" encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) } + if v.WriteOffsetBytes != nil { + locationName := "X-Amz-Write-Offset-Bytes" + encoder.SetHeader(locationName).Long(*v.WriteOffsetBytes) + } + return nil } @@ -7060,6 +7945,10 @@ func (*awsRestxml_serializeOpPutObjectAcl) ID() string { func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7120,6 +8009,8 @@ func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context 
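Across these hunks the regenerated serializers drop the trailing empty-string check on optional header members, so a pointer that is set to an empty string is now serialized instead of silently skipped. A minimal sketch of the guard change, using a hypothetical emit callback in place of encoder.SetHeader(...).String(...):

// writeOptionalHeader illustrates the new guard used for members such as
// Content-Md5 and X-Amz-Expected-Bucket-Owner in the hunks above.
func writeOptionalHeader(name string, v *string, emit func(name, value string)) {
	// Previous generated code: if v != nil && len(*v) > 0 { emit(name, *v) }
	// Updated generated code: only nil means "not set"; an explicitly set
	// empty string is forwarded as an empty header value.
	if v != nil {
		emit(name, *v)
	}
}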
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error { @@ -7137,37 +8028,37 @@ func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, e encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + if v.GrantWrite != nil { locationName := "X-Amz-Grant-Write" encoder.SetHeader(locationName).String(*v.GrantWrite) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -7203,6 +8094,10 @@ func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string { func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7263,6 +8158,8 @@ func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error { @@ -7275,12 +8172,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegal encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7316,6 +8213,10 @@ func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string { func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := 
tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7376,6 +8277,8 @@ func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { @@ -7388,12 +8291,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObj encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7403,7 +8306,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObj encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.Token != nil && len(*v.Token) > 0 { + if v.Token != nil { locationName := "X-Amz-Bucket-Object-Lock-Token" encoder.SetHeader(locationName).String(*v.Token) } @@ -7421,6 +8324,10 @@ func (*awsRestxml_serializeOpPutObjectRetention) ID() string { func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7481,6 +8388,8 @@ func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error { @@ -7498,12 +8407,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectReten encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7539,6 +8448,10 @@ func (*awsRestxml_serializeOpPutObjectTagging) ID() string { func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer 
span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7599,6 +8512,8 @@ func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error { @@ -7611,12 +8526,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTagging encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7652,6 +8567,10 @@ func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string { func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7712,6 +8631,8 @@ func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error { @@ -7724,12 +8645,12 @@ func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAcc encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7747,6 +8668,10 @@ func (*awsRestxml_serializeOpRestoreObject) ID() string { func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7758,7 +8683,7 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore&x-id=RestoreObject") + opPath, opQuery := 
httpbinding.SplitURI("/{Key+}?restore") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -7807,6 +8732,8 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error { @@ -7819,7 +8746,7 @@ func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7855,6 +8782,10 @@ func (*awsRestxml_serializeOpSelectObjectContent) ID() string { func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7866,7 +8797,7 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2&x-id=SelectObjectContent") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -7909,6 +8840,8 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error { @@ -7916,7 +8849,7 @@ func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectC return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7930,17 +8863,17 @@ func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectC } } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8037,6 +8970,10 @@ func (*awsRestxml_serializeOpUploadPart) ID() string { func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8085,6 +9022,8 @@ func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error { @@ -8097,22 +9036,27 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } @@ -8122,12 +9066,12 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod 
encoder.SetHeader(locationName).Long(*v.ContentLength) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -8150,17 +9094,17 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8182,6 +9126,10 @@ func (*awsRestxml_serializeOpUploadPartCopy) ID() string { func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8218,6 +9166,8 @@ func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error { @@ -8225,12 +9175,12 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.CopySource != nil && len(*v.CopySource) > 0 { + if v.CopySource != nil { locationName := "X-Amz-Copy-Source" encoder.SetHeader(locationName).String(*v.CopySource) } - if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + if v.CopySourceIfMatch != nil { locationName := "X-Amz-Copy-Source-If-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) } @@ -8240,7 +9190,7 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) } - if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + if v.CopySourceIfNoneMatch != nil { locationName := "X-Amz-Copy-Source-If-None-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) } @@ -8250,32 +9200,32 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) } - if v.CopySourceRange != nil && len(*v.CopySourceRange) > 0 { + if v.CopySourceRange != nil { locationName := 
"X-Amz-Copy-Source-Range" encoder.SetHeader(locationName).String(*v.CopySourceRange) } - if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + if v.CopySourceSSECustomerAlgorithm != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) } - if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + if v.CopySourceSSECustomerKey != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) } - if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + if v.CopySourceSSECustomerKeyMD5 != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + if v.ExpectedSourceBucketOwner != nil { locationName := "X-Amz-Source-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) } @@ -8298,17 +9248,17 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8330,6 +9280,10 @@ func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string { func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8341,7 +9295,7 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse") + opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -8378,6 +9332,8 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) 
HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error { @@ -8385,7 +9341,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AcceptRanges != nil && len(*v.AcceptRanges) > 0 { + if v.AcceptRanges != nil { locationName := "X-Amz-Fwd-Header-Accept-Ranges" encoder.SetHeader(locationName).String(*v.AcceptRanges) } @@ -8395,42 +9351,47 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "X-Amz-Fwd-Header-Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if v.ContentDisposition != nil { locationName := "X-Amz-Fwd-Header-Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "X-Amz-Fwd-Header-Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "X-Amz-Fwd-Header-Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } @@ -8440,12 +9401,12 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Long(*v.ContentLength) } - if v.ContentRange != nil && len(*v.ContentRange) > 0 { + if v.ContentRange != nil { locationName := "X-Amz-Fwd-Header-Content-Range" encoder.SetHeader(locationName).String(*v.ContentRange) } - if v.ContentType != nil && len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "X-Amz-Fwd-Header-Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } @@ -8455,22 +9416,22 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Boolean(*v.DeleteMarker) } - if v.ErrorCode != nil && len(*v.ErrorCode) > 0 { + if v.ErrorCode != nil { locationName := "X-Amz-Fwd-Error-Code" encoder.SetHeader(locationName).String(*v.ErrorCode) } - if v.ErrorMessage != nil && len(*v.ErrorMessage) > 0 { + if v.ErrorMessage != 
nil { locationName := "X-Amz-Fwd-Error-Message" encoder.SetHeader(locationName).String(*v.ErrorMessage) } - if v.ETag != nil && len(*v.ETag) > 0 { + if v.ETag != nil { locationName := "X-Amz-Fwd-Header-Etag" encoder.SetHeader(locationName).String(*v.ETag) } - if v.Expiration != nil && len(*v.Expiration) > 0 { + if v.Expiration != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Expiration" encoder.SetHeader(locationName).String(*v.Expiration) } @@ -8488,9 +9449,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -8529,17 +9488,17 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(string(v.RequestCharged)) } - if v.RequestRoute != nil && len(*v.RequestRoute) > 0 { + if v.RequestRoute != nil { locationName := "X-Amz-Request-Route" encoder.SetHeader(locationName).String(*v.RequestRoute) } - if v.RequestToken != nil && len(*v.RequestToken) > 0 { + if v.RequestToken != nil { locationName := "X-Amz-Request-Token" encoder.SetHeader(locationName).String(*v.RequestToken) } - if v.Restore != nil && len(*v.Restore) > 0 { + if v.Restore != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Restore" encoder.SetHeader(locationName).String(*v.Restore) } @@ -8549,17 +9508,17 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -8579,7 +9538,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Integer(*v.TagCount) } - if v.VersionId != nil && len(*v.VersionId) > 0 { + if v.VersionId != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id" encoder.SetHeader(locationName).String(*v.VersionId) } @@ -8995,6 +9954,17 @@ func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smi el := value.MemberElement(root) el.String(*v.ChecksumCRC32C) } + if v.ChecksumCRC64NVME != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumCRC64NVME", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumCRC64NVME) + } if v.ChecksumSHA1 != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -10587,71 +11557,66 @@ func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRule return nil } -func awsRestxml_serializeDocumentLifecycleRuleFilter(v types.LifecycleRuleFilter, value smithyxml.Value) error { +func 
awsRestxml_serializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, value smithyxml.Value) error { defer value.Close() - switch uv := v.(type) { - case *types.LifecycleRuleFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "And", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(v.And, el); err != nil { return err } - - case *types.LifecycleRuleFilterMemberObjectSizeGreaterThan: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + } + if v.ObjectSizeGreaterThan != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "ObjectSizeGreaterThan", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.Long(uv.Value) - - case *types.LifecycleRuleFilterMemberObjectSizeLessThan: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.Long(*v.ObjectSizeGreaterThan) + } + if v.ObjectSizeLessThan != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "ObjectSizeLessThan", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.Long(uv.Value) - - case *types.LifecycleRuleFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.Long(*v.ObjectSizeLessThan) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Prefix", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.LifecycleRuleFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Tag", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { return err } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - } return nil } @@ -10778,6 +11743,24 @@ func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smi return nil } +func awsRestxml_serializeDocumentMetadataTableConfiguration(v *types.MetadataTableConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.S3TablesDestination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3TablesDestination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentS3TablesDestination(v.S3TablesDestination, el); err != nil { + return err + } + } + return nil +} + func awsRestxml_serializeDocumentMetrics(v 
*types.Metrics, value smithyxml.Value) error { defer value.Close() if v.EventThreshold != nil { @@ -11091,6 +12074,17 @@ func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.Notifi func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error { defer value.Close() + if v.ETag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ETag", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ETag) + } if v.Key != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -11102,6 +12096,28 @@ func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, val el := value.MemberElement(root) el.String(*v.Key) } + if v.LastModifiedTime != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LastModifiedTime", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatHTTPDate(*v.LastModifiedTime)) + } + if v.Size != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Size", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(*v.Size) + } if v.VersionId != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -11759,49 +12775,44 @@ func awsRestxml_serializeDocumentReplicationRuleAndOperator(v *types.Replication return nil } -func awsRestxml_serializeDocumentReplicationRuleFilter(v types.ReplicationRuleFilter, value smithyxml.Value) error { +func awsRestxml_serializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, value smithyxml.Value) error { defer value.Close() - switch uv := v.(type) { - case *types.ReplicationRuleFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "And", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(v.And, el); err != nil { return err } - - case *types.ReplicationRuleFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Prefix", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.ReplicationRuleFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Tag", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { return err } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - } return nil } @@ -12158,6 +13169,33 @@ func 
awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml return nil } +func awsRestxml_serializeDocumentS3TablesDestination(v *types.S3TablesDestination, value smithyxml.Value) error { + defer value.Close() + if v.TableBucketArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TableBucketArn", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TableBucketArn) + } + if v.TableName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TableName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TableName) + } + return nil +} + func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { defer value.Close() if v.End != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go index ea3b9c82..ac5ab0be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go @@ -11,6 +11,7 @@ const ( // Values returns all known values for AnalyticsS3ExportFileFormat. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat { return []AnalyticsS3ExportFileFormat{ @@ -27,8 +28,9 @@ const ( ) // Values returns all known values for ArchiveStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ArchiveStatus) Values() []ArchiveStatus { return []ArchiveStatus{ "ARCHIVE_ACCESS", @@ -45,8 +47,9 @@ const ( ) // Values returns all known values for BucketAccelerateStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketAccelerateStatus) Values() []BucketAccelerateStatus { return []BucketAccelerateStatus{ "Enabled", @@ -65,8 +68,9 @@ const ( ) // Values returns all known values for BucketCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (BucketCannedACL) Values() []BucketCannedACL { return []BucketCannedACL{ "private", @@ -90,17 +94,22 @@ const ( BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3" + BucketLocationConstraintApSoutheast4 BucketLocationConstraint = "ap-southeast-4" + BucketLocationConstraintApSoutheast5 BucketLocationConstraint = "ap-southeast-5" BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1" BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1" BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1" BucketLocationConstraintEu BucketLocationConstraint = "EU" BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" + BucketLocationConstraintEuCentral2 BucketLocationConstraint = "eu-central-2" BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2" BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" + BucketLocationConstraintIlCentral1 BucketLocationConstraint = "il-central-1" + BucketLocationConstraintMeCentral1 BucketLocationConstraint = "me-central-1" BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1" BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1" BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2" @@ -112,6 +121,7 @@ const ( // Values returns all known values for BucketLocationConstraint. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (BucketLocationConstraint) Values() []BucketLocationConstraint { return []BucketLocationConstraint{ @@ -125,17 +135,22 @@ func (BucketLocationConstraint) Values() []BucketLocationConstraint { "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", + "ap-southeast-4", + "ap-southeast-5", "ca-central-1", "cn-north-1", "cn-northwest-1", "EU", "eu-central-1", + "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", + "il-central-1", + "me-central-1", "me-south-1", "sa-east-1", "us-east-2", @@ -156,8 +171,9 @@ const ( ) // Values returns all known values for BucketLogsPermission. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketLogsPermission) Values() []BucketLogsPermission { return []BucketLogsPermission{ "FULL_CONTROL", @@ -174,8 +190,9 @@ const ( ) // Values returns all known values for BucketType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (BucketType) Values() []BucketType { return []BucketType{ "Directory", @@ -191,8 +208,9 @@ const ( ) // Values returns all known values for BucketVersioningStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketVersioningStatus) Values() []BucketVersioningStatus { return []BucketVersioningStatus{ "Enabled", @@ -204,21 +222,24 @@ type ChecksumAlgorithm string // Enum values for ChecksumAlgorithm const ( - ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" - ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" - ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" - ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" + ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" + ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" + ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" + ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" + ChecksumAlgorithmCrc64nvme ChecksumAlgorithm = "CRC64NVME" ) // Values returns all known values for ChecksumAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ChecksumAlgorithm) Values() []ChecksumAlgorithm { return []ChecksumAlgorithm{ "CRC32", "CRC32C", "SHA1", "SHA256", + "CRC64NVME", } } @@ -230,14 +251,34 @@ const ( ) // Values returns all known values for ChecksumMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ChecksumMode) Values() []ChecksumMode { return []ChecksumMode{ "ENABLED", } } +type ChecksumType string + +// Enum values for ChecksumType +const ( + ChecksumTypeComposite ChecksumType = "COMPOSITE" + ChecksumTypeFullObject ChecksumType = "FULL_OBJECT" +) + +// Values returns all known values for ChecksumType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ChecksumType) Values() []ChecksumType { + return []ChecksumType{ + "COMPOSITE", + "FULL_OBJECT", + } +} + type CompressionType string // Enum values for CompressionType @@ -248,8 +289,9 @@ const ( ) // Values returns all known values for CompressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (CompressionType) Values() []CompressionType { return []CompressionType{ "NONE", @@ -263,14 +305,17 @@ type DataRedundancy string // Enum values for DataRedundancy const ( DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone" + DataRedundancySingleLocalZone DataRedundancy = "SingleLocalZone" ) // Values returns all known values for DataRedundancy. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DataRedundancy) Values() []DataRedundancy { return []DataRedundancy{ "SingleAvailabilityZone", + "SingleLocalZone", } } @@ -284,8 +329,9 @@ const ( // Values returns all known values for DeleteMarkerReplicationStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { return []DeleteMarkerReplicationStatus{ "Enabled", @@ -301,8 +347,9 @@ const ( ) // Values returns all known values for EncodingType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (EncodingType) Values() []EncodingType { return []EncodingType{ "url", @@ -343,8 +390,9 @@ const ( ) // Values returns all known values for Event. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Event) Values() []Event { return []Event{ "s3:ReducedRedundancyLostObject", @@ -387,8 +435,9 @@ const ( // Values returns all known values for ExistingObjectReplicationStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { return []ExistingObjectReplicationStatus{ "Enabled", @@ -405,8 +454,9 @@ const ( ) // Values returns all known values for ExpirationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExpirationStatus) Values() []ExpirationStatus { return []ExpirationStatus{ "Enabled", @@ -422,8 +472,9 @@ const ( ) // Values returns all known values for ExpressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. 
The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExpressionType) Values() []ExpressionType { return []ExpressionType{ "SQL", @@ -440,8 +491,9 @@ const ( ) // Values returns all known values for FileHeaderInfo. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FileHeaderInfo) Values() []FileHeaderInfo { return []FileHeaderInfo{ "USE", @@ -459,8 +511,9 @@ const ( ) // Values returns all known values for FilterRuleName. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FilterRuleName) Values() []FilterRuleName { return []FilterRuleName{ "prefix", @@ -478,8 +531,9 @@ const ( // Values returns all known values for IntelligentTieringAccessTier. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { return []IntelligentTieringAccessTier{ "ARCHIVE_ACCESS", @@ -497,6 +551,7 @@ const ( // Values returns all known values for IntelligentTieringStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { return []IntelligentTieringStatus{ @@ -515,8 +570,9 @@ const ( ) // Values returns all known values for InventoryFormat. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryFormat) Values() []InventoryFormat { return []InventoryFormat{ "CSV", @@ -534,8 +590,9 @@ const ( ) // Values returns all known values for InventoryFrequency. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryFrequency) Values() []InventoryFrequency { return []InventoryFrequency{ "Daily", @@ -553,8 +610,9 @@ const ( // Values returns all known values for InventoryIncludedObjectVersions. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { return []InventoryIncludedObjectVersions{ "All", @@ -584,8 +642,9 @@ const ( ) // Values returns all known values for InventoryOptionalField. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryOptionalField) Values() []InventoryOptionalField { return []InventoryOptionalField{ "Size", @@ -615,8 +674,9 @@ const ( ) // Values returns all known values for JSONType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (JSONType) Values() []JSONType { return []JSONType{ "DOCUMENT", @@ -629,14 +689,17 @@ type LocationType string // Enum values for LocationType const ( LocationTypeAvailabilityZone LocationType = "AvailabilityZone" + LocationTypeLocalZone LocationType = "LocalZone" ) // Values returns all known values for LocationType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (LocationType) Values() []LocationType { return []LocationType{ "AvailabilityZone", + "LocalZone", } } @@ -649,8 +712,9 @@ const ( ) // Values returns all known values for MetadataDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetadataDirective) Values() []MetadataDirective { return []MetadataDirective{ "COPY", @@ -667,8 +731,9 @@ const ( ) // Values returns all known values for MetricsStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetricsStatus) Values() []MetricsStatus { return []MetricsStatus{ "Enabled", @@ -685,8 +750,9 @@ const ( ) // Values returns all known values for MFADelete. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MFADelete) Values() []MFADelete { return []MFADelete{ "Enabled", @@ -703,8 +769,9 @@ const ( ) // Values returns all known values for MFADeleteStatus. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MFADeleteStatus) Values() []MFADeleteStatus { return []MFADeleteStatus{ "Enabled", @@ -724,8 +791,9 @@ const ( ) // Values returns all known values for ObjectAttributes. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectAttributes) Values() []ObjectAttributes { return []ObjectAttributes{ "ETag", @@ -750,8 +818,9 @@ const ( ) // Values returns all known values for ObjectCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectCannedACL) Values() []ObjectCannedACL { return []ObjectCannedACL{ "private", @@ -772,8 +841,9 @@ const ( ) // Values returns all known values for ObjectLockEnabled. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockEnabled) Values() []ObjectLockEnabled { return []ObjectLockEnabled{ "Enabled", @@ -790,6 +860,7 @@ const ( // Values returns all known values for ObjectLockLegalHoldStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { return []ObjectLockLegalHoldStatus{ @@ -807,8 +878,9 @@ const ( ) // Values returns all known values for ObjectLockMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockMode) Values() []ObjectLockMode { return []ObjectLockMode{ "GOVERNANCE", @@ -825,8 +897,9 @@ const ( ) // Values returns all known values for ObjectLockRetentionMode. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { return []ObjectLockRetentionMode{ "GOVERNANCE", @@ -844,8 +917,9 @@ const ( ) // Values returns all known values for ObjectOwnership. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectOwnership) Values() []ObjectOwnership { return []ObjectOwnership{ "BucketOwnerPreferred", @@ -872,8 +946,9 @@ const ( ) // Values returns all known values for ObjectStorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectStorageClass) Values() []ObjectStorageClass { return []ObjectStorageClass{ "STANDARD", @@ -899,6 +974,7 @@ const ( // Values returns all known values for ObjectVersionStorageClass. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { return []ObjectVersionStorageClass{ @@ -915,6 +991,7 @@ const ( // Values returns all known values for OptionalObjectAttributes. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (OptionalObjectAttributes) Values() []OptionalObjectAttributes { return []OptionalObjectAttributes{ @@ -930,8 +1007,9 @@ const ( ) // Values returns all known values for OwnerOverride. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (OwnerOverride) Values() []OwnerOverride { return []OwnerOverride{ "Destination", @@ -947,8 +1025,9 @@ const ( ) // Values returns all known values for PartitionDateSource. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (PartitionDateSource) Values() []PartitionDateSource { return []PartitionDateSource{ "EventTime", @@ -965,8 +1044,9 @@ const ( ) // Values returns all known values for Payer. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Payer) Values() []Payer { return []Payer{ "Requester", @@ -986,8 +1066,9 @@ const ( ) // Values returns all known values for Permission. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (Permission) Values() []Permission { return []Permission{ "FULL_CONTROL", @@ -1007,8 +1088,9 @@ const ( ) // Values returns all known values for Protocol. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Protocol) Values() []Protocol { return []Protocol{ "http", @@ -1025,8 +1107,9 @@ const ( ) // Values returns all known values for QuoteFields. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (QuoteFields) Values() []QuoteFields { return []QuoteFields{ "ALWAYS", @@ -1044,6 +1127,7 @@ const ( // Values returns all known values for ReplicaModificationsStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { return []ReplicaModificationsStatus{ @@ -1061,8 +1145,9 @@ const ( ) // Values returns all known values for ReplicationRuleStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { return []ReplicationRuleStatus{ "Enabled", @@ -1082,8 +1167,9 @@ const ( ) // Values returns all known values for ReplicationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationStatus) Values() []ReplicationStatus { return []ReplicationStatus{ "COMPLETE", @@ -1103,8 +1189,9 @@ const ( ) // Values returns all known values for ReplicationTimeStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { return []ReplicationTimeStatus{ "Enabled", @@ -1120,8 +1207,9 @@ const ( ) // Values returns all known values for RequestCharged. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (RequestCharged) Values() []RequestCharged { return []RequestCharged{ "requester", @@ -1136,8 +1224,9 @@ const ( ) // Values returns all known values for RequestPayer. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RequestPayer) Values() []RequestPayer { return []RequestPayer{ "requester", @@ -1152,8 +1241,9 @@ const ( ) // Values returns all known values for RestoreRequestType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RestoreRequestType) Values() []RestoreRequestType { return []RestoreRequestType{ "SELECT", @@ -1170,8 +1260,9 @@ const ( ) // Values returns all known values for ServerSideEncryption. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ServerSideEncryption) Values() []ServerSideEncryption { return []ServerSideEncryption{ "AES256", @@ -1189,8 +1280,9 @@ const ( ) // Values returns all known values for SessionMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SessionMode) Values() []SessionMode { return []SessionMode{ "ReadOnly", @@ -1208,8 +1300,9 @@ const ( // Values returns all known values for SseKmsEncryptedObjectsStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { return []SseKmsEncryptedObjectsStatus{ "Enabled", @@ -1235,8 +1328,9 @@ const ( ) // Values returns all known values for StorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StorageClass) Values() []StorageClass { return []StorageClass{ "STANDARD", @@ -1262,8 +1356,9 @@ const ( // Values returns all known values for StorageClassAnalysisSchemaVersion. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { return []StorageClassAnalysisSchemaVersion{ "V_1", @@ -1279,8 +1374,9 @@ const ( ) // Values returns all known values for TaggingDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TaggingDirective) Values() []TaggingDirective { return []TaggingDirective{ "COPY", @@ -1298,8 +1394,9 @@ const ( ) // Values returns all known values for Tier. Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Tier) Values() []Tier { return []Tier{ "Standard", @@ -1308,6 +1405,26 @@ func (Tier) Values() []Tier { } } +type TransitionDefaultMinimumObjectSize string + +// Enum values for TransitionDefaultMinimumObjectSize +const ( + TransitionDefaultMinimumObjectSizeVariesByStorageClass TransitionDefaultMinimumObjectSize = "varies_by_storage_class" + TransitionDefaultMinimumObjectSizeAllStorageClasses128k TransitionDefaultMinimumObjectSize = "all_storage_classes_128K" +) + +// Values returns all known values for TransitionDefaultMinimumObjectSize. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TransitionDefaultMinimumObjectSize) Values() []TransitionDefaultMinimumObjectSize { + return []TransitionDefaultMinimumObjectSize{ + "varies_by_storage_class", + "all_storage_classes_128K", + } +} + type TransitionStorageClass string // Enum values for TransitionStorageClass @@ -1321,8 +1438,9 @@ const ( ) // Values returns all known values for TransitionStorageClass. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TransitionStorageClass) Values() []TransitionStorageClass { return []TransitionStorageClass{ "GLACIER", @@ -1344,8 +1462,9 @@ const ( ) // Values returns all known values for Type. Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (Type) Values() []Type { return []Type{ "CanonicalUser", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go index 166484f4..1070573a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go @@ -64,14 +64,46 @@ func (e *BucketAlreadyOwnedByYou) ErrorCode() string { } func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Object is archived and inaccessible until restored. If the object you are -// retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 -// Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access -// tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can -// retrieve the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. +// The existing object was created with a different encryption type. Subsequent +// +// write requests must include the appropriate encryption parameters in the request +// or while creating the session. +type EncryptionTypeMismatch struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *EncryptionTypeMismatch) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *EncryptionTypeMismatch) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *EncryptionTypeMismatch) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "EncryptionTypeMismatch" + } + return *e.ErrorCodeOverride +} +func (e *EncryptionTypeMismatch) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Object is archived and inaccessible until restored. +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 +// Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep +// Archive Access tier, before you can retrieve the object you must first restore a +// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. +// +// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html type InvalidObjectState struct { Message *string @@ -100,6 +132,69 @@ func (e *InvalidObjectState) ErrorCode() string { } func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// You may receive this error in multiple cases. Depending on the reason for the +// error, you may receive one of the messages below: +// +// - Cannot specify both a write offset value and user-defined object metadata +// for existing objects. +// +// - Checksum Type mismatch occurred, expected checksum Type: sha1, actual +// checksum Type: crc32c. +// +// - Request body cannot be empty when 'write offset' is specified. 
+type InvalidRequest struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequest) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequest) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequest) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequest" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequest) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The write offset value that you specified does not match the current object +// +// size. +type InvalidWriteOffset struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidWriteOffset) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidWriteOffset) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidWriteOffset) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidWriteOffset" + } + return *e.ErrorCodeOverride +} +func (e *InvalidWriteOffset) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The specified bucket does not exist. type NoSuchBucket struct { Message *string @@ -256,3 +351,32 @@ func (e *ObjectNotInActiveTierError) ErrorCode() string { return *e.ErrorCodeOverride } func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// You have attempted to add more parts than the maximum of 10000 that are +// +// allowed for this object. You can use the CopyObject operation to copy this +// object to another and then add more data to the newly copied object. +type TooManyParts struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TooManyParts) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyParts) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyParts) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyParts" + } + return *e.ErrorCodeOverride +} +func (e *TooManyParts) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go index d3f7593f..1137501e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -9,9 +9,9 @@ import ( // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For -// more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// in the Amazon S3 User Guide. +// more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. 
+// +// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config type AbortIncompleteMultipartUpload struct { // Specifies the number of days after which Amazon S3 aborts an incomplete @@ -22,8 +22,9 @@ type AbortIncompleteMultipartUpload struct { } // Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. +// information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide. +// +// [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html type AccelerateConfiguration struct { // Specifies the transfer acceleration status of the bucket. @@ -47,9 +48,10 @@ type AccessControlPolicy struct { // A container for information about access control for replicas. type AccessControlTranslation struct { - // Specifies the replica ownership. For default and valid values, see PUT bucket - // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // in the Amazon S3 API Reference. + // Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the + // Amazon S3 API Reference. + // + // [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html // // This member is required. Owner OwnerOverride @@ -82,7 +84,7 @@ type AnalyticsConfiguration struct { // This member is required. Id *string - // Contains data related to access patterns to be collected and made available to + // Contains data related to access patterns to be collected and made available to // analyze the tradeoffs between different storage classes. // // This member is required. @@ -162,9 +164,10 @@ type AnalyticsS3BucketDestination struct { Format AnalyticsS3ExportFileFormat // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. Although this value - // is optional, we strongly recommend that you set it to help prevent problems if - // the destination bucket ownership changes. + // provided, the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to help + // prevent problems if the destination bucket ownership changes. BucketAccountId *string // The prefix to use when exporting data. The prefix is prepended to all results. @@ -176,6 +179,11 @@ type AnalyticsS3BucketDestination struct { // In terms of implementation, a Bucket is a resource. type Bucket struct { + // BucketRegion indicates the Amazon Web Services region where the bucket is + // located. If the request contains at least one valid parameter, it is included in + // the response. + BucketRegion *string + // Date the bucket was created. This date can change when making changes to your // bucket, such as editing its bucket policy. CreationDate *time.Time @@ -187,12 +195,15 @@ type Bucket struct { } // Specifies the information about the bucket that will be created. For more -// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. This functionality is only supported by directory -// buckets. 
+// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +// +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html type BucketInfo struct { - // The number of Availability Zone that's used for redundancy for the bucket. + // The number of Zone (Availability Zone or Local Zone) that's used for redundancy + // for the bucket. DataRedundancy DataRedundancy // The type of bucket. @@ -202,8 +213,9 @@ type BucketInfo struct { } // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For -// more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// more information, see [Object Lifecycle Management]in the Amazon S3 User Guide. +// +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html type BucketLifecycleConfiguration struct { // A lifecycle rule for individual objects in an Amazon S3 bucket. @@ -218,8 +230,10 @@ type BucketLifecycleConfiguration struct { type BucketLoggingStatus struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. + // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + // Reference. + // + // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html LoggingEnabled *LoggingEnabled noSmithyDocumentSerde @@ -228,42 +242,65 @@ type BucketLoggingStatus struct { // Contains all the possible checksum or digest values for an object. type Checksum struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. 
When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This checksum is + // present if the object was uploaded with the CRC64NVME checksum algorithm, or if + // the object was uploaded without a checksum (and Amazon S3 added the default + // checksum, CRC64NVME , to the uploaded object). For more information, see [Checking object integrity] in + // the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + noSmithyDocumentSerde } @@ -283,8 +320,10 @@ type CommonPrefix struct { // The container for the completed multipart upload details. type CompletedMultipartUpload struct { - // Array of CompletedPart data types. If you do not supply a valid Part with your - // request, the service sends back an HTTP 400 response. + // Array of CompletedPart data types. + // + // If you do not supply a valid Part with your request, the service sends back an + // HTTP 400 response. Parts []CompletedPart noSmithyDocumentSerde @@ -293,40 +332,40 @@ type CompletedMultipartUpload struct { // Details of the parts that were uploaded. type CompletedPart struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present + // if the multipart upload request was created with the CRC32 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC32C checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm to the uploaded object). For more information, see [Checking object integrity]in the Amazon S3 + // User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the multipart upload request was created with the SHA1 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the multipart upload request was created with the SHA256 checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag returned when the part was uploaded. @@ -334,12 +373,14 @@ type CompletedPart struct { // Part number that identifies the part. This is a positive integer between 1 and // 10,000. 
+ // // - General purpose buckets - In CompleteMultipartUpload , when a additional // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the // PartNumber must start at 1 and the part numbers must be consecutive. // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an // InvalidPartOrder error code. + // // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start // at 1 and the part numbers must be consecutive. PartNumber *int32 @@ -366,10 +407,12 @@ type Condition struct { // be /docs , which identifies all objects in the docs/ folder. Required when the // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals // is not specified. If both conditions are specified, both must be true for the - // redirect to be applied. Replacement must be made for object keys containing - // special characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // redirect to be applied. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints KeyPrefixEquals *string noSmithyDocumentSerde @@ -382,30 +425,49 @@ type ContinuationEvent struct { // Container for all response elements. type CopyObjectResult struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. 
This checksum is + // present if the object being copied was uploaded with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // Returns the ETag of the new object. The ETag reflects only changes to the // contents of an object, not its metadata. ETag *string @@ -419,40 +481,44 @@ type CopyObjectResult struct { // Container for all response elements. type CopyPartResult struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 32-bit CRC32 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
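To make the CompletedMultipartUpload / CompletedPart shapes above concrete, a minimal sketch of finishing an upload, assuming the standard s3.Client API; the upload ID and ETags would come from earlier CreateMultipartUpload / UploadPart calls:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// completeUpload finishes a multipart upload by sending the ETag collected
// from each UploadPart response. Part numbers must start at 1 and be
// consecutive, as the PartNumber documentation above requires.
func completeUpload(ctx context.Context, client *s3.Client, bucket, key, uploadID string, etags []string) error {
	parts := make([]types.CompletedPart, 0, len(etags))
	for i, etag := range etags {
		parts = append(parts, types.CompletedPart{
			ETag:       aws.String(etag),
			PartNumber: aws.Int32(int32(i + 1)),
		})
	}
	_, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: parts,
		},
	})
	return err
}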
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 32-bit CRC32C checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm to the uploaded object). For more information, see [Checking object integrity]in the Amazon S3 + // User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 256-bit SHA256 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag of the object. @@ -465,8 +531,9 @@ type CopyPartResult struct { } // Describes the cross-origin access configuration for objects in an Amazon S3 -// bucket. 
For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. +// bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. +// +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html type CORSConfiguration struct { // A set of origins and methods (cross-origin access that you want to allow). You @@ -515,23 +582,37 @@ type CORSRule struct { // The configuration information for the bucket. type CreateBucketConfiguration struct { - // Specifies the information about the bucket that will be created. This - // functionality is only supported by directory buckets. + // Specifies the information about the bucket that will be created. + // + // This functionality is only supported by directory buckets. Bucket *BucketInfo - // Specifies the location where the bucket will be created. For directory buckets, - // the location type is Availability Zone. This functionality is only supported by - // directory buckets. + // Specifies the location where the bucket will be created. + // + // Directory buckets - The location type is Availability Zone or Local Zone. To + // use the Local Zone location type, your account must be enabled for Dedicated + // Local Zones. Otherwise, you get an HTTP 403 Forbidden error with the error code + // AccessDenied . To learn more, see [Enable accounts for Dedicated Local Zones] in the Amazon S3 User Guide. + // + // This functionality is only supported by directory buckets. + // + // [Enable accounts for Dedicated Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/opt-in-directory-bucket-lz.html Location *LocationInfo // Specifies the Region where the bucket will be created. You might choose a // Region to optimize latency, minimize costs, or address regulatory requirements. // For example, if you reside in Europe, you will probably find it advantageous to - // create buckets in the Europe (Ireland) Region. For more information, see - // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) - // in the Amazon S3 User Guide. If you don't specify a Region, the bucket is - // created in the US East (N. Virginia) Region (us-east-1) by default. This - // functionality is not supported for directory buckets. + // create buckets in the Europe (Ireland) Region. + // + // If you don't specify a Region, the bucket is created in the US East (N. + // Virginia) Region (us-east-1) by default. Configurations using the value EU will + // create a bucket in eu-west-1 . + // + // For a list of the valid values for all of the Amazon Web Services Regions, see [Regions and Endpoints]. + // + // This functionality is not supported for directory buckets. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region LocationConstraint BucketLocationConstraint noSmithyDocumentSerde @@ -548,7 +629,9 @@ type CSVInput struct { // A single character used to indicate that a row should be ignored when the // character is present at the start of that row. You can specify any character to - // indicate a comment line. The default character is # . Default: # + // indicate a comment line. The default character is # . + // + // Default: # Comments *string // A single character used to separate individual fields in a record. 
You can @@ -556,17 +639,26 @@ type CSVInput struct { FieldDelimiter *string // Describes the first line of input. Valid values are: + // // - NONE : First line is not a header. + // // - IGNORE : First line is a header, but you can't use the header values to // indicate the column in an expression. You can use column position (such as _1, // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). + // // - Use : First line is a header, and you can use the header value to identify a // column in an expression ( SELECT "name" FROM OBJECT ). FileHeaderInfo FileHeaderInfo // A single character used for escaping when the field delimiter is part of the // value. For example, if the value is a, b , Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b " . Type: String Default: " Ancestors: CSV + // quotation marks, as follows: " a , b " . + // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string // A single character used for escaping the quotation mark character inside an @@ -599,7 +691,9 @@ type CSVOutput struct { QuoteEscapeCharacter *string // Indicates whether to use quotation marks around output fields. + // // - ALWAYS : Always use quotation marks for output fields. + // // - ASNEEDED : Use quotation marks for output fields when needed. QuoteFields QuoteFields @@ -610,9 +704,11 @@ type CSVOutput struct { noSmithyDocumentSerde } -// The container element for specifying the default Object Lock retention settings -// for new objects placed in the specified bucket. +// The container element for optionally specifying the default Object Lock +// retention settings for new objects placed in the specified bucket. +// // - The DefaultRetention settings require both a mode and a period. +// // - The DefaultRetention period can be either Days or Years but you must select // one. You cannot specify Days and Years at the same time. type DefaultRetention struct { @@ -635,10 +731,12 @@ type DefaultRetention struct { // Container for the objects to delete. type Delete struct { - // The object to delete. Directory buckets - For directory buckets, an object - // that's composed entirely of whitespace characters is not supported by the - // DeleteObjects API operation. The request will receive a 400 Bad Request error - // and none of the objects in the request will be deleted. + // The object to delete. + // + // Directory buckets - For directory buckets, an object that's composed entirely + // of whitespace characters is not supported by the DeleteObjects API operation. + // The request will receive a 400 Bad Request error and none of the objects in the + // request will be deleted. // // This member is required. Objects []ObjectIdentifier @@ -656,21 +754,26 @@ type DeletedObject struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. This functionality is not supported for directory - // buckets. + // object is a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // This functionality is not supported for directory buckets. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // The version ID of the delete marker created as a result of the DELETE // operation. 
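A short sketch of the Delete / ObjectIdentifier / DeletedObject types in use, assuming the standard DeleteObjects client call; the bucket and keys are placeholders:

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteKeys removes up to 1,000 keys in a single DeleteObjects request and
// reports which deletions produced delete markers (versioned buckets).
func deleteKeys(ctx context.Context, client *s3.Client, bucket string, keys []string) error {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &types.Delete{Objects: ids},
	})
	if err != nil {
		return err
	}
	for _, d := range out.Deleted {
		if aws.ToBool(d.DeleteMarker) {
			fmt.Printf("%s: delete marker %s created\n",
				aws.ToString(d.Key), aws.ToString(d.DeleteMarkerVersionId))
		}
	}
	return nil
}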
If you delete a specific object version, the value returned by this - // header is the version ID of the object version deleted. This functionality is - // not supported for directory buckets. + // header is the version ID of the object version deleted. + // + // This functionality is not supported for directory buckets. DeleteMarkerVersionId *string // The name of the deleted object. Key *string - // The version ID of the deleted object. This functionality is not supported for - // directory buckets. + // The version ID of the deleted object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -689,7 +792,7 @@ type DeleteMarkerEntry struct { // Date and time when the object was last modified. LastModified *time.Time - // The account that created the delete marker.> + // The account that created the delete marker. Owner *Owner // Version ID of an object. @@ -703,17 +806,20 @@ type DeleteMarkerEntry struct { // DeleteMarkerReplication element. If your Filter includes a Tag element, the // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example -// configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) -// . For more information about delete marker replication, see Basic Rule -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) -// . If you are using an earlier version of the replication configuration, Amazon -// S3 handles replication of delete markers differently. For more information, see -// Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) -// . +// configuration, see [Basic Rule Configuration]. +// +// For more information about delete marker replication, see [Basic Rule Configuration]. +// +// If you are using an earlier version of the replication configuration, Amazon S3 +// handles replication of delete markers differently. For more information, see [Backward Compatibility]. +// +// [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html +// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations type DeleteMarkerReplication struct { - // Indicates whether to replicate delete markers. Indicates whether to replicate - // delete markers. + // Indicates whether to replicate delete markers. + // + // Indicates whether to replicate delete markers. Status DeleteMarkerReplicationStatus noSmithyDocumentSerde @@ -723,7 +829,7 @@ type DeleteMarkerReplication struct { // for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). type Destination struct { - // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store // the results. // // This member is required. @@ -740,29 +846,32 @@ type Destination struct { // Amazon S3 to change replica ownership to the Amazon Web Services account that // owns the destination bucket by specifying the AccessControlTranslation // property, this is the account ID of the destination bucket owner. 
For more - // information, see Replication Additional Configuration: Changing the Replica - // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) - // in the Amazon S3 User Guide. + // information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide. + // + // [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html Account *string // A container that provides information about encryption. If // SourceSelectionCriteria is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration - // A container specifying replication metrics-related settings enabling + // A container specifying replication metrics-related settings enabling // replication metrics and events. Metrics *Metrics - // A container specifying S3 Replication Time Control (S3 RTC), including whether + // A container specifying S3 Replication Time Control (S3 RTC), including whether // S3 RTC is enabled and the time when all objects and operations on objects must // be replicated. Must be specified together with a Metrics block. ReplicationTime *ReplicationTime - // The storage class to use when replicating objects, such as S3 Standard or + // The storage class to use when replicating objects, such as S3 Standard or // reduced redundancy. By default, Amazon S3 uses the storage class of the source - // object to create the object replica. For valid values, see the StorageClass - // element of the PUT Bucket replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // action in the Amazon S3 API Reference. + // object to create the object replica. + // + // For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3 + // API Reference. + // + // [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html StorageClass StorageClass noSmithyDocumentSerde @@ -784,8 +893,9 @@ type Encryption struct { // If the encryption type is aws:kms , this optional value specifies the ID of the // symmetric encryption customer managed key to use for encryption of job results. // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // [Asymmetric keys in KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html KMSKeyId *string noSmithyDocumentSerde @@ -793,14 +903,21 @@ type Encryption struct { // Specifies encryption-related information for an Amazon S3 bucket that is a // destination for replicated objects. +// +// If you're specifying a customer managed KMS key, we recommend using a fully +// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the +// key within the requester’s account. This behavior can result in data that's +// encrypted with a KMS key that belongs to the requester, and not the bucket +// owner. type EncryptionConfiguration struct { // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for // the destination bucket. 
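For the Destination and EncryptionConfiguration elements above, a sketch of a replication destination that encrypts replicas with a customer managed KMS key; the ARNs are placeholders and the enclosing ReplicationRule / PutBucketReplication call is omitted:

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// replicaDestination builds the Destination block of a replication rule:
// replicas land in the given bucket, ownership is translated to destAccount,
// and replicas are encrypted with the fully qualified KMS key ARN, as the
// guidance above recommends.
func replicaDestination(bucketARN, destAccount, kmsKeyARN string) *types.Destination {
	return &types.Destination{
		Bucket:  aws.String(bucketARN), // e.g. "arn:aws:s3:::replica-bucket" (placeholder)
		Account: aws.String(destAccount),
		AccessControlTranslation: &types.AccessControlTranslation{
			Owner: types.OwnerOverrideDestination,
		},
		EncryptionConfiguration: &types.EncryptionConfiguration{
			ReplicaKmsKeyID: aws.String(kmsKeyARN),
		},
		StorageClass: types.StorageClassStandard,
	}
}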
Amazon S3 uses this key to encrypt replica objects. // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html ReplicaKmsKeyID *string noSmithyDocumentSerde @@ -819,414 +936,766 @@ type Error struct { // The error code is a string that uniquely identifies an error condition. It is // meant to be read and understood by programs that detect and handle errors by // type. The following is a list of Amazon S3 error codes. For more information, - // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) - // . + // see [Error responses]. + // // - Code: AccessDenied + // // - Description: Access Denied + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AccountProblem + // // - Description: There is a problem with your Amazon Web Services account that // prevents the action from completing successfully. Contact Amazon Web Services // Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AllAccessDisabled + // // - Description: All access to this Amazon S3 resource has been disabled. // Contact Amazon Web Services Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AmbiguousGrantByEmailAddress + // // - Description: The email address you provided is associated with more than // one account. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: AuthorizationHeaderMalformed + // // - Description: The authorization header you provided is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - HTTP Status Code: N/A + // // - Code: BadDigest + // // - Description: The Content-MD5 you specified did not match what we received. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketAlreadyExists + // // - Description: The requested bucket name is not available. The bucket // namespace is shared by all users of the system. Please select a different name // and try again. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketAlreadyOwnedByYou + // // - Description: The bucket you tried to create already exists, and you own it. // Amazon S3 returns this error in all Amazon Web Services Regions except in the // North Virginia Region. For legacy compatibility, if you re-create an existing // bucket that you already own in the North Virginia Region, Amazon S3 returns 200 // OK and resets the bucket access control lists (ACLs). + // // - Code: 409 Conflict (in all Regions except the North Virginia Region) + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketNotEmpty + // // - Description: The bucket you tried to delete is not empty. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: CredentialsNotSupported + // // - Description: This request does not support credentials. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: CrossLocationLoggingProhibited + // // - Description: Cross-location logging not allowed. Buckets in one geographic // location cannot log information to a bucket in another location. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: EntityTooSmall + // // - Description: Your proposed upload is smaller than the minimum allowed // object size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: EntityTooLarge + // // - Description: Your proposed upload exceeds the maximum allowed object size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: ExpiredToken + // // - Description: The provided token has expired. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IllegalVersioningConfigurationException + // // - Description: Indicates that the versioning configuration specified in the // request is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IncompleteBody + // // - Description: You did not provide the number of bytes specified by the // Content-Length HTTP header + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IncorrectNumberOfFilesInPostRequest + // // - Description: POST requires exactly one file upload per request. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InlineDataTooLarge + // // - Description: Inline data exceeds the maximum allowed size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InternalError + // // - Description: We encountered an internal error. Please try again. + // // - HTTP Status Code: 500 Internal Server Error + // // - SOAP Fault Code Prefix: Server + // // - Code: InvalidAccessKeyId + // // - Description: The Amazon Web Services access key ID you provided does not // exist in our records. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidAddressingHeader + // // - Description: You must specify the Anonymous role. + // // - HTTP Status Code: N/A + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidArgument + // // - Description: Invalid Argument + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidBucketName + // // - Description: The specified bucket is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidBucketState + // // - Description: The request is not valid with the current state of the bucket. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidDigest + // // - Description: The Content-MD5 you specified is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidEncryptionAlgorithmError + // // - Description: The encryption request you specified is not valid. The valid // value is AES256. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidLocationConstraint + // // - Description: The specified location constraint is not valid. 
For more - // information about Regions, see How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) - // . + // information about Regions, see [How to Select a Region for Your Buckets]. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidObjectState + // // - Description: The action is not valid for the current state of the object. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPart + // // - Description: One or more of the specified parts could not be found. The // part might not have been uploaded, or the specified entity tag might not have // matched the part's entity tag. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPartOrder + // // - Description: The list of parts was not in ascending order. Parts list must // be specified in order by part number. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPayer + // // - Description: All access to this object has been disabled. Please contact // Amazon Web Services Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPolicyDocument + // // - Description: The content of the form does not meet the conditions specified // in the policy document. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRange + // // - Description: The requested range cannot be satisfied. + // // - HTTP Status Code: 416 Requested Range Not Satisfiable + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRequest + // // - Description: Please use AWS4-HMAC-SHA256 . + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: SOAP requests must be made over an HTTPS connection. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported for buckets // with non-DNS compliant names. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported for buckets // with periods (.) in their names. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual // style requests. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest - // - Description: Amazon S3 Transfer Accelerate is not configured on this - // bucket. + // + // - Description: Amazon S3 Transfer Accelerate is not configured on this bucket. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported on this // bucket. Contact Amazon Web Services Support for more information. 
+ // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this // bucket. Contact Amazon Web Services Support for more information. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidSecurity + // // - Description: The provided security credentials are not valid. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidSOAPRequest + // // - Description: The SOAP request body is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidStorageClass + // // - Description: The storage class you specified is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidTargetBucketForLogging + // // - Description: The target bucket for logging does not exist, is not owned by // you, or does not have the appropriate grants for the log-delivery group. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidToken + // // - Description: The provided token is malformed or otherwise invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidURI + // // - Description: Couldn't parse the specified URI. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: KeyTooLongError + // // - Description: Your key is too long. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedACLError + // // - Description: The XML you provided was not well-formed or did not validate // against our published schema. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedPOSTRequest + // // - Description: The body of your POST request is not well-formed // multipart/form-data. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedXML + // // - Description: This happens when the user sends malformed XML (XML that // doesn't conform to the published XSD) for the configuration. The error message // is, "The XML you provided was not well-formed or did not validate against our // published schema." + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MaxMessageLengthExceeded + // // - Description: Your request was too big. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MaxPostPreDataLengthExceededError + // // - Description: Your POST request fields preceding the upload file were too // large. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MetadataTooLarge - // - Description: Your metadata headers exceed the maximum allowed metadata - // size. + // + // - Description: Your metadata headers exceed the maximum allowed metadata size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MethodNotAllowed + // // - Description: The specified method is not allowed against this resource. + // // - HTTP Status Code: 405 Method Not Allowed + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingAttachment + // // - Description: A SOAP attachment was expected, but none were found. 
+ // // - HTTP Status Code: N/A + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingContentLength + // // - Description: You must provide the Content-Length HTTP header. + // // - HTTP Status Code: 411 Length Required + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingRequestBodyError + // // - Description: This happens when the user sends an empty XML document as a // request. The error message is, "Request body is empty." + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingSecurityElement + // // - Description: The SOAP 1.1 request is missing a security element. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingSecurityHeader + // // - Description: Your request is missing a required header. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: NoLoggingStatusForKey + // // - Description: There is no such thing as a logging status subresource for a // key. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchBucket + // // - Description: The specified bucket does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchBucketPolicy + // // - Description: The specified bucket does not have a bucket policy. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchKey + // // - Description: The specified key does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchLifecycleConfiguration + // // - Description: The lifecycle configuration does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchUpload + // // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchVersion + // // - Description: Indicates that the version ID specified in the request does // not match an existing version. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NotImplemented + // // - Description: A header you provided implies functionality that is not // implemented. + // // - HTTP Status Code: 501 Not Implemented + // // - SOAP Fault Code Prefix: Server + // // - Code: NotSignedUp + // // - Description: Your account is not signed up for the Amazon S3 service. You // must sign up before you can use Amazon S3. You can sign up at the following URL: - // Amazon S3 (http://aws.amazon.com/s3) + // [Amazon S3] + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: OperationAborted + // // - Description: A conflicting conditional action is currently in progress // against this resource. Try again. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: PermanentRedirect + // // - Description: The bucket you are attempting to access must be addressed // using the specified endpoint. Send all future requests to this endpoint. 
+ // // - HTTP Status Code: 301 Moved Permanently + // // - SOAP Fault Code Prefix: Client + // // - Code: PreconditionFailed + // // - Description: At least one of the preconditions you specified did not hold. + // // - HTTP Status Code: 412 Precondition Failed + // // - SOAP Fault Code Prefix: Client + // // - Code: Redirect + // // - Description: Temporary redirect. + // // - HTTP Status Code: 307 Moved Temporarily + // // - SOAP Fault Code Prefix: Client + // // - Code: RestoreAlreadyInProgress + // // - Description: Object restore is already in progress. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestIsNotMultiPartContent + // // - Description: Bucket POST must be of the enclosure-type multipart/form-data. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTimeout + // // - Description: Your socket connection to the server was not read from or // written to within the timeout period. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTimeTooSkewed + // // - Description: The difference between the request time and the server's time // is too large. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTorrentOfBucketError + // // - Description: Requesting the torrent file of a bucket is not permitted. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: SignatureDoesNotMatch + // // - Description: The request signature we calculated does not match the // signature you provided. Check your Amazon Web Services secret access key and - // signing method. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) - // for details. + // signing method. For more information, see [REST Authentication]and [SOAP Authentication]for details. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: ServiceUnavailable + // // - Description: Service is unable to handle request. + // // - HTTP Status Code: 503 Service Unavailable + // // - SOAP Fault Code Prefix: Server + // // - Code: SlowDown + // // - Description: Reduce your request rate. + // // - HTTP Status Code: 503 Slow Down + // // - SOAP Fault Code Prefix: Server + // // - Code: TemporaryRedirect + // // - Description: You are being redirected to the bucket while DNS updates. + // // - HTTP Status Code: 307 Moved Temporarily + // // - SOAP Fault Code Prefix: Client + // // - Code: TokenRefreshRequired + // // - Description: The provided token must be refreshed. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: TooManyBuckets + // // - Description: You have attempted to create more buckets than allowed. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UnexpectedContent + // // - Description: This request does not support content. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UnresolvableGrantByEmailAddress + // // - Description: The email address you provided does not match any account on // record. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UserKeyMustBeSpecified + // // - Description: The bucket POST must contain the specified field name. If it // is specified, check the order of the fields. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // + // [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + // [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + // [Amazon S3]: http://aws.amazon.com/s3 + // [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html Code *string // The error key. @@ -1240,21 +1709,104 @@ type Error struct { // error message. Message *string - // The version ID of the error. This functionality is not supported for directory - // buckets. + // The version ID of the error. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } +// If the CreateBucketMetadataTableConfiguration request succeeds, but S3 +// +// Metadata was unable to create the table, this structure contains the error code +// and error message. +type ErrorDetails struct { + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error code. + // The possible error codes and error messages are as follows: + // + // - AccessDeniedCreatingResources - You don't have sufficient permissions to + // create the required resources. Make sure that you have + // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and + // s3tables:PutTablePolicy permissions, and then try again. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - AccessDeniedWritingToTable - Unable to write to the metadata table because + // of missing resource permissions. To fix the resource policy, Amazon S3 needs to + // create a new metadata table. To create a new metadata table, you must delete the + // metadata configuration for this bucket, and then create a new metadata + // configuration. + // + // - DestinationTableNotFound - The destination table doesn't exist. To create a + // new metadata table, you must delete the metadata configuration for this bucket, + // and then create a new metadata configuration. + // + // - ServerInternalError - An internal error has occurred. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableAlreadyExists - The table that you specified already exists in the + // table bucket's namespace. Specify a different table name. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableBucketNotFound - The table bucket that you specified doesn't exist in + // this Amazon Web Services Region and account. Create or choose a different table + // bucket. To create a new metadata table, you must delete the metadata + // configuration for this bucket, and then create a new metadata configuration. 
+ ErrorCode *string + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error + // message. The possible error codes and error messages are as follows: + // + // - AccessDeniedCreatingResources - You don't have sufficient permissions to + // create the required resources. Make sure that you have + // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and + // s3tables:PutTablePolicy permissions, and then try again. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - AccessDeniedWritingToTable - Unable to write to the metadata table because + // of missing resource permissions. To fix the resource policy, Amazon S3 needs to + // create a new metadata table. To create a new metadata table, you must delete the + // metadata configuration for this bucket, and then create a new metadata + // configuration. + // + // - DestinationTableNotFound - The destination table doesn't exist. To create a + // new metadata table, you must delete the metadata configuration for this bucket, + // and then create a new metadata configuration. + // + // - ServerInternalError - An internal error has occurred. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableAlreadyExists - The table that you specified already exists in the + // table bucket's namespace. Specify a different table name. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableBucketNotFound - The table bucket that you specified doesn't exist in + // this Amazon Web Services Region and account. Create or choose a different table + // bucket. To create a new metadata table, you must delete the metadata + // configuration for this bucket, and then create a new metadata configuration. + ErrorMessage *string + + noSmithyDocumentSerde +} + // The error information. type ErrorDocument struct { - // The object key name to use when a 4XX class error occurs. Replacement must be - // made for object keys containing special characters (such as carriage returns) - // when using XML requests. For more information, see XML related object key - // constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Key *string @@ -1267,9 +1819,12 @@ type EventBridgeConfiguration struct { noSmithyDocumentSerde } -// Optional configuration to replicate existing source bucket objects. For more -// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) -// in the Amazon S3 User Guide. +// Optional configuration to replicate existing source bucket objects. +// +// This parameter is no longer supported. 
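A minimal sketch of the EventBridgeConfiguration element above in use, assuming the standard PutBucketNotificationConfiguration client call (the element itself carries no fields):

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableEventBridge turns on delivery of bucket notifications to Amazon
// EventBridge by setting an (empty) EventBridgeConfiguration element.
func enableEventBridge(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
		NotificationConfiguration: &types.NotificationConfiguration{
			EventBridgeConfiguration: &types.EventBridgeConfiguration{},
		},
	})
	return err
}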
To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in +// the Amazon S3 User Guide. +// +// [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html type ExistingObjectReplication struct { // Specifies whether Amazon S3 replicates existing source bucket objects. @@ -1280,15 +1835,23 @@ type ExistingObjectReplication struct { noSmithyDocumentSerde } -// Specifies the Amazon S3 object key name to filter on and whether to filter on -// the suffix or prefix of the key name. +// Specifies the Amazon S3 object key name to filter on. An object key name is the +// name assigned to an object in your Amazon S3 bucket. You specify whether to +// filter on the suffix or prefix of the object key name. A prefix is a specific +// string of characters at the beginning of an object key name, which you can use +// to organize objects. For example, you can start the key names of related objects +// with a prefix, such as 2023- or engineering/ . Then, you can use FilterRule to +// find objects in a bucket with key names that have the same prefix. A suffix is +// similar to a prefix, but it is at the end of the object key name instead of at +// the beginning. type FilterRule struct { // The object key name prefix or suffix identifying one or more objects to which // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see Configuring - // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications]in the + // Amazon S3 User Guide. + // + // [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html Name FilterRuleName // The value that the filter searches for in object key names. @@ -1297,6 +1860,36 @@ type FilterRule struct { noSmithyDocumentSerde } +// The metadata table configuration for a general purpose bucket. +type GetBucketMetadataTableConfigurationResult struct { + + // The metadata table configuration for a general purpose bucket. + // + // This member is required. + MetadataTableConfigurationResult *MetadataTableConfigurationResult + + // The status of the metadata table. The status values are: + // + // - CREATING - The metadata table is in the process of being created in the + // specified table bucket. + // + // - ACTIVE - The metadata table has been created successfully and records are + // being delivered to the table. + // + // - FAILED - Amazon S3 is unable to create the metadata table, or Amazon S3 is + // unable to deliver records. See ErrorDetails for details. + // + // This member is required. + Status *string + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error code + // and error message. + Error *ErrorDetails + + noSmithyDocumentSerde +} + // A collection of parts associated with a multipart upload. type GetObjectAttributesParts struct { @@ -1318,10 +1911,12 @@ type GetObjectAttributesParts struct { // A container for elements related to a particular part. A response can contain // zero or more Parts elements. 
+ // // - General purpose buckets - For GetObjectAttributes , if a additional checksum // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1 // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the // request, the response doesn't return Part . + // // - Directory buckets - For GetObjectAttributes , no matter whether a additional // checksum is applied to the object specified in the request, the response returns // Part . @@ -1367,19 +1962,31 @@ type Grantee struct { // Screen name of the grantee. DisplayName *string - // Email address of the grantee. Using email addresses to specify a grantee is - // only supported in the following Amazon Web Services Regions: + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) + // // - South America (São Paulo) - // For a list of all the Amazon S3 supported Regions and endpoints, see Regions - // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the + // Amazon Web Services General Reference. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region EmailAddress *string // The canonical user ID of the grantee. @@ -1395,13 +2002,15 @@ type Grantee struct { type IndexDocument struct { // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request to - // samplebucket/images/ the data that is returned will be for the object with the - // key name images/index.html) The suffix must not be empty and must not include a - // slash character. Replacement must be made for object keys containing special - // characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // endpoint. (For example, if the suffix is index.html and you make a request to + // samplebucket/images/ , the data that is returned will be for the object with the + // key name images/index.html .) The suffix must not be empty and must not include + // a slash character. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Suffix *string @@ -1412,12 +2021,14 @@ type IndexDocument struct { // Container element that identifies who initiated the multipart upload. type Initiator struct { - // Name of the Principal. This functionality is not supported for directory - // buckets. + // Name of the Principal. + // + // This functionality is not supported for directory buckets. DisplayName *string // If the principal is an Amazon Web Services account, it provides the Canonical // User ID. 
If the principal is an IAM User, it provides a user ARN value. + // // Directory buckets - If the principal is an Amazon Web Services account, it // provides the Amazon Web Services account ID. If the principal is an IAM User, it // provides a user ARN value. @@ -1460,10 +2071,11 @@ type IntelligentTieringAndOperator struct { noSmithyDocumentSerde } -// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For -// information about the S3 Intelligent-Tiering storage class, see Storage class -// for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access type IntelligentTieringConfiguration struct { // The ID used to identify the S3 Intelligent-Tiering configuration. @@ -1498,10 +2110,12 @@ type IntelligentTieringFilter struct { And *IntelligentTieringAndOperator // An object key name prefix that identifies the subset of objects to which the - // rule applies. Replacement must be made for object keys containing special - // characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints Prefix *string // A container of a key value name pair. @@ -1511,8 +2125,9 @@ type IntelligentTieringFilter struct { } // Specifies the inventory configuration for an Amazon S3 bucket. For more -// information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) -// in the Amazon S3 API Reference. +// information, see [GET Bucket inventory]in the Amazon S3 API Reference. +// +// [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html type InventoryConfiguration struct { // Contains information about where to publish the inventory results. @@ -1607,9 +2222,10 @@ type InventoryS3BucketDestination struct { Format InventoryFormat // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. Although this value - // is optional, we strongly recommend that you set it to help prevent problems if - // the destination bucket ownership changes. + // provided, the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to help + // prevent problems if the destination bucket ownership changes. 
AccountId *string // Contains the type of server-side encryption used to encrypt the inventory @@ -1656,8 +2272,9 @@ type JSONOutput struct { type LambdaFunctionConfiguration struct { // The Amazon S3 bucket event for which to invoke the Lambda function. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // information, see [Supported Event Types]in the Amazon S3 User Guide. + // + // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html // // This member is required. Events []Event @@ -1669,8 +2286,9 @@ type LambdaFunctionConfiguration struct { LambdaFunctionArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -1680,13 +2298,18 @@ type LambdaFunctionConfiguration struct { noSmithyDocumentSerde } -// Container for the expiration for the lifecycle of the object. For more -// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// Container for the expiration for the lifecycle of the object. +// +// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. +// +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html type LifecycleExpiration struct { // Indicates at what date the object is to be moved or deleted. The date value // must conform to the ISO 8601 format. The time is always midnight UTC. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. Date *time.Time // Indicates the lifetime, in days, of the objects that are subject to the rule. @@ -1697,14 +2320,19 @@ type LifecycleExpiration struct { // versions. If set to true, the delete marker will be expired; if set to false the // policy takes no action. This cannot be specified with Days or Date in a // Lifecycle Expiration Policy. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpiredObjectDeleteMarker *bool noSmithyDocumentSerde } -// A lifecycle rule for individual objects in an Amazon S3 bucket. For more -// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// A lifecycle rule for individual objects in an Amazon S3 bucket. +// +// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. +// +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html type LifecycleRule struct { // If 'Enabled', the rule is currently being applied. 
If 'Disabled', the rule is @@ -1715,9 +2343,9 @@ type LifecycleRule struct { // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For - // more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. + // more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. + // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload // Specifies the expiration for the lifecycle of the object in the form of date, @@ -1727,7 +2355,9 @@ type LifecycleRule struct { // The Filter is used to identify objects that a Lifecycle Rule applies to. A // Filter must have exactly one of Prefix , Tag , or And specified. Filter is // required if the LifecycleRule does not contain a Prefix element. - Filter LifecycleRuleFilter + // + // Tag filters are not supported for directory buckets. + Filter *LifecycleRuleFilter // Unique identifier for the rule. The value cannot be longer than 255 characters. ID *string @@ -1737,6 +2367,9 @@ type LifecycleRule struct { // configuration action on a bucket that has versioning enabled (or suspended) to // request that Amazon S3 delete noncurrent object versions at a specific period in // the object's lifetime. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. NoncurrentVersionExpiration *NoncurrentVersionExpiration // Specifies the transition rule for the lifecycle rule that describes when @@ -1744,18 +2377,26 @@ type LifecycleRule struct { // versioning-enabled (or versioning is suspended), you can set this action to // request that Amazon S3 transition noncurrent object versions to a specific // storage class at a set period in the object's lifetime. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. NoncurrentVersionTransitions []NoncurrentVersionTransition // Prefix identifying one or more objects to which the rule applies. This is no - // longer used; use Filter instead. Replacement must be made for object keys - // containing special characters (such as carriage returns) when using XML - // requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // longer used; use Filter instead. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // Deprecated: This member has been deprecated. Prefix *string // Specifies when an Amazon S3 object transitions to a specified storage class. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. 
Transitions []Transition noSmithyDocumentSerde @@ -1783,80 +2424,54 @@ type LifecycleRuleAndOperator struct { } // The Filter is used to identify objects that a Lifecycle Rule applies to. A -// Filter must have exactly one of Prefix , Tag , or And specified. -// -// The following types satisfy this interface: -// -// LifecycleRuleFilterMemberAnd -// LifecycleRuleFilterMemberObjectSizeGreaterThan -// LifecycleRuleFilterMemberObjectSizeLessThan -// LifecycleRuleFilterMemberPrefix -// LifecycleRuleFilterMemberTag -type LifecycleRuleFilter interface { - isLifecycleRuleFilter() -} - -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more -// predicates. The Lifecycle Rule will apply to any object matching all of the -// predicates configured inside the And operator. -type LifecycleRuleFilterMemberAnd struct { - Value LifecycleRuleAndOperator - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberAnd) isLifecycleRuleFilter() {} - -// Minimum object size to which the rule applies. -type LifecycleRuleFilterMemberObjectSizeGreaterThan struct { - Value int64 - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberObjectSizeGreaterThan) isLifecycleRuleFilter() {} - -// Maximum object size to which the rule applies. -type LifecycleRuleFilterMemberObjectSizeLessThan struct { - Value int64 - - noSmithyDocumentSerde -} +// Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan , +// ObjectSizeLessThan , or And specified. If the Filter element is left empty, the +// Lifecycle Rule applies to all objects in the bucket. +type LifecycleRuleFilter struct { -func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {} + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or more + // predicates. The Lifecycle Rule will apply to any object matching all of the + // predicates configured inside the And operator. + And *LifecycleRuleAndOperator -// Prefix identifying one or more objects to which the rule applies. Replacement -// must be made for object keys containing special characters (such as carriage -// returns) when using XML requests. For more information, see XML related object -// key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) -// . -type LifecycleRuleFilterMemberPrefix struct { - Value string + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 - noSmithyDocumentSerde -} + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 -func (*LifecycleRuleFilterMemberPrefix) isLifecycleRuleFilter() {} + // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + Prefix *string -// This tag must exist in the object's tag set in order for the rule to apply. -type LifecycleRuleFilterMemberTag struct { - Value Tag + // This tag must exist in the object's tag set in order for the rule to apply. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. 
+ Tag *Tag noSmithyDocumentSerde } -func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} - -// Specifies the location where the bucket will be created. For directory buckets, -// the location type is Availability Zone. For more information about directory -// buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. This functionality is only supported by directory -// buckets. +// Specifies the location where the bucket will be created. +// +// For directory buckets, the location type is Availability Zone or Local Zone. +// For more information about directory buckets, see [Working with directory buckets]in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +// +// [Working with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html type LocationInfo struct { - // The name of the location where the bucket will be created. For directory - // buckets, the AZ ID of the Availability Zone where the bucket will be created. An - // example AZ ID value is usw2-az2 . + // The name of the location where the bucket will be created. + // + // For directory buckets, the name of the location is the Zone ID of the + // Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An + // example AZ ID value is usw2-az1 . Name *string // The type of location where the bucket will be created. @@ -1866,8 +2481,10 @@ type LocationInfo struct { } // Describes where logs are stored and the prefix that Amazon S3 assigns to all -// log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) -// in the Amazon S3 API Reference. +// log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API +// Reference. +// +// [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html type LoggingEnabled struct { // Specifies the bucket where you want Amazon S3 to store server access logs. You @@ -1887,10 +2504,12 @@ type LoggingEnabled struct { // This member is required. TargetPrefix *string - // Container for granting information. Buckets that use the bucket owner enforced - // setting for Object Ownership don't support target grants. For more information, - // see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) - // in the Amazon S3 User Guide. + // Container for granting information. + // + // Buckets that use the bucket owner enforced setting for Object Ownership don't + // support target grants. For more information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. + // + // [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general TargetGrants []TargetGrant // Amazon S3 key format for log objects. @@ -1911,16 +2530,49 @@ type MetadataEntry struct { noSmithyDocumentSerde } -// A container specifying replication metrics-related settings enabling +// The metadata table configuration for a general purpose bucket. +type MetadataTableConfiguration struct { + + // The destination information for the metadata table configuration. 
The + // destination table bucket must be in the same Region and Amazon Web Services + // account as the general purpose bucket. The specified metadata table name must be + // unique within the aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + S3TablesDestination *S3TablesDestination + + noSmithyDocumentSerde +} + +// The metadata table configuration for a general purpose bucket. The destination +// +// table bucket must be in the same Region and Amazon Web Services account as the +// general purpose bucket. The specified metadata table name must be unique within +// the aws_s3_metadata namespace in the destination table bucket. +type MetadataTableConfigurationResult struct { + + // The destination information for the metadata table configuration. The + // destination table bucket must be in the same Region and Amazon Web Services + // account as the general purpose bucket. The specified metadata table name must be + // unique within the aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + S3TablesDestinationResult *S3TablesDestinationResult + + noSmithyDocumentSerde +} + +// A container specifying replication metrics-related settings enabling +// // replication metrics and events. type Metrics struct { - // Specifies whether the replication metrics are enabled. + // Specifies whether the replication metrics are enabled. // // This member is required. Status MetricsStatus - // A container specifying the time threshold for emitting the + // A container specifying the time threshold for emitting the // s3:Replication:OperationMissedThreshold event. EventThreshold *ReplicationTimeValue @@ -1948,8 +2600,9 @@ type MetricsAndOperator struct { // by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an // existing metrics configuration, note that this is a full replacement of the // existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// . +// keep, they are erased. For more information, see [PutBucketMetricsConfiguration]. +// +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html type MetricsConfiguration struct { // The ID used to identify the metrics configuration. The ID has a 64 character @@ -1969,8 +2622,7 @@ type MetricsConfiguration struct { // Specifies a metrics configuration filter. The metrics configuration only // includes objects that meet the filter's criteria. A filter must be a prefix, an // object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more -// information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// . +// information, see [PutBucketMetricsConfiguration]. // // The following types satisfy this interface: // @@ -1978,6 +2630,8 @@ type MetricsConfiguration struct { // MetricsFilterMemberAnd // MetricsFilterMemberPrefix // MetricsFilterMemberTag +// +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html type MetricsFilter interface { isMetricsFilter() } @@ -2026,6 +2680,12 @@ type MultipartUpload struct { // The algorithm that was used to create a checksum of the object. 
ChecksumAlgorithm ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // Date and time at which the multipart upload was initiated. Initiated *time.Time @@ -2036,13 +2696,16 @@ type MultipartUpload struct { Key *string // Specifies the owner of the object that is part of the multipart upload. - // Directory buckets - The bucket owner is returned as the object owner for all the - // objects. + // + // Directory buckets - The bucket owner is returned as the object owner for all + // the objects. Owner *Owner - // The class of storage used to store the object. Directory buckets - Only the S3 - // Express One Zone storage class is supported by directory buckets to store - // objects. + // The class of storage used to store the object. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. StorageClass StorageClass // Upload ID that identifies the multipart upload. @@ -2056,20 +2719,31 @@ type MultipartUpload struct { // configuration action on a bucket that has versioning enabled (or suspended) to // request that Amazon S3 delete noncurrent object versions at a specific period in // the object's lifetime. +// +// This parameter applies to general purpose buckets only. It is not supported for +// directory bucket lifecycle configurations. type NoncurrentVersionExpiration struct { - // Specifies how many newer noncurrent versions must exist before Amazon S3 can - // perform the associated action on a given version. If there are this many more - // recent noncurrent versions, Amazon S3 will take the associated action. For more - // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. + // Specifies how many noncurrent versions Amazon S3 will retain. You can specify + // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any + // additional noncurrent versions beyond the specified number to retain. For more + // information about noncurrent versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. The value must be a non-zero positive integer. - // For information about the noncurrent days calculations, see How Amazon S3 - // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. + // For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent]in the Amazon S3 + // User Guide. + // + // This parameter applies to general purpose buckets only. 
It is not supported for + // directory bucket lifecycle configurations. + // + // [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations NoncurrentDays *int32 noSmithyDocumentSerde @@ -2084,18 +2758,20 @@ type NoncurrentVersionExpiration struct { // specific period in the object's lifetime. type NoncurrentVersionTransition struct { - // Specifies how many newer noncurrent versions must exist before Amazon S3 can - // perform the associated action on a given version. If there are this many more - // recent noncurrent versions, Amazon S3 will take the associated action. For more - // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. + // Specifies how many noncurrent versions Amazon S3 will retain in the same + // storage class before transitioning objects. You can specify up to 100 noncurrent + // versions to retain. Amazon S3 will transition any additional noncurrent versions + // beyond the specified number to retain. For more information about noncurrent + // versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + // + // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates How Long an Object Has Been - // Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. + // calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]in the Amazon S3 User Guide. + // + // [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations NoncurrentDays *int32 // The class of storage used to store the object. @@ -2127,8 +2803,9 @@ type NotificationConfiguration struct { } // Specifies object key name filtering rules. For information about key name -// filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) -// in the Amazon S3 User Guide. +// filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. +// +// [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html type NotificationConfigurationFilter struct { // A container for object key name prefix and suffix filtering rules. @@ -2143,21 +2820,31 @@ type Object struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm []ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // The entity tag is a hash of the object. 
The ETag reflects changes only to the // contents of an object, not its metadata. The ETag may or may not be an MD5 // digest of the object data. Whether or not it is depends on how the object was // created and how it is encrypted as described below: + // // - Objects created by the PUT Object, POST Object, or Copy operation, or // through the Amazon Web Services Management Console, and are encrypted by SSE-S3 // or plaintext, have ETags that are an MD5 digest of their object data. + // // - Objects created by the PUT Object, POST Object, or Copy operation, or // through the Amazon Web Services Management Console, and are encrypted by SSE-C // or SSE-KMS, have ETags that are not an MD5 digest of their object data. + // // - If an object is created by either the Multipart Upload or Part Copy // operation, the ETag is not an MD5 digest, regardless of the method of // encryption. If an object is larger than 16 MB, the Amazon Web Services // Management Console will upload or copy that object as a Multipart Upload, and // therefore the ETag will not be an MD5 digest. + // // Directory buckets - MD5 is not supported by directory buckets. ETag *string @@ -2168,25 +2855,32 @@ type Object struct { // Creation date of the object. LastModified *time.Time - // The owner of the object Directory buckets - The bucket owner is returned as the - // object owner. + // The owner of the object + // + // Directory buckets - The bucket owner is returned as the object owner. Owner *Owner // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see Working - // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Only the S3 Express One Zone storage class is supported by directory - // buckets to store objects. + // about these storage classes and how to work with archived objects, see [Working with archived objects]in the + // Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in + // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage + // class) in Dedicated Local Zones. + // + // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html RestoreStatus *RestoreStatus // Size in bytes of the object Size *int64 - // The class of storage used to store the object. Directory buckets - Only the S3 - // Express One Zone storage class is supported by directory buckets to store - // objects. + // The class of storage used to store the object. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. StorageClass ObjectStorageClass noSmithyDocumentSerde @@ -2195,16 +2889,39 @@ type Object struct { // Object Identifier is unique value to identify objects. type ObjectIdentifier struct { - // Key name of the object. Replacement must be made for object keys containing - // special characters (such as carriage returns) when using XML requests. 
For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // Key name of the object. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Key *string - // Version ID for the specific version of the object to delete. This functionality - // is not supported for directory buckets. + // An entity tag (ETag) is an identifier assigned by a web server to a specific + // version of a resource found at a URL. This header field makes the request method + // conditional on ETags . + // + // Entity tags (ETags) for S3 Express One Zone are random alphanumeric strings + // unique to the object. + ETag *string + + // If present, the objects are deleted only if its modification times matches the + // provided Timestamp . + // + // This functionality is only supported for directory buckets. + LastModifiedTime *time.Time + + // If present, the objects are deleted only if its size matches the provided size + // in bytes. + // + // This functionality is only supported for directory buckets. + Size *int64 + + // Version ID for the specific version of the object to delete. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -2262,38 +2979,41 @@ type ObjectLockRule struct { // A container for elements related to an individual part. type ObjectPart struct { - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present + // if the multipart upload request was created with the CRC32 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC32C checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the multipart upload request was created with the SHA1 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the multipart upload request was created with the SHA256 checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // The part number identifying the part. This value is a positive integer between @@ -2312,6 +3032,12 @@ type ObjectVersion struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm []ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // The entity tag is an MD5 hash of that version of the object. ETag *string @@ -2330,9 +3056,10 @@ type ObjectVersion struct { // Specifies the restoration status of an object. 
Objects in certain storage // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see Working - // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. + // about these storage classes and how to work with archived objects, see [Working with archived objects]in the + // Amazon S3 User Guide. + // + // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html RestoreStatus *RestoreStatus // Size in bytes of the object. @@ -2373,14 +3100,23 @@ type Owner struct { // Container for the display name of the owner. This value is only supported in // the following Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) + // // - South America (São Paulo) + // // This functionality is not supported for directory buckets. DisplayName *string @@ -2405,23 +3141,30 @@ type OwnershipControls struct { type OwnershipControlsRule struct { // The container element for object ownership for a bucket's ownership controls. + // // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. ObjectWriter - The uploading account will own the object if the - // object is uploaded with the bucket-owner-full-control canned ACL. + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't // specify an ACL or specify bucket owner full control ACLs (such as the predefined // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced - // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon - // use cases where you must control access for each object individually. For more - // information about S3 Object Ownership, see Controlling ownership of objects and - // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Directory buckets use the bucket owner enforced setting for S3 Object - // Ownership. + // the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + // you must control access for each object individually. For more information about + // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. 
+ // + // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html // // This member is required. ObjectOwnership ObjectOwnership @@ -2437,36 +3180,41 @@ type ParquetInput struct { // Container for elements related to a part. type Part struct { - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present + // if the object was uploaded with the CRC32 checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is + // present if the object was uploaded with the CRC32C checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the object was uploaded with the SHA1 checksum algorithm. 
For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the object was uploaded with the SHA256 checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag returned when the part was uploaded. @@ -2486,13 +3234,22 @@ type Part struct { } // Amazon S3 keys for log objects are partitioned in the following format: -// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// // PartitionedPrefix defaults to EventTime delivery when server access logs are // delivered. type PartitionedPrefix struct { // Specifies the partition date source for the partitioned prefix. - // PartitionDateSource can be EventTime or DeliveryTime. + // PartitionDateSource can be EventTime or DeliveryTime . + // + // For DeliveryTime , the time in the log file names corresponds to the delivery + // time for the log files. + // + // For EventTime , The logs delivered are for a specific day only. The year, month, + // and day correspond to the day on which the event occurred, and the hour, minutes + // and seconds are set to 00 in the key. PartitionDateSource PartitionDateSource noSmithyDocumentSerde @@ -2534,41 +3291,48 @@ type ProgressEvent struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. For more -// information about when Amazon S3 considers a bucket or object public, see The -// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon S3 User Guide. +// information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in +// the Amazon S3 User Guide. +// +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status type PublicAccessBlockConfiguration struct { // Specifies whether Amazon S3 should block public access control lists (ACLs) for // this bucket and objects in this bucket. Setting this element to TRUE causes the // following behavior: - // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is - // public. + // + // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public. + // // - PUT Object calls fail if the request includes a public ACL. + // // - PUT Bucket calls fail if the request includes a public ACL. 
+ // // Enabling this setting doesn't affect existing policies or ACLs. BlockPublicAcls *bool // Specifies whether Amazon S3 should block public bucket policies for this // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT - // Bucket policy if the specified bucket policy allows public access. Enabling this - // setting doesn't affect existing bucket policies. + // Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. BlockPublicPolicy *bool // Specifies whether Amazon S3 should ignore public ACLs for this bucket and // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore - // all public ACLs on this bucket and objects in this bucket. Enabling this setting - // doesn't affect the persistence of any existing ACLs and doesn't prevent new - // public ACLs from being set. + // all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs and + // doesn't prevent new public ACLs from being set. IgnorePublicAcls *bool // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only - // Amazon Web Service principals and authorized users within this account if the - // bucket has a public policy. Enabling this setting doesn't affect previously - // stored bucket policies, except that public and cross-account access within any - // public bucket policy, including non-public delegation to specific accounts, is - // blocked. + // Amazon Web Services service principals and authorized users within this account + // if the bucket has a public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. RestrictPublicBuckets *bool noSmithyDocumentSerde @@ -2590,8 +3354,9 @@ type QueueConfiguration struct { QueueArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -2604,7 +3369,14 @@ type QueueConfiguration struct { // The container for the records event. type RecordsEvent struct { - // The byte array of partial, one or more result records. + // The byte array of partial, one or more result records. S3 Select doesn't + // guarantee that a record will be self-contained in one record frame. To ensure + // continuous streaming of data, S3 Select might split the same record across + // multiple record frames instead of aggregating the results in memory. Some S3 + // clients (for example, the SDKforJava) handle this behavior by creating a + // ByteStream out of the response by default. Other clients might not handle this + // behavior by default. 
In those cases, you must aggregate the results on the + // client side and parse the response. Payload []byte noSmithyDocumentSerde @@ -2630,18 +3402,22 @@ type Redirect struct { // documents/ , you can set a condition block with KeyPrefixEquals set to docs/ // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one // of the siblings is present. Can be present only if ReplaceKeyWith is not - // provided. Replacement must be made for object keys containing special characters - // (such as carriage returns) when using XML requests. For more information, see - // XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // provided. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints ReplaceKeyPrefixWith *string // The specific object key to use in the redirect request. For example, redirect // request to error.html . Not required if one of the siblings is present. Can be - // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made - // for object keys containing special characters (such as carriage returns) when - // using XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints ReplaceKeyWith *string noSmithyDocumentSerde @@ -2667,9 +3443,11 @@ type RedirectAllRequestsTo struct { // Amazon S3 doesn't replicate replica modifications by default. In the latest // version of replication configuration (when Filter is specified), you can // specify this element and set the status to Enabled to replicate modifications -// on replicas. If you don't specify the Filter element, Amazon S3 assumes that -// the replication configuration is the earlier version, V1. In the earlier -// version, this element is not allowed. +// on replicas. +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, this element +// is not allowed. type ReplicaModifications struct { // Specifies whether Amazon S3 replicates modifications on replicas. @@ -2685,9 +3463,10 @@ type ReplicaModifications struct { type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role - // that Amazon S3 assumes when replicating objects. For more information, see How - // to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) - // in the Amazon S3 User Guide. + // that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication]in + // the Amazon S3 User Guide. 
+ // + // [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html // // This member is required. Role *string @@ -2720,34 +3499,41 @@ type ReplicationRule struct { // DeleteMarkerReplication element. If your Filter includes a Tag element, the // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example - // configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) - // . For more information about delete marker replication, see Basic Rule - // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) - // . If you are using an earlier version of the replication configuration, Amazon - // S3 handles replication of delete markers differently. For more information, see - // Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) - // . + // configuration, see [Basic Rule Configuration]. + // + // For more information about delete marker replication, see [Basic Rule Configuration]. + // + // If you are using an earlier version of the replication configuration, Amazon S3 + // handles replication of delete markers differently. For more information, see [Backward Compatibility]. + // + // [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html + // [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations DeleteMarkerReplication *DeleteMarkerReplication - // Optional configuration to replicate existing source bucket objects. For more - // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) - // in the Amazon S3 User Guide. + // Optional configuration to replicate existing source bucket objects. + // + // This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in + // the Amazon S3 User Guide. + // + // [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html ExistingObjectReplication *ExistingObjectReplication // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix , Tag , or an And child // element. - Filter ReplicationRuleFilter + Filter *ReplicationRuleFilter // A unique identifier for the rule. The maximum value is 255 characters. ID *string // An object key name prefix that identifies the object or objects to which the // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. Replacement must be made for - // object keys containing special characters (such as carriage returns) when using - // XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // objects in a bucket, specify an empty string. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. 
For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // Deprecated: This member has been deprecated. Prefix *string @@ -2757,8 +3543,10 @@ type ReplicationRule struct { // according to all replication rules. However, if there are two or more rules with // the same destination bucket, then objects will be replicated according to the // rule with the highest priority. The higher the number, the higher the priority. - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) - // in the Amazon S3 User Guide. + // + // For more information, see [Replication] in the Amazon S3 User Guide. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html Priority *int32 // A container that describes additional filters for identifying the source @@ -2773,9 +3561,13 @@ type ReplicationRule struct { // A container for specifying rule filters. The filters determine the subset of // objects to which the rule applies. This element is required only if you specify -// more than one filter. For example: +// more than one filter. +// +// For example: +// // - If you specify both a Prefix and a Tag filter, wrap these filters in an And // tag. +// // - If you specify a filter based on multiple tags, wrap the Tag elements in an // And tag. type ReplicationRuleAndOperator struct { @@ -2793,67 +3585,50 @@ type ReplicationRuleAndOperator struct { // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix , Tag , or an And child // element. -// -// The following types satisfy this interface: -// -// ReplicationRuleFilterMemberAnd -// ReplicationRuleFilterMemberPrefix -// ReplicationRuleFilterMemberTag -type ReplicationRuleFilter interface { - isReplicationRuleFilter() -} - -// A container for specifying rule filters. The filters determine the subset of -// objects to which the rule applies. This element is required only if you specify -// more than one filter. For example: -// - If you specify both a Prefix and a Tag filter, wrap these filters in an And -// tag. -// - If you specify a filter based on multiple tags, wrap the Tag elements in an -// And tag. -type ReplicationRuleFilterMemberAnd struct { - Value ReplicationRuleAndOperator +type ReplicationRuleFilter struct { - noSmithyDocumentSerde -} - -func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {} - -// An object key name prefix that identifies the subset of objects to which the -// rule applies. Replacement must be made for object keys containing special -// characters (such as carriage returns) when using XML requests. For more -// information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) -// . -type ReplicationRuleFilterMemberPrefix struct { - Value string - - noSmithyDocumentSerde -} + // A container for specifying rule filters. The filters determine the subset of + // objects to which the rule applies. This element is required only if you specify + // more than one filter. For example: + // + // - If you specify both a Prefix and a Tag filter, wrap these filters in an And + // tag. + // + // - If you specify a filter based on multiple tags, wrap the Tag elements in an + // And tag. 
+ And *ReplicationRuleAndOperator -func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {} + // An object key name prefix that identifies the subset of objects to which the + // rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + Prefix *string -// A container for specifying a tag key and value. The rule applies only to -// objects that have the tag in their tag set. -type ReplicationRuleFilterMemberTag struct { - Value Tag + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag noSmithyDocumentSerde } -func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {} - -// A container specifying S3 Replication Time Control (S3 RTC) related +// A container specifying S3 Replication Time Control (S3 RTC) related +// // information, including whether S3 RTC is enabled and the time when all objects // and operations on objects must be replicated. Must be specified together with a // Metrics block. type ReplicationTime struct { - // Specifies whether the replication time is enabled. + // Specifies whether the replication time is enabled. // // This member is required. Status ReplicationTimeStatus - // A container specifying the time by which replication should be complete for all - // objects and operations on objects. + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. // // This member is required. Time *ReplicationTimeValue @@ -2861,11 +3636,14 @@ type ReplicationTime struct { noSmithyDocumentSerde } -// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// // and replication metrics EventThreshold . type ReplicationTimeValue struct { - // Contains an integer specifying time in minutes. Valid value: 15 + // Contains an integer specifying time in minutes. + // + // Valid value: 15 Minutes *int32 noSmithyDocumentSerde @@ -2896,8 +3674,10 @@ type RequestProgress struct { type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify - // OutputLocation . The Days element is required for regular restores, and must not - // be provided for select requests. + // OutputLocation . + // + // The Days element is required for regular restores, and must not be provided for + // select requests. Days *int32 // The optional description for the job. @@ -2910,13 +3690,23 @@ type RestoreRequest struct { // Describes the location where the restore job's output is stored. OutputLocation *OutputLocation + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. [Learn more] + // // Describes the parameters for Select job types. + // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ SelectParameters *SelectParameters // Retrieval tier at which the restore will be processed. Tier Tier + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. 
[Learn more] + // // Type of restore request. + // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ Type RestoreRequestType noSmithyDocumentSerde @@ -2924,34 +3714,45 @@ type RestoreRequest struct { // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information -// about these storage classes and how to work with archived objects, see Working -// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) -// in the Amazon S3 User Guide. This functionality is not supported for directory -// buckets. Only the S3 Express One Zone storage class is supported by directory -// buckets to store objects. +// about these storage classes and how to work with archived objects, see [Working with archived objects]in the +// Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Directory buckets +// only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in +// Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage +// class) in Dedicated Local Zones. +// +// [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html type RestoreStatus struct { // Specifies whether the object is currently being restored. If the object // restoration is in progress, the header returns the value TRUE . For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="true" If the object - // restoration has completed, the header returns the value FALSE . For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" If the object hasn't been restored, - // there is no header response. + // + // x-amz-optional-object-attributes: IsRestoreInProgress="true" + // + // If the object restoration has completed, the header returns the value FALSE . + // For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // If the object hasn't been restored, there is no header response. IsRestoreInProgress *bool // Indicates when the restored copy will expire. This value is populated only if // the object has already been restored. For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" RestoreExpiryDate *time.Time noSmithyDocumentSerde } // Specifies the redirect behavior and when a redirect is applied. For more -// information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) -// in the Amazon S3 User Guide. +// information about routing rules, see [Configuring advanced conditional redirects]in the Amazon S3 User Guide. +// +// [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects type RoutingRule struct { // Container for redirect information. You can redirect requests to another host, @@ -3015,6 +3816,69 @@ type S3Location struct { noSmithyDocumentSerde } +// The destination information for the metadata table configuration. 
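(Editorial sketch, not part of the vendored diff.) The S3TablesDestination type being introduced here backs the new CreateBucketMetadataTableConfiguration operation wired up in validators.go further down. Assuming the client method follows the usual generated naming, a minimal call using these types might look like the following; the bucket name, table bucket ARN, and table name are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Attach a metadata table configuration to a general purpose bucket.
	// Bucket name, table bucket ARN, and table name are placeholders.
	_, err = client.CreateBucketMetadataTableConfiguration(context.TODO(),
		&s3.CreateBucketMetadataTableConfigurationInput{
			Bucket: aws.String("example-general-purpose-bucket"),
			MetadataTableConfiguration: &types.MetadataTableConfiguration{
				S3TablesDestination: &types.S3TablesDestination{
					TableBucketArn: aws.String("arn:aws:s3tables:us-east-1:111122223333:bucket/example-table-bucket"),
					TableName:      aws.String("example_metadata_table"),
				},
			},
		})
	if err != nil {
		log.Fatal(err)
	}
}
```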
The +// +// destination table bucket must be in the same Region and Amazon Web Services +// account as the general purpose bucket. The specified metadata table name must be +// unique within the aws_s3_metadata namespace in the destination table bucket. +type S3TablesDestination struct { + + // The Amazon Resource Name (ARN) for the table bucket that's specified as the + // destination in the metadata table configuration. The destination table bucket + // must be in the same Region and Amazon Web Services account as the general + // purpose bucket. + // + // This member is required. + TableBucketArn *string + + // The name for the metadata table in your metadata table configuration. The + // specified metadata table name must be unique within the aws_s3_metadata + // namespace in the destination table bucket. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +// The destination information for the metadata table configuration. The +// +// destination table bucket must be in the same Region and Amazon Web Services +// account as the general purpose bucket. The specified metadata table name must be +// unique within the aws_s3_metadata namespace in the destination table bucket. +type S3TablesDestinationResult struct { + + // The Amazon Resource Name (ARN) for the metadata table in the metadata table + // configuration. The specified metadata table name must be unique within the + // aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + TableArn *string + + // The Amazon Resource Name (ARN) for the table bucket that's specified as the + // destination in the metadata table configuration. The destination table bucket + // must be in the same Region and Amazon Web Services account as the general + // purpose bucket. + // + // This member is required. + TableBucketArn *string + + // The name for the metadata table in your metadata table configuration. The + // specified metadata table name must be unique within the aws_s3_metadata + // namespace in the destination table bucket. + // + // This member is required. + TableName *string + + // The table bucket namespace for the metadata table in your metadata table + // configuration. This value is always aws_s3_metadata . + // + // This member is required. + TableNamespace *string + + noSmithyDocumentSerde +} + // Specifies the byte range of the object to get the records from. A record is // processed when its first byte is contained by the range. This parameter is // optional, but when specified, it must not be empty. See RFC 2616, Section @@ -3094,11 +3958,26 @@ type SelectObjectContentEventStreamMemberStats struct { func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {} +// Amazon S3 Select is no longer available to new customers. Existing customers of +// Amazon S3 Select can continue to use the feature as usual. [Learn more] +// // Describes the parameters for Select job types. +// +// Learn [How to optimize querying your data in Amazon S3] using [Amazon Athena], [S3 Object Lambda], or client-side filtering. 
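(Editorial sketch, not part of the vendored diff.) As the RecordsEvent documentation earlier in this hunk notes, S3 Select may split a single record across multiple frames, so callers are expected to buffer the payloads and parse only the concatenated result. For an existing customer of the (now closed to new customers) S3 Select feature, a minimal consumer of the event stream might look like this; the bucket, key, and query are placeholders.

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.SelectObjectContent(context.TODO(), &s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"), // placeholder
		Key:            aws.String("example.csv"),    // placeholder
		Expression:     aws.String("SELECT * FROM S3Object s"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	stream := out.GetStream()
	defer stream.Close()

	// A single record may span multiple RecordsEvent frames, so buffer the
	// payloads and parse only the concatenated result.
	var buf bytes.Buffer
	for event := range stream.Events() {
		if rec, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			buf.Write(rec.Value.Payload)
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
}
```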
+// +// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [How to optimize querying your data in Amazon S3]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [Amazon Athena]: https://docs.aws.amazon.com/athena/latest/ug/what-is.html +// [S3 Object Lambda]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html type SelectParameters struct { + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. [Learn more] + // // The expression that is used to query the object. // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + // // This member is required. Expression *string @@ -3122,34 +4001,73 @@ type SelectParameters struct { // Describes the default server-side encryption to apply to new objects in the // bucket. If a PUT Object request doesn't specify any server-side encryption, this -// default encryption will be applied. If you don't specify a customer managed key -// at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key -// in your Amazon Web Services account the first time that you add an object -// encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for -// SSE-KMS. For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) -// in the Amazon S3 API Reference. +// default encryption will be applied. For more information, see [PutBucketEncryption]. +// +// - General purpose buckets - If you don't specify a customer managed key at +// configuration, Amazon S3 automatically creates an Amazon Web Services KMS key ( +// aws/s3 ) in your Amazon Web Services account the first time that you add an +// object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS +// key for SSE-KMS. +// +// - Directory buckets - Your SSE-KMS configuration can only support 1 [customer managed key]per +// directory bucket's lifetime. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. +// +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk type ServerSideEncryptionByDefault struct { // Server-side encryption algorithm to use for the default encryption. // + // For directory buckets, there are only two supported values for server-side + // encryption: AES256 and aws:kms . + // // This member is required. SSEAlgorithm ServerSideEncryption - // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services - // KMS key ID to use for the default encryption. This parameter is allowed if and - // only if SSEAlgorithm is set to aws:kms . You can specify the key ID, key alias, - // or the Amazon Resource Name (ARN) of the KMS key. + // Amazon Web Services Key Management Service (KMS) customer managed key ID to use + // for the default encryption. + // + // - General purpose buckets - This parameter is allowed if and only if + // SSEAlgorithm is set to aws:kms or aws:kms:dsse . 
+ // + // - Directory buckets - This parameter is allowed if and only if SSEAlgorithm is + // set to aws:kms . + // + // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the + // KMS key. + // // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // // - Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // // - Key Alias: alias/alias-name - // If you use a key ID, you can run into a LogDestination undeliverable error when - // creating a VPC flow log. If you are using encryption with cross-account or - // Amazon Web Services service operations you must use a fully qualified KMS key - // ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) - // . Amazon S3 only supports symmetric encryption KMS keys. For more information, - // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // + // If you are using encryption with cross-account or Amazon Web Services service + // operations, you must use a fully qualified KMS key ARN. For more information, + // see [Using encryption for cross-account operations]. + // + // - General purpose buckets - If you're specifying a customer managed KMS key, + // we recommend using a fully qualified KMS key ARN. If you use a KMS key alias + // instead, then KMS resolves the key within the requester’s account. This behavior + // can result in data that's encrypted with a KMS key that belongs to the + // requester, and not the bucket owner. Also, if you use a key ID, you can run into + // a LogDestination undeliverable error when creating a VPC flow log. + // + // - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory + // bucket, only use the key ID or key ARN. The key alias format of the KMS key + // isn't supported. + // + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy + // [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html KMSMasterKeyID *string noSmithyDocumentSerde @@ -3168,6 +4086,18 @@ type ServerSideEncryptionConfiguration struct { } // Specifies the default server-side encryption configuration. +// +// - General purpose buckets - If you're specifying a customer managed KMS key, +// we recommend using a fully qualified KMS key ARN. If you use a KMS key alias +// instead, then KMS resolves the key within the requester’s account. This behavior +// can result in data that's encrypted with a KMS key that belongs to the +// requester, and not the bucket owner. +// +// - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory +// bucket, only use the key ID or key ARN. The key alias format of the KMS key +// isn't supported. 
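(Editorial sketch, not part of the vendored diff.) Tying the ServerSideEncryptionByDefault and ServerSideEncryptionRule documentation together, a default SSE-KMS encryption rule with an S3 Bucket Key might be applied to a general purpose bucket roughly as follows; the bucket name and the fully qualified KMS key ARN are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Default SSE-KMS encryption with an S3 Bucket Key enabled. A fully
	// qualified key ARN is used, as recommended for cross-account access.
	_, err = client.PutBucketEncryption(context.TODO(), &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```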
+// +// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk type ServerSideEncryptionRule struct { // Specifies the default server-side encryption to apply to new objects in the @@ -3178,17 +4108,33 @@ type ServerSideEncryptionRule struct { // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 - // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more - // information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. + // to use an S3 Bucket Key. + // + // - General purpose buckets - By default, S3 Bucket Key is not enabled. For + // more information, see [Amazon S3 Bucket Keys]in the Amazon S3 User Guide. + // + // - Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't + // supported, when you copy SSE-KMS encrypted objects from general purpose buckets + // to directory buckets, from directory buckets to general purpose buckets, or + // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + // call to KMS every time a copy request is made for a KMS-encrypted object. + // + // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops BucketKeyEnabled *bool noSmithyDocumentSerde } -// The established temporary security credentials of the session. Directory -// buckets - These session credentials are only supported for the authentication -// and authorization of Zonal endpoint APIs on directory buckets. +// The established temporary security credentials of the session. +// +// Directory buckets - These session credentials are only supported for the +// authentication and authorization of Zonal endpoint API operations on directory +// buckets. type SessionCredentials struct { // A unique identifier that's associated with a secret access key. The access key @@ -3224,7 +4170,9 @@ type SessionCredentials struct { } // To use simple format for S3 keys for log objects, set SimplePrefix to an empty -// object. [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// object. +// +// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] type SimplePrefix struct { noSmithyDocumentSerde } @@ -3240,12 +4188,14 @@ type SourceSelectionCriteria struct { // Amazon S3 doesn't replicate replica modifications by default. In the latest // version of replication configuration (when Filter is specified), you can // specify this element and set the status to Enabled to replicate modifications - // on replicas. If you don't specify the Filter element, Amazon S3 assumes that - // the replication configuration is the earlier version, V1. In the earlier - // version, this element is not allowed + // on replicas. 
+ // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, this element + // is not allowed ReplicaModifications *ReplicaModifications - // A container for filter information for the selection of Amazon S3 objects + // A container for filter information for the selection of Amazon S3 objects // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria // in the replication configuration, this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects @@ -3363,10 +4313,12 @@ type Tagging struct { noSmithyDocumentSerde } -// Container for granting information. Buckets that use the bucket owner enforced -// setting for Object Ownership don't support target grants. For more information, -// see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. +// Container for granting information. +// +// Buckets that use the bucket owner enforced setting for Object Ownership don't +// support target grants. For more information, see [Permissions server access log delivery]in the Amazon S3 User Guide. +// +// [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general type TargetGrant struct { // Container for the person being granted permissions. @@ -3397,9 +4349,10 @@ type TargetObjectKeyFormat struct { // without additional operational overhead. type Tiering struct { - // S3 Intelligent-Tiering access tier. See Storage class for automatically - // optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) - // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3 + // Intelligent-Tiering storage class. + // + // [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access // // This member is required. AccessTier IntelligentTieringAccessTier @@ -3422,8 +4375,9 @@ type Tiering struct { type TopicConfiguration struct { // The Amazon S3 bucket event about which to send notifications. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // information, see [Supported Event Types]in the Amazon S3 User Guide. + // + // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html // // This member is required. Events []Event @@ -3435,8 +4389,9 @@ type TopicConfiguration struct { TopicArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. 
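(Editorial sketch, not part of the vendored diff.) A TopicConfiguration using the object key name filtering described here might be applied roughly like this; the bucket name and SNS topic ARN are placeholders, and the event name is written as a raw string rather than relying on a specific enum constant.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Notify an SNS topic only for newly created objects under logs/ that
	// end in .log; bucket name and topic ARN are placeholders.
	_, err = client.PutBucketNotificationConfiguration(context.TODO(),
		&s3.PutBucketNotificationConfigurationInput{
			Bucket: aws.String("example-bucket"),
			NotificationConfiguration: &types.NotificationConfiguration{
				TopicConfigurations: []types.TopicConfiguration{{
					TopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:example-topic"),
					Events:   []types.Event{types.Event("s3:ObjectCreated:*")},
					Filter: &types.NotificationConfigurationFilter{
						Key: &types.S3KeyFilter{
							FilterRules: []types.FilterRule{
								{Name: types.FilterRuleNamePrefix, Value: aws.String("logs/")},
								{Name: types.FilterRuleNameSuffix, Value: aws.String(".log")},
							},
						},
					},
				}},
			},
		})
	if err != nil {
		log.Fatal(err)
	}
}
```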
+ // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -3447,9 +4402,10 @@ type TopicConfiguration struct { } // Specifies when an object transitions to a specified storage class. For more -// information about Amazon S3 lifecycle configuration rules, see Transitioning -// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) -// in the Amazon S3 User Guide. +// information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle]in the Amazon S3 +// User Guide. +// +// [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html type Transition struct { // Indicates when objects are transitioned to the specified storage class. The @@ -3457,7 +4413,15 @@ type Transition struct { Date *time.Time // Indicates the number of days after creation when objects are transitioned to - // the specified storage class. The value must be a positive integer. + // the specified storage class. If the specified storage class is + // INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE , valid values are + // 0 or positive integers. If the specified storage class is STANDARD_IA or + // ONEZONE_IA , valid values are positive integers greater than 30 . Be aware that + // some storage classes have a minimum storage duration and that you're charged for + // transitioning objects before their minimum storage duration. For more + // information, see [Constraints and considerations for transitions]in the Amazon S3 User Guide. + // + // [Constraints and considerations for transitions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-transition-general-considerations.html#lifecycle-configuration-constraints Days *int32 // The storage class to which you want the object to transition. @@ -3467,8 +4431,9 @@ type Transition struct { } // Describes the versioning state of an Amazon S3 bucket. For more information, -// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) -// in the Amazon S3 API Reference. +// see [PUT Bucket versioning]in the Amazon S3 API Reference. +// +// [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html type VersioningConfiguration struct { // Specifies whether MFA delete is enabled in the bucket versioning configuration. @@ -3491,8 +4456,9 @@ type WebsiteConfiguration struct { // The name of the index document for the website. IndexDocument *IndexDocument - // The redirect behavior for every request to this bucket's website endpoint. If - // you specify this property, you can't specify any other property. + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo // Rules that define when a redirect is applied and the redirect behavior. 
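(Editorial sketch, not part of the vendored diff.) The most visible API change in the types.go hunks above is that ReplicationRuleFilter (and, per the UnknownUnionMember hunk that follows, LifecycleRuleFilter) is no longer a tagged union but a plain struct with optional And, Prefix, and Tag fields, and ReplicationRule.Filter is now a pointer. A rough before/after for calling code that previously used the union members:

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Before this update (union member, now removed):
	//   filter := &types.ReplicationRuleFilterMemberPrefix{Value: "logs/"}
	// After: a flattened struct; set exactly one of Prefix, Tag, or And.
	filter := &types.ReplicationRuleFilter{Prefix: aws.String("logs/")}

	rule := types.ReplicationRule{
		Status: types.ReplicationRuleStatusEnabled,
		Filter: filter, // the Filter field is now *types.ReplicationRuleFilter
		Destination: &types.Destination{
			Bucket: aws.String("arn:aws:s3:::example-destination-bucket"), // placeholder
		},
	}
	_ = rule // LifecycleRuleFilter follows the same flattened pattern.
}
```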
@@ -3513,7 +4479,5 @@ type UnknownUnionMember struct { } func (*UnknownUnionMember) isAnalyticsFilter() {} -func (*UnknownUnionMember) isLifecycleRuleFilter() {} func (*UnknownUnionMember) isMetricsFilter() {} -func (*UnknownUnionMember) isReplicationRuleFilter() {} func (*UnknownUnionMember) isSelectObjectContentEventStream() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go new file mode 100644 index 00000000..0e664c59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go @@ -0,0 +1,23 @@ +package s3 + +// This contains helper methods to set resolver URI into the context object. If they are ever used for +// something other than S3, they should be moved to internal/context/context.go + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type s3resolvedURI struct{} + +// setS3ResolvedURI sets the URI as resolved by the EndpointResolverV2 +func setS3ResolvedURI(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, s3resolvedURI{}, value) +} + +// getS3ResolvedURI gets the URI as resolved by EndpointResolverV2 +func getS3ResolvedURI(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, s3resolvedURI{}).(string) + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go index e954b302..97a56bd3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go @@ -90,6 +90,26 @@ func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middle return next.HandleInitialize(ctx, in) } +type validateOpCreateBucketMetadataTableConfiguration struct { +} + +func (*validateOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateMultipartUpload struct { } @@ -270,6 +290,26 @@ func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*validateOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteBucketMetricsConfiguration 
struct { } @@ -670,6 +710,26 @@ func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpGetBucketMetadataTableConfiguration struct { +} + +func (*validateOpGetBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetBucketMetricsConfiguration struct { } @@ -1886,6 +1946,10 @@ func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After) } +func addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) } @@ -1922,6 +1986,10 @@ func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After) } +func addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After) } @@ -2002,6 +2070,10 @@ func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After) } +func addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After) } @@ -2912,22 +2984,20 @@ func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error { } } -func validateLifecycleRuleFilter(v types.LifecycleRuleFilter) error { +func validateLifecycleRuleFilter(v *types.LifecycleRuleFilter) error { if v == nil { return nil } invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"} - switch uv := v.(type) { - case *types.LifecycleRuleFilterMemberAnd: - if err := validateLifecycleRuleAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + if v.Tag != nil { + if err := validateTag(v.Tag); err != nil { + invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) } - - case *types.LifecycleRuleFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + if v.And != nil 
{ + if err := validateLifecycleRuleAndOperator(v.And); err != nil { + invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) } - } if invalidParams.Len() > 0 { return invalidParams @@ -2976,6 +3046,25 @@ func validateLoggingEnabled(v *types.LoggingEnabled) error { } } +func validateMetadataTableConfiguration(v *types.MetadataTableConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetadataTableConfiguration"} + if v.S3TablesDestination == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3TablesDestination")) + } else if v.S3TablesDestination != nil { + if err := validateS3TablesDestination(v.S3TablesDestination); err != nil { + invalidParams.AddNested("S3TablesDestination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateMetrics(v *types.Metrics) error { if v == nil { return nil @@ -3320,22 +3409,20 @@ func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) err } } -func validateReplicationRuleFilter(v types.ReplicationRuleFilter) error { +func validateReplicationRuleFilter(v *types.ReplicationRuleFilter) error { if v == nil { return nil } invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"} - switch uv := v.(type) { - case *types.ReplicationRuleFilterMemberAnd: - if err := validateReplicationRuleAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + if v.Tag != nil { + if err := validateTag(v.Tag); err != nil { + invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) } - - case *types.ReplicationRuleFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + if v.And != nil { + if err := validateReplicationRuleAndOperator(v.And); err != nil { + invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) } - } if invalidParams.Len() > 0 { return invalidParams @@ -3486,6 +3573,24 @@ func validateS3Location(v *types.S3Location) error { } } +func validateS3TablesDestination(v *types.S3TablesDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3TablesDestination"} + if v.TableBucketArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableBucketArn")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateSelectParameters(v *types.SelectParameters) error { if v == nil { return nil @@ -3937,6 +4042,28 @@ func validateOpCreateBucketInput(v *CreateBucketInput) error { } } +func validateOpCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.MetadataTableConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("MetadataTableConfiguration")) + } else if v.MetadataTableConfiguration != nil { + if err := validateMetadataTableConfiguration(v.MetadataTableConfiguration); err != nil { + invalidParams.AddNested("MetadataTableConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func 
validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { if v == nil { return nil @@ -4084,6 +4211,21 @@ func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error { } } +func validateOpDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput) error { if v == nil { return nil @@ -4409,6 +4551,21 @@ func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error { } } +func validateOpGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error { if v == nil { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md new file mode 100644 index 00000000..3be25b8b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -0,0 +1,602 @@ +# v1.25.3 (2025-04-03) + +* No change notes available for this release. + +# v1.25.2 (2025-03-25) + +* No change notes available for this release. + +# v1.25.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.25.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.16 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.15 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.14 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.13 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.12 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.24.11 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.24.10 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.9 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.8 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.7 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.6 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.5 (2024-11-07) + +* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses + +# v1.24.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.4 (2024-10-03) + +* No change notes available for this release. + +# v1.23.3 (2024-09-27) + +* No change notes available for this release. + +# v1.23.2 (2024-09-25) + +* No change notes available for this release. + +# v1.23.1 (2024-09-23) + +* No change notes available for this release. + +# v1.23.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.22.7 (2024-09-04) + +* No change notes available for this release. + +# v1.22.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.5 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.4 (2024-07-18) + +* No change notes available for this release. + +# v1.22.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.21.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.11 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.9 (2024-05-23) + +* No change notes available for this release. + +# v1.20.8 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.7 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.6 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.20.5 (2024-04-05) + +* No change notes available for this release. + +# v1.20.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.3 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.2 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.19.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.19.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2024-01-18) + +* No change notes available for this release. + +# v1.18.6 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.18.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + +# v1.18.2 (2023-12-01) + +* **Bug Fix**: Correct wrapping of errors in authentication workflow. +* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2023-11-29) + +* **Feature**: Expose Options() accessor on service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2023-11-28) + +* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. + +# v1.17.3 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-10-02) + +* **Feature**: Fix FIPS Endpoints in aws-us-gov. + +# v1.14.1 (2023-09-22) + +* No change notes available for this release. 
+ +# v1.14.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.13.6 (2023-08-31) + +* No change notes available for this release. + +# v1.13.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-08-01) + +* No change notes available for this release. + +# v1.13.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2023-06-15) + +* No change notes available for this release. + +# v1.12.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2023-05-04) + +* No change notes available for this release. + +# v1.12.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2023-04-10) + +* No change notes available for this release. + +# v1.12.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.12.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.12.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.11.28 (2022-12-20) + +* No change notes available for this release. 
+ +# v1.11.27 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.25 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.24 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.23 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.22 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.21 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.20 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.19 (2022-08-30) + +* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference. + +# v1.11.18 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.17 (2022-08-15) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) + +# v1.11.16 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.15 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.14 (2022-08-08) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.13 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.12 (2022-07-11) + +* No change notes available for this release. + +# v1.11.11 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.10 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.9 (2022-06-16) + +* No change notes available for this release. + +# v1.11.8 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.7 (2022-05-26) + +* No change notes available for this release. + +# v1.11.6 (2022-05-25) + +* No change notes available for this release. + +# v1.11.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. 
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated API models +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. + +# v1.6.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
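The remainder of the diff vendors the generated SSO client (`api_client.go` plus the operation and paginator files below). As orientation only, here is a hypothetical sketch of paging through `ListAccountRoles` with the generated paginator, per the paginator behavior noted in the changelog (initial starting token, stop on empty or duplicate tokens). The access token and account ID are placeholders, and the `RoleName` field access assumes the shape of the vendored `types.RoleInfo`:

```go
// Illustrative sketch only: driving the generated ListAccountRoles paginator.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sso.NewFromConfig(cfg)

	p := sso.NewListAccountRolesPaginator(client, &sso.ListAccountRolesInput{
		AccessToken: aws.String("placeholder-token"), // placeholder
		AccountId:   aws.String("123456789012"),      // placeholder
		MaxResults:  aws.Int32(20),
	})

	// HasMorePages is true for the first page and while a non-empty token remains.
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, role := range page.RoleList {
			fmt.Println(aws.ToString(role.RoleName)) // assumes types.RoleInfo.RoleName
		}
	}
}
```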
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go new file mode 100644 index 00000000..9f10e65a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -0,0 +1,943 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "sync/atomic" + "time" +) + +const ServiceID = "SSO" +const ServiceAPIVersion = "2019-06-10" + +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sso") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sso") +} + +// Client provides the API client to make operations call for AWS Single Sign-On. +type Client struct { + options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. 
+func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveEndpointResolverV2(&options) + + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + + resolveAuthSchemeResolver(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttempts(&options) + + ignoreAnonymousAuth(&options) + + wrapWithAnonymousAuth(&options) + + resolveAuthSchemes(&options) + + client := &Client{ + options: options, + } + + initializeTimeOffsetResolver(client) + + return client +} + +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. +func (c *Client) Options() Options { + return c.options.Copy() +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + finalizeOperationRetryMaxAttempts(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + ctx, err = withOperationMetrics(ctx, options.MeterProvider) + if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + + return result, metadata, err +} + +type operationInputKey struct{} + +func setOperationInput(ctx 
context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. 
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) +} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) 
+ } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion) + if len(options.AppID) > 0 { + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) + } + + return nil +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, 
middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") + }) + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) 
error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) + +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type disableHTTPSMiddleware struct { + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) 
HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go new file mode 100644 index 00000000..b8031eee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -0,0 +1,171 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { + if params == nil { + params = &GetRoleCredentialsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRoleCredentialsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRoleCredentialsInput struct { + + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. 
+ // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. + AccountId *string + + // The friendly name of the role that is assigned to the user. + // + // This member is required. + RoleName *string + + noSmithyDocumentSerde +} + +type GetRoleCredentialsOutput struct { + + // The credentials for the role that is assigned to the user. + RoleCredentials *types.RoleCredentials + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetRoleCredentials"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return 
err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetRoleCredentials", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go new file mode 100644 index 00000000..4294e4d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -0,0 +1,269 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all roles that are assigned to the user for a given AWS account. +func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if params == nil { + params = &ListAccountRolesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountRolesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountRolesInput struct { + + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. + AccountId *string + + // The number of items that clients can request per page. + MaxResults *int32 + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountRolesOutput struct { + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []types.RoleInfo + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountRoles"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles +type ListAccountRolesPaginatorOptions struct { + // The number of items that clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListAccountRolesPaginator is a paginator for ListAccountRoles +type ListAccountRolesPaginator struct { + options ListAccountRolesPaginatorOptions + client ListAccountRolesAPIClient + params *ListAccountRolesInput + nextToken *string + firstPage bool +} + +// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator +func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator { + if params == nil { + params = &ListAccountRolesInput{} + } + + options := ListAccountRolesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountRolesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountRolesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccountRoles page. +func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation. +type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccountRoles", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go new file mode 100644 index 00000000..1db72a99 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -0,0 +1,267 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by +// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity +// Center User Guide. This operation returns a paginated response. 
+// +// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers +func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if params == nil { + params = &ListAccountsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountsInput struct { + + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html + // + // This member is required. + AccessToken *string + + // This is the number of items clients can request per page. + MaxResults *int32 + + // (Optional) When requesting subsequent pages, this is the page token from the + // previous response output. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountsOutput struct { + + // A paginated response with the list of account information and the next token if + // more results are available. + AccountList []types.AccountInfo + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccounts"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpListAccountsValidationMiddleware(stack); err != nil { + return err 
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListAccountsPaginatorOptions is the paginator options for ListAccounts
+type ListAccountsPaginatorOptions struct {
+ // This is the number of items clients can request per page.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListAccountsPaginator is a paginator for ListAccounts
+type ListAccountsPaginator struct {
+ options ListAccountsPaginatorOptions
+ client ListAccountsAPIClient
+ params *ListAccountsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListAccountsPaginator returns a new ListAccountsPaginator
+func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator {
+ if params == nil {
+ params = &ListAccountsInput{}
+ }
+
+ options := ListAccountsPaginatorOptions{}
+ if params.MaxResults != nil {
+ options.Limit = *params.MaxResults
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListAccountsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.NextToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListAccountsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListAccounts page.
+func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.NextToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxResults = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListAccounts(ctx, &params, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.NextToken
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// ListAccountsAPIClient is a client that implements the ListAccounts operation.
+type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccounts", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go new file mode 100644 index 00000000..2ca66ca5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -0,0 +1,170 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the locally stored SSO tokens from the client-side cache and sends an +// API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. +// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. More specifically, IAM +// Identity Center assumes an IAM role in the target account on behalf of the user, +// and the corresponding temporary AWS credentials are returned to the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured in +// the permission set. For more information, see [User authentications]in the IAM Identity Center User +// Guide. +// +// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html +func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { + if params == nil { + params = &LogoutInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*LogoutOutput) + out.ResultMetadata = metadata + return out, nil +} + +type LogoutInput struct { + + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html + // + // This member is required. + AccessToken *string + + noSmithyDocumentSerde +} + +type LogoutOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "Logout"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpLogoutValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Logout", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go new file mode 100644 index 00000000..366963b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go @@ -0,0 +1,337 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. + Region string +} + +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(ctx, params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. 
+type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + "GetRoleCredentials": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "ListAccountRoles": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "ListAccounts": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "Logout": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "awsssoportal") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: 
scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + + span.End() + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { + options Options +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go new file mode 100644 index 00000000..ec23c36f --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -0,0 +1,1182 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" + "time" +) + +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + +type awsRestjson1_deserializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata) + } + output := &GetRoleCredentialsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRoleCredentialsOutput + if *v == nil { + sv = &GetRoleCredentialsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "roleCredentials": + if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccountRoles struct { +} + +func (*awsRestjson1_deserializeOpListAccountRoles) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata) + } + output := &ListAccountRolesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + 
return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountRolesOutput + if *v == nil { + sv = &ListAccountRolesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "roleList": + if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccounts struct { +} + +func (*awsRestjson1_deserializeOpListAccounts) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx 
context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata) + } + output := &ListAccountsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return 
awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountsOutput + if *v == nil { + sv = &ListAccountsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountList": + if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpLogout struct { +} + +func (*awsRestjson1_deserializeOpLogout) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata) + } + output := &LogoutOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = 
restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceNotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyRequestsException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccountInfo + if *v == nil { + sv = &types.AccountInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "accountName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value) + } + sv.AccountName = ptr.String(jtv) + } + + case "emailAddress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value) + } + sv.EmailAddress = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AccountInfo + if *v == nil { + cv = []types.AccountInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AccountInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else 
{ + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleCredentials + if *v == nil { + sv = &types.RoleCredentials{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value) + } + sv.AccessKeyId = ptr.String(jtv) + } + + case "expiration": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Expiration = i64 + } + + case "secretAccessKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value) + } + sv.SecretAccessKey = ptr.String(jtv) + } + + case "sessionToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value) + } + sv.SessionToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleInfo + if *v == nil { + sv = &types.RoleInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "roleName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value) + } + sv.RoleName = 
ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.RoleInfo + if *v == nil { + cv = []types.RoleInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.RoleInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyRequestsException + if *v == nil { + sv = &types.TooManyRequestsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedException + if *v == nil { + sv = &types.UnauthorizedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message", "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go new file mode 100644 index 00000000..7f6e429f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -0,0 +1,27 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sso provides the API client, operations, and parameter types for AWS +// Single Sign-On. +// +// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web +// service that makes it easy for you to assign user access to IAM Identity Center +// resources such as the AWS access portal. Users can get AWS account applications +// and roles assigned to them and get federated into the application. +// +// Although AWS Single Sign-On was renamed, the sso and identitystore API +// namespaces will continue to retain their original name for backward +// compatibility purposes. For more information, see [IAM Identity Center rename]. +// +// This reference guide describes the IAM Identity Center Portal operations that +// you can call programatically and includes detailed information on data types and +// errors. 
+// +// AWS provides SDKs that consist of libraries and sample code for various +// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. +// The SDKs provide a convenient way to create programmatic access to IAM Identity +// Center and other AWS services. For more information about the AWS SDKs, +// including how to download and install them, see [Tools for Amazon Web Services]. +// +// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/ +// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed +package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go new file mode 100644 index 00000000..53c6bc75 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -0,0 +1,556 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. 
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "awsssoportal" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string +} + +// ValidateRequired validates required parameters are set. 
+func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + return p +} + +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := 
func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = bindRegion(options.Region) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json new file mode 100644 index 00000000..1a88fe4d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -0,0 +1,36 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_GetRoleCredentials.go", + "api_op_ListAccountRoles.go", + "api_op_ListAccounts.go", + "api_op_Logout.go", + "auth.go", + 
"deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "options.go", + "protocol_test.go", + "serializers.go", + "snapshot_test.go", + "sra_operation_order_test.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.22", + "module": "github.com/aws/aws-sdk-go-v2/service/sso", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go new file mode 100644 index 00000000..59aa2aeb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package sso + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.25.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go new file mode 100644 index 00000000..04416606 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -0,0 +1,597 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver SSO endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: 
"ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-5.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-5", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: 
endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: 
endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: 
"us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go new file mode 100644 index 00000000..aa744f15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -0,0 +1,232 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The client meter provider. + MeterProvider metrics.MeterProvider + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. 
+ RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The client tracer provider. + TracerProvider tracing.TracerProvider + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. 
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go new file mode 100644 index 00000000..a7a5b57d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go @@ -0,0 +1,309 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRoleCredentialsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/federation/credentials") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.RoleName != nil { + encoder.SetQuery("role_name").String(*v.RoleName) + } + + return nil +} + +type awsRestjson1_serializeOpListAccountRoles struct { +} + +func (*awsRestjson1_serializeOpListAccountRoles) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountRolesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/roles") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListAccounts struct { +} + +func (*awsRestjson1_serializeOpListAccounts) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/accounts") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, 
request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpLogout struct { +} + +func (*awsRestjson1_serializeOpLogout) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*LogoutInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/logout") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go new file mode 100644 index 00000000..e97a126e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go @@ -0,0 +1,115 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// Indicates that a problem occurred with the input to the request. For example, a +// required parameter might be missing or out of range. +type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is being made too frequently and is more than what +// the server can handle. +type TooManyRequestsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyRequestsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyRequestsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyRequestsException" + } + return *e.ErrorCodeOverride +} +func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. 
+type UnauthorizedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnauthorizedException" + } + return *e.ErrorCodeOverride +} +func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go new file mode 100644 index 00000000..07ac468e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -0,0 +1,63 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +// Provides information about your AWS account. +type AccountInfo struct { + + // The identifier of the AWS account that is assigned to the user. + AccountId *string + + // The display name of the AWS account that is assigned to the user. + AccountName *string + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string + + noSmithyDocumentSerde +} + +// Provides information about the role credentials that are assigned to the user. +type RoleCredentials struct { + + // The identifier used for the temporary security credentials. For more + // information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html + AccessKeyId *string + + // The date on which temporary security credentials expire. + Expiration int64 + + // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html + SecretAccessKey *string + + // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html + SessionToken *string + + noSmithyDocumentSerde +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + + // The identifier of the AWS account assigned to the user. + AccountId *string + + // The friendly name of the role that is assigned to the user. + RoleName *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go new file mode 100644 index 00000000..f6bf461f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpGetRoleCredentials struct { +} + +func (*validateOpGetRoleCredentials) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetRoleCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetRoleCredentialsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccountRoles struct { +} + +func (*validateOpListAccountRoles) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountRolesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountRolesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccounts struct { +} + +func (*validateOpListAccounts) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpLogout struct { +} + +func (*validateOpLogout) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*LogoutInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpLogoutInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After) +} + +func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After) +} + +func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After) +} + +func addOpLogoutValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpLogout{}, middleware.After) +} + +func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"} + if v.RoleName == 
nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleName")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountRolesInput(v *ListAccountRolesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountsInput(v *ListAccountsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpLogoutInput(v *LogoutInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md new file mode 100644 index 00000000..b4cdac6b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -0,0 +1,597 @@ +# v1.30.1 (2025-04-03) + +* No change notes available for this release. + +# v1.30.0 (2025-03-27) + +* **Feature**: This release adds AwsAdditionalDetails in the CreateTokenWithIAM API response. + +# v1.29.2 (2025-03-24) + +* No change notes available for this release. + +# v1.29.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.29.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.15 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.14 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.13 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.12 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2025-01-24) + +* **Documentation**: Fixed typos in the descriptions. +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.28.10 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.28.9 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.4 (2024-10-03) + +* No change notes available for this release. + +# v1.27.3 (2024-09-27) + +* No change notes available for this release. + +# v1.27.2 (2024-09-25) + +* No change notes available for this release. + +# v1.27.1 (2024-09-23) + +* No change notes available for this release. + +# v1.27.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.26.7 (2024-09-04) + +* No change notes available for this release. + +# v1.26.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.5 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.4 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.3 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.2 (2024-07-03) + +* No change notes available for this release. + +# v1.26.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.25.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.5 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.3 (2024-05-23) + +* No change notes available for this release. + +# v1.24.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2024-05-10) + +* **Feature**: Updated request parameters for PKCE support. + +# v1.23.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.23.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.3 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.2 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.22.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.22.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.7 (2024-01-16) + +* No change notes available for this release. + +# v1.21.6 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.5 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.21.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + +# v1.21.2 (2023-12-01) + +* **Bug Fix**: Correct wrapping of errors in authentication workflow. +* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.1 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2023-11-29) + +* **Feature**: Expose Options() accessor on service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.3 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.2 (2023-11-28) + +* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. + +# v1.20.1 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-11-17) + +* **Feature**: Adding support for `sso-oauth:CreateTokenWithIAM`. + +# v1.19.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2023-09-22) + +* No change notes available for this release. + +# v1.17.0 (2023-09-20) + +* **Feature**: Update FIPS endpoints in aws-us-gov. + +# v1.16.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.15.6 (2023-09-05) + +* No change notes available for this release. + +# v1.15.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-08-01) + +* No change notes available for this release. + +# v1.15.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.14 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.13 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.12 (2023-06-15) + +* No change notes available for this release. + +# v1.14.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.10 (2023-05-04) + +* No change notes available for this release. + +# v1.14.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.8 (2023-04-10) + +* No change notes available for this release. + +# v1.14.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.14.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.14.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.13.11 (2022-12-19) + +* No change notes available for this release. 
+ +# v1.13.10 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2022-09-30) + +* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference. + +# v1.13.5 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-08-25) + +* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. + +# v1.12.14 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-08) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-07-11) + +* No change notes available for this release. + +# v1.12.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-05-27) + +* No change notes available for this release. + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. 
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-07) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-11) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-09-17) + +* **Feature**: Updated API client and endpoints to latest revision. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
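Context for the newly vendored sso and ssooidc packages (here and above): they are indirect dependencies pulled in by the aws-sdk-go-v2 config/credentials chain, not imported by sql_exporter directly. Below is a minimal, hedged sketch of how they get exercised at runtime when a shared-config profile uses IAM Identity Center sign-in; the profile name "example-sso-profile" is illustrative only.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	ctx := context.Background()

	// Loading shared config with an SSO-backed profile routes credential
	// resolution through the vendored clients: ssooidc creates/refreshes the
	// SSO access token, and sso exchanges it for temporary role credentials
	// via GetRoleCredentials (whose input validators appear earlier in this diff).
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithSharedConfigProfile("example-sso-profile"), // illustrative profile name
	)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}

	// Retrieving credentials exchanges the cached SSO token (obtained via
	// `aws sso login`) for short-lived role credentials.
	creds, err := cfg.Credentials.Retrieve(ctx)
	if err != nil {
		log.Fatalf("retrieve credentials: %v", err)
	}
	log.Printf("resolved credentials via %s", creds.Source)
}
```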
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go new file mode 100644 index 00000000..57440b1f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -0,0 +1,943 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "sync/atomic" + "time" +) + +const ServiceID = "SSO OIDC" +const ServiceAPIVersion = "2019-06-10" + +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/ssooidc") +} + +// Client provides the API client to make operations call for AWS SSO OIDC. +type Client struct { + options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. 
+func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveEndpointResolverV2(&options) + + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + + resolveAuthSchemeResolver(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttempts(&options) + + ignoreAnonymousAuth(&options) + + wrapWithAnonymousAuth(&options) + + resolveAuthSchemes(&options) + + client := &Client{ + options: options, + } + + initializeTimeOffsetResolver(client) + + return client +} + +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. +func (c *Client) Options() Options { + return c.options.Copy() +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + finalizeOperationRetryMaxAttempts(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + ctx, err = withOperationMetrics(ctx, options.MeterProvider) + if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + + return result, metadata, err +} + +type operationInputKey struct{} + +func 
setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. 
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) +} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) 
+ } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion) + if len(options.AppID) > 0 { + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) + } + + return nil +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, 
middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") + }) + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) 
error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) + +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type disableHTTPSMiddleware struct { + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) 
HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go new file mode 100644 index 00000000..49387833 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -0,0 +1,242 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates and returns access and refresh tokens for clients that are +// authenticated using client secrets. The access token can be used to fetch +// short-lived credentials for the assigned AWS accounts or to access application +// APIs using bearer authentication. +func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { + if params == nil { + params = &CreateTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateTokenInput struct { + + // The unique identifier string for the client or application. This value comes + // from the result of the RegisterClientAPI. 
+ // + // This member is required. + ClientId *string + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClientAPI. + // + // This member is required. + ClientSecret *string + + // Supports the following OAuth grant types: Authorization Code, Device Code, and + // Refresh Token. Specify one of the following values, depending on the grant type + // that you want: + // + // * Authorization Code - authorization_code + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + // + // This member is required. + GrantType *string + + // Used only when calling this API for the Authorization Code grant type. The + // short-lived code is used to identify this authorization request. + Code *string + + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + + // Used only when calling this API for the Device Code grant type. This + // short-lived code is used to identify this authorization request. This comes from + // the result of the StartDeviceAuthorizationAPI. + DeviceCode *string + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered to + // receive the authorization code. + RedirectUri *string + + // Used only when calling this API for the Refresh Token grant type. This token is + // used to refresh short-lived tokens, such as the access token, that might expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html + RefreshToken *string + + // The list of scopes for which authorization is requested. The access token that + // is issued is limited to the scopes that are granted. If this value is not + // specified, IAM Identity Center authorizes all scopes that are configured for the + // client during the call to RegisterClient. + Scope []string + + noSmithyDocumentSerde +} + +type CreateTokenOutput struct { + + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. + AccessToken *string + + // Indicates the time in seconds when an access token will expire. + ExpiresIn int32 + + // The idToken is not implemented or supported. For more information about the + // features and limitations of the current IAM Identity Center OIDC implementation, + // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference]. + // + // A JSON Web Token (JWT) that identifies who is associated with the issued access + // token. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html + IdToken *string + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. 
+ // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html + RefreshToken *string + + // Used to notify the client that the returned token is an access token. The + // supported token type is Bearer . + TokenType *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateToken"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateToken", + } +} diff --git 
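[Editor's note] The operations this patch vendors here (RegisterClient, StartDeviceAuthorization, CreateToken) together implement the OAuth 2.0 device-code flow; they arrive only as part of the AWS SDK dependency tree and are not called directly by sql_exporter. For orientation, the following is a minimal, illustrative sketch (not code from this repository) of how a caller might drive that flow with this client. The client name, start URL, and region resolution are placeholder assumptions; error handling is reduced to panics for brevity.

```go
// Illustrative only: device-code login against the vendored ssooidc client.
// Values marked "placeholder" are assumptions, not taken from this repo.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx) // region/credentials from the environment
	if err != nil {
		panic(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// 1. Register a public client (this operation resolves to anonymous auth
	//    in this package's auth.go).
	rc, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName: aws.String("example-cli"), // placeholder
		ClientType: aws.String("public"),      // only "public" is accepted
	})
	if err != nil {
		panic(err)
	}

	// 2. Start device authorization against the access portal start URL.
	da, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     rc.ClientId,
		ClientSecret: rc.ClientSecret,
		StartUrl:     aws.String("https://example.awsapps.com/start"), // placeholder
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("authorize in a browser at:", aws.ToString(da.VerificationUriComplete))

	// 3. Poll CreateToken with the device-code grant until the user approves.
	for {
		tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
			ClientId:     rc.ClientId,
			ClientSecret: rc.ClientSecret,
			GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
			DeviceCode:   da.DeviceCode,
		})
		var pending *types.AuthorizationPendingException
		var slow *types.SlowDownException
		switch {
		case err == nil:
			fmt.Println("access token expires in", tok.ExpiresIn, "seconds")
			return
		case errors.As(err, &pending), errors.As(err, &slow):
			time.Sleep(time.Duration(da.Interval) * time.Second)
		default:
			panic(err)
		}
	}
}
```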
a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go new file mode 100644 index 00000000..09f3647e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -0,0 +1,280 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates and returns access and refresh tokens for clients and applications that +// are authenticated using IAM entities. The access token can be used to fetch +// short-lived credentials for the assigned Amazon Web Services accounts or to +// access application APIs using bearer authentication. +func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { + if params == nil { + params = &CreateTokenWithIAMInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateTokenWithIAM", params, optFns, c.addOperationCreateTokenWithIAMMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateTokenWithIAMOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateTokenWithIAMInput struct { + + // The unique identifier string for the client or application. This value is an + // application ARN that has OAuth grants configured. + // + // This member is required. + ClientId *string + + // Supports the following OAuth grant types: Authorization Code, Refresh Token, + // JWT Bearer, and Token Exchange. Specify one of the following values, depending + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange + // + // This member is required. + GrantType *string + + // Used only when calling this API for the JWT Bearer grant type. This value + // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To + // authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the + // application. + Assertion *string + + // Used only when calling this API for the Authorization Code grant type. This + // short-lived code is used to identify this authorization request. The code is + // obtained through a redirect from IAM Identity Center to a redirect URI persisted + // in the Authorization Code GrantOptions for the application. + Code *string + + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered to + // receive the authorization code. + RedirectUri *string + + // Used only when calling this API for the Refresh Token grant type. This token is + // used to refresh short-lived tokens, such as the access token, that might expire. 
+ // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html + RefreshToken *string + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that the requester can receive. The following values + // are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + RequestedTokenType *string + + // The list of scopes for which authorization is requested. The access token that + // is issued is limited to the scopes that are granted. If the value is not + // specified, IAM Identity Center authorizes all scopes configured for the + // application, including the following default scopes: openid , aws , + // sts:identity_context . + Scope []string + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the subject of the exchange. The value of the subject token must be an + // access token issued by IAM Identity Center to a different client or application. + // The access token must have authorized scopes that indicate the requested + // application as a target audience. + SubjectToken *string + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that is passed as the subject of the exchange. The + // following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + SubjectTokenType *string + + noSmithyDocumentSerde +} + +type CreateTokenWithIAMOutput struct { + + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. + AccessToken *string + + // A structure containing information from the idToken . Only the identityContext + // is in it, which is a value extracted from the idToken . This provides direct + // access to identity information without requiring JWT parsing. + AwsAdditionalDetails *types.AwsAdditionalDetails + + // Indicates the time in seconds when an access token will expire. + ExpiresIn int32 + + // A JSON Web Token (JWT) that identifies the user associated with the issued + // access token. + IdToken *string + + // Indicates the type of tokens that are issued by IAM Identity Center. The + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + IssuedTokenType *string + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html + RefreshToken *string + + // The list of scopes for which authorization is granted. The access token that is + // issued is limited to the scopes that are granted. + Scope []string + + // Used to notify the requester that the returned token is an access token. 
The + // supported token type is Bearer . + TokenType *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateTokenWithIAM{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateTokenWithIAM{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTokenWithIAM"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateTokenWithIAM(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateTokenWithIAM", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go new file mode 
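[Editor's note] Unlike CreateToken, RegisterClient, and StartDeviceAuthorization, CreateTokenWithIAM is not listed among the anonymous-auth overrides in this package's auth.go, so the request is SigV4-signed (signing name "sso-oauth") with the caller's IAM credentials. Below is a hedged sketch of the token-exchange grant described in the doc comments above; the application ARN and the helper name are placeholders, not part of this patch.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// exchangeToken is an illustrative sketch (placeholder helper): it swaps an
// access token previously issued by IAM Identity Center for one targeted at
// another application, using the token-exchange grant. The ClientId here is a
// placeholder application ARN.
func exchangeToken(ctx context.Context, client *ssooidc.Client, subjectToken string) (string, error) {
	out, err := client.CreateTokenWithIAM(ctx, &ssooidc.CreateTokenWithIAMInput{
		ClientId:           aws.String("arn:aws:sso::111122223333:application/ssoins-example/apl-example"), // placeholder
		GrantType:          aws.String("urn:ietf:params:oauth:grant-type:token-exchange"),
		SubjectToken:       aws.String(subjectToken),
		SubjectTokenType:   aws.String("urn:ietf:params:oauth:token-type:access_token"),
		RequestedTokenType: aws.String("urn:ietf:params:oauth:token-type:access_token"),
	})
	if err != nil {
		return "", err
	}
	// out.IdToken and out.AwsAdditionalDetails may also be populated.
	return aws.ToString(out.AccessToken), nil
}
```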
100644 index 00000000..1e2d3828 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -0,0 +1,212 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Registers a public client with IAM Identity Center. This allows clients to +// perform authorization using the authorization code grant with Proof Key for Code +// Exchange (PKCE) or the device code grant. +func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { + if params == nil { + params = &RegisterClientInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RegisterClientOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RegisterClientInput struct { + + // The friendly name of the client. + // + // This member is required. + ClientName *string + + // The type of client. The service supports only public as a client type. Anything + // other than public will be rejected by the service. + // + // This member is required. + ClientType *string + + // This IAM Identity Center application ARN is used to define + // administrator-managed configuration for public client access to resources. At + // authorization, the scopes, grants, and redirect URI available to this client + // will be restricted by this application resource. + EntitledApplicationArn *string + + // The list of OAuth 2.0 grant types that are defined by the client. This list is + // used to restrict the token granting flows available to the client. Supports the + // following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh + // Token. + // + // * Authorization Code - authorization_code + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + GrantTypes []string + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent can + // be redirected back to. + RedirectUris []string + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []string + + noSmithyDocumentSerde +} + +type RegisterClientOutput struct { + + // An endpoint that the client can use to request authorization. + AuthorizationEndpoint *string + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt int64 + + // A secret string generated for the client. The client will use this string to + // get authenticated by the service in subsequent calls. + ClientSecret *string + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt int64 + + // An endpoint that the client can use to create tokens. 
+ TokenEndpoint *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterClient"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpRegisterClientValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RegisterClient", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go new file mode 100644 index 00000000..de0108f1 --- /dev/null +++ 
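[Editor's note] Each operation file in this patch assembles its request pipeline in an addOperation*Middlewares function; callers can append their own steps without touching the generated code through the client's APIOptions hook, the standard aws-sdk-go-v2 extension point. A hedged sketch follows, shaped like the generated Build-step middlewares earlier in this patch; the middleware and header names are placeholders.

```go
package main

import (
	"context"

	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"

	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// addAuditHeader is a caller-supplied Build-step middleware (ID + HandleBuild),
// mirroring the shape of the generated ones. The header name is a placeholder.
type addAuditHeader struct{}

func (addAuditHeader) ID() string { return "addAuditHeader" }

func (addAuditHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
	middleware.BuildOutput, middleware.Metadata, error,
) {
	if req, ok := in.Request.(*smithyhttp.Request); ok {
		req.Header.Set("X-Example-Audit", "ssooidc") // placeholder header
	}
	return next.HandleBuild(ctx, in)
}

// withAuditHeader appends the middleware for a single call via Options.APIOptions.
// Usage (assuming an existing client, ctx, and input):
//   out, err := client.RegisterClient(ctx, input, withAuditHeader)
func withAuditHeader(o *ssooidc.Options) {
	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
		return stack.Build.Add(addAuditHeader{}, middleware.After)
	})
}
```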
b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -0,0 +1,194 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Initiates device authorization by requesting a pair of verification codes from +// the authorization service. +func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) { + if params == nil { + params = &StartDeviceAuthorizationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartDeviceAuthorizationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartDeviceAuthorizationInput struct { + + // The unique identifier string for the client that is registered with IAM + // Identity Center. This value should come from the persisted result of the RegisterClientAPI + // operation. + // + // This member is required. + ClientId *string + + // A secret string that is generated for the client. This value should come from + // the persisted result of the RegisterClientAPI operation. + // + // This member is required. + ClientSecret *string + + // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal] + // in the IAM Identity Center User Guide. + // + // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html + // + // This member is required. + StartUrl *string + + noSmithyDocumentSerde +} + +type StartDeviceAuthorizationOutput struct { + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn int32 + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval int32 + + // A one-time user verification code. This is needed to authorize an in-use device. + UserCode *string + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StartDeviceAuthorization"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartDeviceAuthorization", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go new file mode 100644 index 00000000..e4b87f5b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go @@ -0,0 +1,331 @@ +// Code generated by smithy-go-codegen DO NOT 
EDIT. + +package ssooidc + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The region in which the operation is being invoked. + Region string +} + +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(ctx, params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. 
+type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + "CreateToken": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "RegisterClient": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "StartDeviceAuthorization": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "sso-oauth") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, 
scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + + span.End() + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { + options Options +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go new file mode 100644 index 00000000..93f3653d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -0,0 +1,2233 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" + "time" +) + +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + +type awsRestjson1_deserializeOpCreateToken struct { +} + +func (*awsRestjson1_deserializeOpCreateToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata) + } + output := &CreateTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("AuthorizationPendingException", errorCode): + return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) + + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidGrantException", errorCode): + return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTokenOutput + if *v == nil { + sv = &CreateTokenOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) + } + sv.AccessToken = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "idToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) + } + sv.IdToken = ptr.String(jtv) + } + + case "refreshToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) + } + sv.RefreshToken = ptr.String(jtv) + } + + case "tokenType": + if value != 
nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) + } + sv.TokenType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpCreateTokenWithIAM struct { +} + +func (*awsRestjson1_deserializeOpCreateTokenWithIAM) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response, &metadata) + } + output := &CreateTokenWithIAMOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", 
errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("AuthorizationPendingException", errorCode): + return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) + + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidGrantException", errorCode): + return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidRequestRegionException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestRegionException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenWithIAMOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTokenWithIAMOutput + if *v == nil { + sv = &CreateTokenWithIAMOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) + } + sv.AccessToken = ptr.String(jtv) + } + + case "awsAdditionalDetails": + if err := awsRestjson1_deserializeDocumentAwsAdditionalDetails(&sv.AwsAdditionalDetails, value); err != nil { + return err + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "idToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) + } + sv.IdToken = ptr.String(jtv) + } + + case "issuedTokenType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenTypeURI to be of type string, got %T instead", value) + } + sv.IssuedTokenType = ptr.String(jtv) + } + + case "refreshToken": + if value != nil { + jtv, ok := value.(string) + if 
!ok { + return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) + } + sv.RefreshToken = ptr.String(jtv) + } + + case "scope": + if err := awsRestjson1_deserializeDocumentScopes(&sv.Scope, value); err != nil { + return err + } + + case "tokenType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) + } + sv.TokenType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpRegisterClient struct { +} + +func (*awsRestjson1_deserializeOpRegisterClient) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata) + } + output := &RegisterClientOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + 
} + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientMetadataException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + + case strings.EqualFold("InvalidRedirectUriException", errorCode): + return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RegisterClientOutput + if *v == nil { + sv = &RegisterClientOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.AuthorizationEndpoint = ptr.String(jtv) + } + + case "clientId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientId to be of type string, got %T instead", value) + } + sv.ClientId = ptr.String(jtv) + } + + case "clientIdIssuedAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientIdIssuedAt = i64 + } + + case "clientSecret": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value) + } + sv.ClientSecret = ptr.String(jtv) + } + + case "clientSecretExpiresAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientSecretExpiresAt = i64 + } + + case "tokenEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.TokenEndpoint = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata) + } + output := &StartDeviceAuthorizationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return 
awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartDeviceAuthorizationOutput + if *v == nil { + sv = &StartDeviceAuthorizationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "deviceCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value) + } + sv.DeviceCode = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "interval": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Interval = int32(i64) + } + + case "userCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserCode to be of type string, got %T instead", value) + } + sv.UserCode = ptr.String(jtv) + } + + case "verificationUri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUri = ptr.String(jtv) + } + + case "verificationUriComplete": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUriComplete = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AccessDeniedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := 
&types.AuthorizationPendingException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InternalServerException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } 
+ return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientMetadataException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidGrantException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRedirectUriException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response 
*smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidRequestRegionException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestRegionException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestRegionException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidScopeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SlowDownException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to 
decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnsupportedGrantTypeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AuthorizationPendingException + if *v == nil { + sv = &types.AuthorizationPendingException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAwsAdditionalDetails(v **types.AwsAdditionalDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AwsAdditionalDetails + if *v == nil { + sv = &types.AwsAdditionalDetails{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "identityContext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdentityContext to be of type string, got %T instead", value) + } + sv.IdentityContext = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerException + if *v == nil { + sv = &types.InternalServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + 
sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientException + if *v == nil { + sv = &types.InvalidClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientMetadataException + if *v == nil { + sv = &types.InvalidClientMetadataException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantException + if *v == nil { + sv = &types.InvalidGrantException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error { + if v 
== nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRedirectUriException + if *v == nil { + sv = &types.InvalidRedirectUriException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestRegionException(v **types.InvalidRequestRegionException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestRegionException + if *v == nil { + sv = &types.InvalidRequestRegionException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "endpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Location to be of type string, got %T instead", value) + } + sv.Endpoint = ptr.String(jtv) + } + + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + case "region": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Region to be of type string, got %T instead", value) + } + sv.Region = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type 
%T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidScopeException + if *v == nil { + sv = &types.InvalidScopeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentScopes(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Scope to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SlowDownException + if *v == nil { + sv = &types.SlowDownException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedClientException + if *v == nil { + sv = &types.UnauthorizedClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedGrantTypeException + if *v == nil { + sv = &types.UnsupportedGrantTypeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go new file mode 100644 index 00000000..f3510b18 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -0,0 +1,49 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package ssooidc provides the API client, operations, and parameter types for +// AWS SSO OIDC. +// +// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a +// client (such as CLI or a native application) to register with IAM Identity +// Center. The service also enables the client to fetch the user’s access token +// upon successful authentication and authorization with IAM Identity Center. +// +// # API namespaces +// +// IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity +// Center OpenID Connect uses the sso-oidc namespace. +// +// # Considerations for using this guide +// +// Before you begin using this guide, we recommend that you first review the +// following important information about how the IAM Identity Center OIDC service +// works. +// +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to +// enable single sign-on authentication with the CLI. +// +// - With older versions of the CLI, the service only emits OIDC access tokens, +// so to obtain a new token, users must explicitly re-authenticate. To access the +// OIDC flow that supports token refresh and doesn’t require re-authentication, +// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with +// support for OIDC token refresh and configurable IAM Identity Center session +// durations. For more information, see [Configure Amazon Web Services access portal session duration]. +// +// - The access tokens provided by this service grant access to all Amazon Web +// Services account entitlements assigned to an IAM Identity Center user, not just +// a particular application. +// +// - The documentation in this guide does not describe the mechanism to convert +// the access token into Amazon Web Services Auth (“sigv4”) credentials for use +// with IAM-protected Amazon Web Services service endpoints. For more information, +// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide. 
+// +// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity +// Center User Guide. +// +// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html +// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html +// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628 +// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go new file mode 100644 index 00000000..6feea0c9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -0,0 +1,556 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. 
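The package documentation above outlines the device authorization grant that the RegisterClient, StartDeviceAuthorization, and CreateToken operations vendored in this patch implement. Purely as orientation (an editorial sketch, not part of the vendored file), a minimal flow might look like the following; the region, client name, and start URL are placeholder assumptions, and a real client would poll CreateToken at the interval returned by StartDeviceAuthorization.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	ctx := context.Background()

	// Region is a placeholder; any region with IAM Identity Center enabled works.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// Register a public OIDC client; the returned ClientId/ClientSecret can be cached.
	reg, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName: aws.String("example-client"), // placeholder name
		ClientType: aws.String("public"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Start the device flow; the user approves it at VerificationUriComplete.
	dev, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		StartUrl:     aws.String("https://example.awsapps.com/start"), // placeholder portal URL
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("authorize in a browser at:", aws.ToString(dev.VerificationUriComplete))

	// A real client polls this at dev.Interval seconds until the user approves,
	// treating AuthorizationPendingException / SlowDownException as retryable.
	tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		DeviceCode:   dev.DeviceCode,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("access token obtained, expires in %d seconds\n", tok.ExpiresIn)
}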
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "sso-oauth" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string +} + +// ValidateRequired validates required parameters are set. 
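Read together, the EndpointParameters defaults above and the resolver rules that follow reduce a plain commercial-partition region, with FIPS and dual-stack left unset, to an endpoint of the form https://oidc.<region>.<dnsSuffix>. As an editorial sketch only (not part of the vendored file), exercising the V2 resolver directly, assuming the standard aws partition:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	r := ssooidc.NewDefaultEndpointResolverV2()

	// UseDualStack and UseFIPS are left nil; WithDefaults() sets both to false
	// before ValidateRequired runs, so only Region needs to be supplied.
	ep, err := r.ResolveEndpoint(context.Background(), ssooidc.EndpointParameters{
		Region: aws.String("us-east-1"), // placeholder region
	})
	if err != nil {
		log.Fatal(err)
	}

	// For this input the generated rules are expected to yield
	// https://oidc.us-east-1.amazonaws.com (oidc-fips.* when UseFIPS is true).
	fmt.Println(ep.URI.String())
}

In normal use the resolver is not called directly: a non-default endpoint is usually supplied through Options.BaseEndpoint (or the AWS_ENDPOINT_URL_SSO_OIDC / AWS_ENDPOINT_URL environment variables and shared-config entries that resolveBaseEndpoint above consults), and the resolveEndpointV2Middleware further down invokes the resolver once per request.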
+func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + return p +} + +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if _PartitionResult.SupportsFIPS == true { + if _PartitionResult.Name == "aws-us-gov" { + uriString := func() 
string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = bindRegion(options.Region) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json new file mode 100644 index 00000000..35f18097 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -0,0 +1,36 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CreateToken.go", + "api_op_CreateTokenWithIAM.go", + "api_op_RegisterClient.go", + "api_op_StartDeviceAuthorization.go", + 
"auth.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "options.go", + "protocol_test.go", + "serializers.go", + "snapshot_test.go", + "sra_operation_order_test.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.22", + "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go new file mode 100644 index 00000000..04623412 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ssooidc + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.30.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go new file mode 100644 index 00000000..ba7b4f9e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -0,0 +1,597 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver SSO OIDC endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + 
endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-5.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-5", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + 
Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.eu", + Protocols: 
[]string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go new file mode 100644 index 00000000..55dd80d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -0,0 +1,232 @@ +// Code 
generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The client meter provider. + MeterProvider metrics.MeterProvider + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. 
+ // + // Currently does not support per operation call overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The client tracer provider. + TracerProvider tracing.TracerProvider + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. 
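// exampleClientOptions is an illustrative sketch, not part of the generated
// file: it shows how the fields documented above are typically adjusted
// through functional options when constructing a client. The concrete values
// are arbitrary examples.
func exampleClientOptions() []func(*Options) {
	return []func(*Options){
		// Point the client at an explicit base endpoint; EndpointResolverV2
		// still post-processes this value.
		func(o *Options) { o.BaseEndpoint = aws.String("https://oidc.us-east-1.amazonaws.com") },
		// Cap retries and switch to adaptive mode (RetryMode is ignored when a
		// custom Retryer is supplied).
		func(o *Options) {
			o.RetryMaxAttempts = 5
			o.RetryMode = aws.RetryModeAdaptive
		},
		// Pin the SigV4 signing region, overriding auth scheme and endpoint
		// resolution (see WithSigV4SigningRegion below).
		WithSigV4SigningRegion("us-east-1"),
	}
}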
+func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go new file mode 100644 index 00000000..1ad103d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -0,0 +1,512 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
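// The serializers below bind each SSO OIDC operation to its REST-JSON route:
// CreateToken posts to /token, CreateTokenWithIAM to /token?aws_iam=t,
// RegisterClient to /client/register, and StartDeviceAuthorization to
// /device_authorization, each with an application/json request body.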
+ +package ssooidc + +import ( + "bytes" + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpCreateToken struct { +} + +func (*awsRestjson1_serializeOpCreateToken) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/token") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.Code != nil { + ok := object.Key("code") + ok.String(*v.Code) + } + + if v.CodeVerifier != nil { + ok := object.Key("codeVerifier") + ok.String(*v.CodeVerifier) + } + + if v.DeviceCode != nil { + ok := object.Key("deviceCode") + ok.String(*v.DeviceCode) + } + + if v.GrantType != nil { + ok := object.Key("grantType") + ok.String(*v.GrantType) 
+ } + + if v.RedirectUri != nil { + ok := object.Key("redirectUri") + ok.String(*v.RedirectUri) + } + + if v.RefreshToken != nil { + ok := object.Key("refreshToken") + ok.String(*v.RefreshToken) + } + + if v.Scope != nil { + ok := object.Key("scope") + if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpCreateTokenWithIAM struct { +} + +func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTokenWithIAMInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/token?aws_iam=t") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Assertion != nil { + ok := object.Key("assertion") + ok.String(*v.Assertion) + } + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.Code != nil { + ok := object.Key("code") + ok.String(*v.Code) + } + + if v.CodeVerifier != nil { + ok := object.Key("codeVerifier") + ok.String(*v.CodeVerifier) + } + + if v.GrantType != nil { + ok := object.Key("grantType") + 
ok.String(*v.GrantType) + } + + if v.RedirectUri != nil { + ok := object.Key("redirectUri") + ok.String(*v.RedirectUri) + } + + if v.RefreshToken != nil { + ok := object.Key("refreshToken") + ok.String(*v.RefreshToken) + } + + if v.RequestedTokenType != nil { + ok := object.Key("requestedTokenType") + ok.String(*v.RequestedTokenType) + } + + if v.Scope != nil { + ok := object.Key("scope") + if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { + return err + } + } + + if v.SubjectToken != nil { + ok := object.Key("subjectToken") + ok.String(*v.SubjectToken) + } + + if v.SubjectTokenType != nil { + ok := object.Key("subjectTokenType") + ok.String(*v.SubjectTokenType) + } + + return nil +} + +type awsRestjson1_serializeOpRegisterClient struct { +} + +func (*awsRestjson1_serializeOpRegisterClient) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RegisterClientInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/client/register") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientName != nil { + ok := object.Key("clientName") + ok.String(*v.ClientName) + } + + if v.ClientType != nil 
{ + ok := object.Key("clientType") + ok.String(*v.ClientType) + } + + if v.EntitledApplicationArn != nil { + ok := object.Key("entitledApplicationArn") + ok.String(*v.EntitledApplicationArn) + } + + if v.GrantTypes != nil { + ok := object.Key("grantTypes") + if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil { + return err + } + } + + if v.IssuerUrl != nil { + ok := object.Key("issuerUrl") + ok.String(*v.IssuerUrl) + } + + if v.RedirectUris != nil { + ok := object.Key("redirectUris") + if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil { + return err + } + } + + if v.Scopes != nil { + ok := object.Key("scopes") + if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartDeviceAuthorizationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/device_authorization") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) 
error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.StartUrl != nil { + ok := object.Key("startUrl") + ok.String(*v.StartUrl) + } + + return nil +} + +func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go new file mode 100644 index 00000000..2cfe7b48 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -0,0 +1,428 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AccessDeniedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AccessDeniedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AccessDeniedException" + } + return *e.ErrorCodeOverride +} +func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AuthorizationPendingException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AuthorizationPendingException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AuthorizationPendingException" + } + return *e.ErrorCodeOverride +} +func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the token issued by the service is expired and is no longer +// valid. 
+type ExpiredTokenException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that an error from the service occurred while trying to process a +// request. +type InternalServerException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InternalServerException" + } + return *e.ErrorCodeOverride +} +func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret . +type InvalidClientException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidClientException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientMetadataException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientMetadataException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidClientMetadataException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateTokenrequest with an invalid grant type. 
+type InvalidGrantException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidGrantException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRedirectUriException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRedirectUriException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRedirectUriException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that something is wrong with the input to the request. For example, a +// required parameter might be missing or out of range. +type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a token provided as input to the request was issued by and is +// only usable by calling IAM Identity Center endpoints in another region. +type InvalidRequestRegionException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + Endpoint *string + Region *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestRegionException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestRegionException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestRegionException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestRegionException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the scope provided in the request is invalid. 
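// isRetryableDeviceFlowError is an illustrative sketch, not part of the
// generated file (it assumes the standard-library "errors" import): when a
// caller polls CreateToken during the device authorization grant, these two
// error types signal that the caller should keep polling (backing off on
// SlowDownException) rather than treat the response as a hard failure.
func isRetryableDeviceFlowError(err error) bool {
	var pending *AuthorizationPendingException
	var slowDown *SlowDownException
	// errors.As unwraps the operation-error wrapping applied by the client,
	// so this also matches wrapped service errors.
	return errors.As(err, &pending) || errors.As(err, &slowDown)
}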
+type InvalidScopeException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidScopeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidScopeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidScopeException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is making the request too frequently and is more than +// the service can handle. +type SlowDownException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SlowDownException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SlowDownException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "SlowDownException" + } + return *e.ErrorCodeOverride +} +func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is not currently authorized to make the request. This +// can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnauthorizedClientException" + } + return *e.ErrorCodeOverride +} +func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedGrantTypeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedGrantTypeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedGrantTypeException" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go new file mode 100644 index 00000000..2e8f3ea0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go @@ -0,0 +1,22 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +// This structure contains Amazon Web Services-specific parameter extensions for +// the token endpoint responses and includes the identity context. +type AwsAdditionalDetails struct { + + // STS context assertion that carries a user identifier to the Amazon Web Services + // service that it calls and can be used to obtain an identity-enhanced IAM role + // session. This value corresponds to the sts:identity_context claim in the ID + // token. + IdentityContext *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go new file mode 100644 index 00000000..9c17e4c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go @@ -0,0 +1,184 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCreateToken struct { +} + +func (*validateOpCreateToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateTokenWithIAM struct { +} + +func (*validateOpCreateTokenWithIAM) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateTokenWithIAM) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTokenWithIAMInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTokenWithIAMInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRegisterClient struct { +} + +func (*validateOpRegisterClient) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RegisterClientInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRegisterClientInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartDeviceAuthorization struct { +} + +func (*validateOpStartDeviceAuthorization) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartDeviceAuthorizationInput) + if !ok { + return 
out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartDeviceAuthorizationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) +} + +func addOpCreateTokenWithIAMValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateTokenWithIAM{}, middleware.After) +} + +func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) +} + +func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After) +} + +func validateOpCreateTokenInput(v *CreateTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.GrantType == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateTokenWithIAMInput(v *CreateTokenWithIAMInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTokenWithIAMInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.GrantType == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRegisterClientInput(v *RegisterClientInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"} + if v.ClientName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientName")) + } + if v.ClientType == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.StartUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("StartUrl")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md new file mode 100644 index 00000000..6656137c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -0,0 +1,639 @@ +# v1.33.19 (2025-04-10) + +* No change notes available for this release. + +# v1.33.18 (2025-04-03) + +* No change notes available for this release. + +# v1.33.17 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. 
+ +# v1.33.16 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.15 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.14 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.13 (2025-02-04) + +* No change notes available for this release. + +# v1.33.12 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.11 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.10 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.33.9 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.33.8 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.7 (2025-01-14) + +* No change notes available for this release. + +# v1.33.6 (2025-01-10) + +* **Documentation**: Fixed typos in the descriptions. + +# v1.33.5 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.4 (2025-01-08) + +* No change notes available for this release. + +# v1.33.3 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.2 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.1 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.0 (2024-11-14) + +* **Feature**: This release introduces the new API 'AssumeRoot', which returns short-term credentials that you can use to perform privileged tasks. + +# v1.32.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.4 (2024-10-03) + +* No change notes available for this release. + +# v1.31.3 (2024-09-27) + +* No change notes available for this release. + +# v1.31.2 (2024-09-25) + +* No change notes available for this release. + +# v1.31.1 (2024-09-23) + +* No change notes available for this release. + +# v1.31.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.30.7 (2024-09-04) + +* No change notes available for this release. + +# v1.30.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.5 (2024-08-22) + +* No change notes available for this release. + +# v1.30.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.29.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.13 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.12 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.10 (2024-05-23) + +* No change notes available for this release. + +# v1.28.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.28.6 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.5 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.4 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.3 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.2 (2024-03-04) + +* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.27.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.1 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.27.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.7 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.6 (2023-12-20) + +* No change notes available for this release. + +# v1.26.5 (2023-12-08) + +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. 
+ +# v1.26.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. +* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication. + +# v1.26.2 (2023-12-01) + +* **Bug Fix**: Correct wrapping of errors in authentication workflow. +* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.1 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2023-11-29) + +* **Feature**: Expose Options() accessor on service clients. +* **Documentation**: Documentation updates for AWS Security Token Service. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.6 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.5 (2023-11-28) + +* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. + +# v1.25.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.3 (2023-11-17) + +* **Documentation**: API updates for the AWS Security Token Service + +# v1.25.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-10-02) + +* **Feature**: STS API updates for assumeRole + +# v1.22.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.21.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.1 (2023-08-01) + +* No change notes available for this release. + +# v1.21.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. 
Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.1 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-07-25) + +* **Feature**: API updates for the AWS Security Token Service + +# v1.19.3 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.2 (2023-06-15) + +* No change notes available for this release. + +# v1.19.1 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-05-08) + +* **Feature**: Documentation updates for AWS Security Token Service. + +# v1.18.11 (2023-05-04) + +* No change notes available for this release. + +# v1.18.10 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-04-10) + +* No change notes available for this release. + +# v1.18.8 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. + +# v1.18.2 (2023-01-25) + +* **Documentation**: Doc only change to update wording in a key topic + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + +# v1.18.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.17.7 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2022-11-22) + +* No change notes available for this release. + +# v1.17.4 (2022-11-17) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.17.3 (2022-11-16) + +* No change notes available for this release. + +# v1.17.2 (2022-11-10) + +* No change notes available for this release. + +# v1.17.1 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-10-21) + +* **Feature**: Add presign functionality for sts:AssumeRole operation +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.19 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.18 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.17 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2022-08-30) + +* No change notes available for this release. 
+ +# v1.16.14 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2022-05-16) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.16.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated service client model to latest release. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-12-21) + +* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-30) + +* **Feature**: API client updated + +# v1.10.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. 
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-07-15) + +* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* **Documentation**: Updated service model to latest revision. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go new file mode 100644 index 00000000..fca363d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -0,0 +1,1095 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" + acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "sync/atomic" + "time" +) + +const ServiceID = "STS" +const ServiceAPIVersion = "2011-06-15" + +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sts") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sts") +} + +// Client provides the API client to make operations call for AWS Security Token +// Service. +type Client struct { + options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. 
+func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveEndpointResolverV2(&options) + + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + + resolveAuthSchemeResolver(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttempts(&options) + + ignoreAnonymousAuth(&options) + + wrapWithAnonymousAuth(&options) + + resolveAuthSchemes(&options) + + client := &Client{ + options: options, + } + + initializeTimeOffsetResolver(client) + + return client +} + +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. +func (c *Client) Options() Options { + return c.options.Copy() +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + finalizeOperationRetryMaxAttempts(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + ctx, err = withOperationMetrics(ctx, options.MeterProvider) + if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + + return result, metadata, err +} + +type operationInputKey struct{} + +func setOperationInput(ctx 
context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. 
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) +} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) 
+ } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion) + if len(options.AppID) > 0 { + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) + } + + return nil +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, 
middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") + }) + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err + } + return nil +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) 
error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) + +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + +} + +// HTTPPresignerV4 represents presigner interface used by presign url client +type HTTPPresignerV4 interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignOptions represents the presign client options +type PresignOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // Presigner is the presigner used by the presign url client + Presigner HTTPPresignerV4 +} + +func (o PresignOptions) copy() PresignOptions { + clientOptions := make([]func(*Options), len(o.ClientOptions)) + copy(clientOptions, o.ClientOptions) + o.ClientOptions = clientOptions + return o +} + +// WithPresignClientFromClientOptions is a helper utility to retrieve a function +// that takes PresignOption as input +func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { + return withPresignClientFromClientOptions(optFns).options +} + +type withPresignClientFromClientOptions []func(*Options) + +func (w withPresignClientFromClientOptions) options(o *PresignOptions) { + o.ClientOptions = append(o.ClientOptions, w...) 
+} + +// PresignClient represents the presign url client +type PresignClient struct { + client *Client + options PresignOptions +} + +// NewPresignClient generates a presign client using provided API Client and +// presign options +func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { + var options PresignOptions + for _, fn := range optFns { + fn(&options) + } + if len(options.ClientOptions) != 0 { + c = New(c.options, options.ClientOptions...) + } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignContextPolyfillMiddleware struct { +} + +func (*presignContextPolyfillMiddleware) ID() string { + return "presignContextPolyfill" +} + +func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + schemeID := rscheme.Scheme.SchemeID() + + if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { + if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningName(ctx, sn) + } + if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningRegion(ctx, sr) + } + } else if schemeID == "aws.auth#sigv4a" { + if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningName(ctx, sn) + } + if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) + } + } + + return next.HandleFinalize(ctx, in) +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { + stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) + } + if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok { + stack.Finalize.Remove((*retry.Attempt)(nil).ID()) + } + if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok { + stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) + } + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { + return err + } + + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { + return err + } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } + // convert request to a GET request + err = query.AddAsGetRequestMiddleware(stack) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigningMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return 
stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type disableHTTPSMiddleware struct { + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go new file mode 100644 index 
00000000..524e36eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -0,0 +1,550 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials that you can use to access +// Amazon Web Services resources. These temporary credentials consist of an access +// key ID, a secret access key, and a security token. Typically, you use AssumeRole +// within your account or for cross-account access. For a comparison of AssumeRole +// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the +// IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any Amazon Web Services service with the following exception: You +// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed session policies to this operation. +// You can pass a single JSON policy document to use as an inline session policy. +// You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use +// as managed session policies. The plaintext that you use for both inline and +// managed session policies can't exceed 2,048 characters. Passing policies to this +// operation returns new temporary credentials. The resulting session's permissions +// are the intersection of the role's identity-based policy and the session +// policies. You can use the role's temporary credentials in subsequent Amazon Web +// Services API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies what +// can be done with the role. You specify the trusted principal that is allowed to +// assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have +// permissions that are delegated from the account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. +// +// To allow a user to assume a role in the same account, you can do either of the +// following: +// +// - Attach a policy to the user that allows the user to call AssumeRole (as long +// as the role's trust policy trusts the account). +// +// - Add the user as a principal directly in the role's trust policy. +// +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. 
When a resource-based policy grants access to a principal in the same +// account, no additional identity-based policy is required. For more information +// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM +// User Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information when +// you call AssumeRole . This is useful for cross-account scenarios to ensure that +// the user that assumes the role has been authenticated with an Amazon Web +// Services MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication. If the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication might +// look like the following example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide. +// +// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that the +// MFA device produces. +// +// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { + if params == nil { + params = &AssumeRoleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleInput struct { + + // The Amazon Resource Name (ARN) of the role to assume. 
+ // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role is + // assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the account + // that owns the role. The role session name is also used in the ARN of the assumed + // role principal. This means that subsequent cross-account API requests that use + // the temporary security credentials will expose the role session name to the + // external account in their CloudTrail logs. + // + // For security purposes, administrators can view this field in [CloudTrail logs] to help identify + // who performed an action in Amazon Web Services. Your administrator might require + // that you specify your user name as the session name when you assume the role. + // For more information, see [sts:RoleSessionName]sts:RoleSessionName . + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds + // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname + // + // This member is required. + RoleSessionName *string + + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for the + // role. The maximum session duration setting can have a value from 1 hour to 12 + // hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API + // role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of up to + // 43200 seconds (12 hours), depending on the maximum session duration setting for + // your role. However, if you assume a role using role chaining and provide a + // DurationSeconds parameter value greater than one hour, the operation fails. To + // learn how to view the maximum value for your role, see [Update the maximum session duration for a role]. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. 
+ // + // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html + DurationSeconds *int32 + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A + // cross-account role is usually set up to trust everyone in an account. Therefore, + // the administrator of the trusting account might send an external ID to the + // administrator of the trusted account. That way, only someone with the ID can + // assume the role, rather than everyone in the account. For more information about + // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- + // + // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html + ExternalId *string + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // For more information about role session permissions, see [Session policies]. 
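The inline-policy rules described above are easier to follow with a concrete document. A minimal sketch of a session policy that could be supplied through the Policy field below; this is illustrative only and not part of the vendored file, the bucket name is a placeholder, and the document stays well under the 2,048-character plaintext limit:

// Hypothetical inline session policy, illustrative only.
const exampleSessionPolicy = `{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:GetObject",
      "Resource": "arn:aws:s3:::example-bucket/*"
    }
  ]
}`

Because session policies only intersect with the role's identity-based policy, a document like this can narrow, but never broaden, what the temporary credentials may do.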
+ // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + PolicyArns []types.PolicyDescriptorType + + // A list of previously acquired trusted context assertions in the format of a + // JSON array. The trusted context assertion is signed and encrypted by Amazon Web + // Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which the + // trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + ProvidedContexts []types.ProvidedContext + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy of + // the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as + // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as + // arn:aws:iam::123456789012:mfa/user ). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + SerialNumber *string + + // The source identity specified by the principal that is calling the AssumeRole + // operation. 
The source identity value persists across [chained role]sessions. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the [sts:SourceIdentity]sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: +=,.@-. You cannot use a + // value that begins with the text aws: . This prefix is reserved for Amazon Web + // Services internal use. + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#iam-term-role-chaining + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + // [sts:SourceIdentity]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceidentity + SourceIdentity *string + + // A list of session tags that you want to pass. Each session tag consists of a + // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions] + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // Additionally, if you used temporary credentials to perform this operation, the + // new session inherits any transitive session tags from the calling session. If + // you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the CloudTrail logs. For + // more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide. 
+ // + // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs + Tags []types.Tag + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA. (In other words, if the policy includes a condition that + // tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string + + // A list of keys for session tags that you want to set as transitive. If you set + // a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. + // + // This parameter is optional. The transitive status of a session tag does not + // impact its packed binary size. + // + // If you choose not to specify a transitive tag key, then no tags are passed from + // this session to any subsequent sessions. + // + // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining + TransitiveTagKeys []string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRole request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. +type AssumeRoleOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole . + AssumedRoleUser *types.AssumedRoleUser + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. 
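The input fields above and the temporary credentials described here are easiest to read in a short call sketch. The following is illustrative only and not part of the vendored file; the role ARN, session name, external ID, and tag values are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials; the resulting aws.Config feeds the
	// resolve* helpers shown earlier in this diff.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int32(3600),
		ExternalId:      aws.String("example-external-id"), // placeholder
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Temporary credentials; make no assumption about the session token's size.
	fmt.Println(aws.ToString(out.Credentials.AccessKeyId), aws.ToTime(out.Credentials.Expiration))
}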
For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + SourceIdentity *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRole"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } 
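Every step above is appended through the same Options value, so a caller can reach into this assembly without touching the generated code. A hedged sketch, not part of the vendored file, using a per-operation option function and APIOptions (the function's closing return follows below):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/smithy-go/middleware"
)

// assumeRoleWithCustomOptions is illustrative only. The option function mutates
// the per-operation copy of Options consumed by the stack assembly above.
func assumeRoleWithCustomOptions(ctx context.Context, client *sts.Client, params *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
	return client.AssumeRole(ctx, params, func(o *sts.Options) {
		// Picked up by finalizeOperationRetryMaxAttempts when it differs from
		// the client-level value.
		o.RetryMaxAttempts = 5
		// Each function appended here receives the operation's middleware stack.
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return nil // inspect or extend the stack here
		})
	})
}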
+ return nil +} + +func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "AssumeRole", + } +} + +// PresignAssumeRole is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &AssumeRoleInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns, + c.client.addOperationAssumeRoleMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go new file mode 100644 index 00000000..400f809e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -0,0 +1,458 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated via a SAML authentication response. This operation provides a +// mechanism for tying an enterprise identity store or directory to role-based +// Amazon Web Services access without user-specific credentials or configuration. +// For a comparison of AssumeRoleWithSAML with the other API operations that +// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of an +// access key ID, a secret access key, and a security token. Applications can use +// these temporary security credentials to sign calls to Amazon Web Services +// services. +// +// # Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML +// authentication response's SessionNotOnOrAfter value, whichever is shorter. You +// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the +// maximum session duration setting for the role. This setting can have a value +// from 1 hour to 12 hours. To learn how to view the maximum value for your role, +// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console URL. +// For more information, see [Using IAM Roles]in the IAM User Guide. 
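PresignAssumeRole above pairs with NewPresignClient and the presign middleware conversion shown earlier: the request is rewritten into a signed GET URL rather than being sent. A hedged usage sketch, not part of the vendored file, with placeholder identifiers:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// The presign client swaps the signing middleware for a presigner and uses
	// a no-op HTTP client, so nothing is transmitted here.
	presigner := sts.NewPresignClient(sts.NewFromConfig(cfg))

	req, err := presigner.PresignAssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// req carries the presigned URL, HTTP method, and signed headers.
	fmt.Println(req.Method, req.URL)
}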
+// +// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume a +// role using role chaining and provide a DurationSeconds parameter value greater +// than one hour, the operation fails. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used to +// make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. The identity of the caller is validated by using keys in +// the metadata document that is uploaded for the SAML provider entity for your +// identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The +// entry includes the value in the NameID element of the SAML assertion. We +// recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the +// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML +// assertion as session tags. Each session tag consists of a key name and an +// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed +// 128 characters and the values can’t exceed 256 characters. For these and +// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the upper +// size limit. +// +// You can pass a session tag with the same key as a tag that is attached to the +// role. 
When you do, session tags override the role's tags with the same key. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # SAML Configuration +// +// Before your application can call AssumeRoleWithSAML , you must configure your +// SAML identity provider (IdP) to issue the claims required by Amazon Web +// Services. Additionally, you must use Identity and Access Management (IAM) to +// create a SAML provider entity in your Amazon Web Services account that +// represents your identity provider. You must also create an IAM role that +// specifies this SAML provider in its trust policy. +// +// For more information, see the following resources: +// +// [About SAML 2.0-based Federation] +// - in the IAM User Guide. +// +// [Creating SAML Identity Providers] +// - in the IAM User Guide. +// +// [Configuring a Relying Party and Claims] +// - in the IAM User Guide. +// +// [Creating a Role for SAML 2.0 Federation] +// - in the IAM User Guide. +// +// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session +// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html +// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining +// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { + if params == nil { + params = &AssumeRoleWithSAMLInput{} + } + + result, metadata, 
err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithSAMLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithSAMLInput struct { + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the + // IdP. + // + // This member is required. + PrincipalArn *string + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // The base64 encoded SAML authentication response provided by the IdP. + // + // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide. + // + // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html + // + // This member is required. + SAMLAssertion *string + + // The duration, in seconds, of the role session. Your role session lasts for the + // duration that you specify for the DurationSeconds parameter, or until the time + // specified in the SAML authentication response's SessionNotOnOrAfter value, + // whichever is shorter. You can provide a DurationSeconds value from 900 seconds + // (15 minutes) up to the maximum session duration setting for the role. This + // setting can have a value from 1 hour to 12 hours. If you specify a value higher + // than this setting, the operation fails. For example, if you specify a session + // duration of 12 hours, but your administrator set the maximum session duration to + // 6 hours, your operation fails. To learn how to view the maximum value for your + // role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. 
The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // For more information about role session permissions, see [Session policies]. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + PolicyArns []types.PolicyDescriptorType + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. +type AssumeRoleWithSAMLOutput struct { + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *types.AssumedRoleUser + + // The value of the Recipient attribute of the SubjectConfirmationData element of + // the SAML assertion. 
+ Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. + Credentials *types.Credentials + + // The value of the Issuer element of the SAML assertion. + Issuer *string + + // A hash value based on the concatenation of the following: + // + // - The Issuer response value. + // + // - The Amazon Web Services account ID. + // + // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // + // The combination of NameQualifier and Subject can be used to uniquely identify a + // user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + NameQualifier *string + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The value in the SourceIdentity attribute in the SAML assertion. The source + // identity value persists across [chained role]sessions. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your SAML identity provider to use an + // attribute associated with your users, like user name or email, as the source + // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to + // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in + // the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + SourceIdentity *string + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient or + // persistent . + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format , + // that prefix is removed. For example, + // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . + // If the format includes any other prefix, the format is returned with no + // modifications. + SubjectType *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithSAML"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "AssumeRoleWithSAML", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go new file mode 100644 index 00000000..e5708cbd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -0,0 +1,478 @@ +// Code generated by 
smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated in a mobile or web application with a web identity provider. +// Example providers include the OAuth 2.0 providers Login with Amazon and +// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities]. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also +// supply the user with a consistent identity throughout the lifetime of an +// application. +// +// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web +// Services security credentials. Therefore, you can distribute an application (for +// example, on mobile devices) that requests temporary security credentials without +// including long-term Amazon Web Services credentials in the application. You also +// don't need to deploy server-based proxy services that use long-term Amazon Web +// Services credentials. Instead, the identity of the caller is validated by using +// a token from the web identity provider. For a comparison of +// AssumeRoleWithWebIdentity with the other API operations that produce temporary +// credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to Amazon Web Services service API +// operations. +// +// # Session Duration +// +// By default, the temporary security credentials created by +// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional +// DurationSeconds parameter to specify the duration of your session. You can +// provide a value from 900 seconds (15 minutes) up to the maximum session duration +// setting for the role. This setting can have a value from 1 hour to 12 hours. To +// learn how to view the maximum value for your role, see [Update the maximum session duration for a role]in the IAM User Guide. +// The maximum session duration limit applies when you use the AssumeRole* API +// operations or the assume-role* CLI commands. However the limit does not apply +// when you use those operations to create a console URL. For more information, see +// [Using IAM Roles]in the IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can be +// used to make API calls to any Amazon Web Services service with the following +// exception: you cannot call the STS GetFederationToken or GetSessionToken API +// operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. 
You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed +// 128 characters and the values can’t exceed 256 characters. For these and +// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the upper +// size limit. +// +// You can pass a session tag with the same key as a tag that is attached to the +// role. When you do, the session tag overrides the role tag with the same key. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Identities +// +// Before your application can call AssumeRoleWithWebIdentity , you must have an +// identity token from a supported identity provider and create a role that the +// application can assume. The role that your application assumes must trust the +// identity provider that is associated with the identity token. In other words, +// the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail +// logs. The entry includes the [Subject]of the provided web identity token. We recommend +// that you avoid using any personally identifiable information (PII) in this +// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification]. +// +// For more information about how to use OIDC federation and the +// AssumeRoleWithWebIdentity API, see the following resources: +// +// [Using Web Identity Federation API Operations for Mobile Apps] +// - and [Federation Through a Web-based Identity Provider]. 
+// +// [Amazon Web Services SDK for iOS Developer Guide] +// - and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the +// identity providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ +// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration +// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html +// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes +func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { + if params == nil { + params = &AssumeRoleWithWebIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithWebIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithWebIdentityInput struct { + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // Additional considerations apply to Amazon Cognito identity pools that assume [cross-account IAM roles]. 
+ // The trust policies of these roles must accept the cognito-identity.amazonaws.com + // service principal and must contain the cognito-identity.amazonaws.com:aud + // condition key to restrict role assumption to users from your intended identity + // pools. A policy that trusts Amazon Cognito identity pools without this condition + // creates a risk that a user from an unintended identity pool can assume the role. + // For more information, see [Trust policies for IAM roles in Basic (Classic) authentication]in the Amazon Cognito Developer Guide. + // + // [cross-account IAM roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies-cross-account-resource-access.html + // [Trust policies for IAM roles in Basic (Classic) authentication]: https://docs.aws.amazon.com/cognito/latest/developerguide/iam-roles.html#trust-policies + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Typically, you pass the name or + // identifier that is associated with the user who is using your application. That + // way, the temporary security credentials that your application will use are + // associated with that user. This session name is included as part of the ARN and + // assumed role ID in the AssumedRoleUser response element. + // + // For security purposes, administrators can view this field in [CloudTrail logs] to help identify + // who performed an action in Amazon Web Services. Your administrator might require + // that you specify your user name as the session name when you assume the role. + // For more information, see [sts:RoleSessionName]sts:RoleSessionName . + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds + // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname + // + // This member is required. + RoleSessionName *string + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the + // identity provider. Your application must get this token by authenticating the + // user who is using your application with a web identity provider before the + // application makes an AssumeRoleWithWebIdentity call. Timestamps in the token + // must be formatted as either an integer or a long integer. Tokens must be signed + // using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or + // ES512). + // + // This member is required. + WebIdentityToken *string + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. 
+ // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // For more information about role session permissions, see [Session policies]. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. 
The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + PolicyArns []types.PolicyDescriptorType + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole . + AssumedRoleUser *types.AssumedRoleUser + + // The intended audience (also known as client ID) of the web identity token. This + // is traditionally the client identifier issued to the application that requested + // the web identity token. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed in + // the AssumeRoleWithWebIdentity request. + Provider *string + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. 
+ // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON + // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon + // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles] + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html + SourceIdentity *string + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with the + // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user + // and the application that acquired the WebIdentityToken (pairwise identifier). + // For OpenID Connect ID tokens, this field contains the value returned by the + // identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithWebIdentity"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "AssumeRoleWithWebIdentity", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go new file mode 100644 index 00000000..a0f7a467 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go @@ -0,0 +1,223 @@ +// Code 
generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of short term credentials you can use to perform privileged tasks +// on a member account in your organization. +// +// Before you can launch a privileged session, you must have centralized root +// access in your organization. For steps to enable this feature, see [Centralize root access for member accounts]in the IAM +// User Guide. +// +// The STS global endpoint is not supported for AssumeRoot. You must send this +// request to a Regional STS endpoint. For more information, see [Endpoints]. +// +// You can track AssumeRoot in CloudTrail logs to determine what actions were +// performed in a session. For more information, see [Track privileged tasks in CloudTrail]in the IAM User Guide. +// +// [Endpoints]: https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html#sts-endpoints +// [Track privileged tasks in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-track-privileged-tasks.html +// [Centralize root access for member accounts]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-enable-root-access.html +func (c *Client) AssumeRoot(ctx context.Context, params *AssumeRootInput, optFns ...func(*Options)) (*AssumeRootOutput, error) { + if params == nil { + params = &AssumeRootInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoot", params, optFns, c.addOperationAssumeRootMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRootOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRootInput struct { + + // The member account principal ARN or account ID. + // + // This member is required. + TargetPrincipal *string + + // The identity based policy that scopes the session to the privileged tasks that + // can be performed. You can use one of following Amazon Web Services managed + // policies to scope root session actions. + // + // [IAMAuditRootUserCredentials] + // + // [IAMCreateRootUserPassword] + // + // [IAMDeleteRootUserCredentials] + // + // [S3UnlockBucketPolicy] + // + // [SQSUnlockQueuePolicy] + // + // [IAMDeleteRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMDeleteRootUserCredentials + // [IAMCreateRootUserPassword]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMCreateRootUserPassword + // [IAMAuditRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMAuditRootUserCredentials + // [S3UnlockBucketPolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-S3UnlockBucketPolicy + // [SQSUnlockQueuePolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-SQSUnlockQueuePolicy + // + // This member is required. + TaskPolicyArn *types.PolicyDescriptorType + + // The duration, in seconds, of the privileged session. The value can range from 0 + // seconds up to the maximum session duration of 900 seconds (15 minutes). If you + // specify a value higher than this setting, the operation fails. + // + // By default, the value is set to 900 seconds. 
+ DurationSeconds *int32 + + noSmithyDocumentSerde +} + +type AssumeRootOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. + Credentials *types.Credentials + + // The source identity specified by the principal that is calling the AssumeRoot + // operation. + // + // You can use the aws:SourceIdentity condition key to control access based on the + // value of source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles] + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + SourceIdentity *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoot{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoot{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoot"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpAssumeRootValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoot(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil 
{ + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoot(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "AssumeRoot", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go new file mode 100644 index 00000000..9e7cb17d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -0,0 +1,195 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decodes additional information about the authorization status of a request from +// an encoded message returned in response to an Amazon Web Services request. +// +// For example, if a user is not authorized to perform an operation that he or she +// has requested, the request returns a Client.UnauthorizedOperation response (an +// HTTP 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. +// +// Only certain Amazon Web Services operations return an encoded authorization +// message. The documentation for an individual operation indicates whether that +// operation returns an encoded message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// contain privileged information that the user who requested the operation should +// not see. To decode an authorization status message, a user must be granted +// permissions through an IAM [policy]to request the DecodeAuthorizationMessage ( +// sts:DecodeAuthorizationMessage ) action. +// +// The decoded message includes the following type of information: +// +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User +// Guide. +// +// - The principal who made the request. +// +// - The requested action. +// +// - The requested resource. +// +// - The values of condition keys in the context of the user's request. 
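+//
+// A minimal usage sketch (assuming an sts.Client named client, e.g. from
+// sts.NewFromConfig, a context.Context named ctx, and the encoded message from
+// the failed request in encodedMessage; aws.String and aws.ToString are helpers
+// from the github.com/aws/aws-sdk-go-v2/aws package):
+//
+//	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
+//		EncodedMessage: aws.String(encodedMessage),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(aws.ToString(out.DecodedMessage))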
+// +// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow +// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html +func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { + if params == nil { + params = &DecodeAuthorizationMessageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DecodeAuthorizationMessageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DecodeAuthorizationMessageInput struct { + + // The encoded message that was returned with the response. + // + // This member is required. + EncodedMessage *string + + noSmithyDocumentSerde +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + + // The API returns a response with the decoded message. + DecodedMessage *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DecodeAuthorizationMessage"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err 
!= nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DecodeAuthorizationMessage", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go new file mode 100644 index 00000000..28c05f13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -0,0 +1,186 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, +// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example, +// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access +// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// Amazon Web Services account to which the keys belong. Access key IDs beginning +// with AKIA are long-term credentials for an IAM user or the Amazon Web Services +// account root user. Access key IDs beginning with ASIA are temporary credentials +// that are created using STS operations. If the account in the response belongs to +// you, you can sign in as the root user and review your root user access keys. +// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who +// requested the temporary credentials for an ASIA access key, view the STS events +// in your [CloudTrail logs]in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might be +// active, inactive, or deleted. Active keys might not have permissions to perform +// an operation. Providing a deleted access key might return an error that the key +// doesn't exist. 
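+//
+// A minimal usage sketch (assuming a client from sts.NewFromConfig and a
+// context ctx; the key ID below is the documentation example value, and
+// aws.String / aws.ToString come from the github.com/aws/aws-sdk-go-v2/aws
+// package):
+//
+//	out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
+//		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("owning account:", aws.ToString(out.Account))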
+// +// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html +// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html +// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html +func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { + if params == nil { + params = &GetAccessKeyInfoInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetAccessKeyInfoOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetAccessKeyInfoInput struct { + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters that + // can consist of any upper- or lowercase letter or digit. + // + // This member is required. + AccessKeyId *string + + noSmithyDocumentSerde +} + +type GetAccessKeyInfoOutput struct { + + // The number used to identify the Amazon Web Services account. + Account *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetAccessKeyInfo"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), 
middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetAccessKeyInfo", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go new file mode 100644 index 00000000..de137b7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -0,0 +1,198 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns details about the IAM user or role whose credentials are used to call +// the operation. +// +// No permissions are required to perform this operation. If an administrator +// attaches a policy to your identity that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when access is denied. +// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide. +// +// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa +func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCallerIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCallerIdentityInput struct { + noSmithyDocumentSerde +} + +// Contains the response to a successful GetCallerIdentity request, including information about the +// entity making the request. +type GetCallerIdentityOutput struct { + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. 
The values returned are those listed in + // the aws:userid column in the [Principal table]found on the Policy Variables reference page in + // the IAM User Guide. + // + // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable + UserId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetCallerIdentity"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetCallerIdentity", + } +} + +// PresignGetCallerIdentity is used 
to generate a presigned HTTP Request which +// contains presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns, + c.client.addOperationGetCallerIdentityMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go new file mode 100644 index 00000000..67c041b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -0,0 +1,399 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials (consisting of an access key +// ID, a secret access key, and a security token) for a user. A typical use is in a +// proxy application that gets temporary security credentials on behalf of +// distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based +// application. For a comparison of GetFederationToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +// +// Although it is possible to call GetFederationToken using the security +// credentials of an Amazon Web Services account root user rather than an IAM user +// that you create for the purpose of a proxy application, we do not recommend it. +// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// # Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by +// using the root user credentials have a maximum duration of 3,600 seconds (1 +// hour). 
+// +// # Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service with the following exceptions: +// +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. This limitation does not apply to console sessions. +// +// - You cannot call any STS operations except GetCallerIdentity . +// +// You can use temporary credentials for single sign-on (SSO) to the console. +// +// You must pass an inline or managed [session policy] to this operation. You can pass a single +// JSON policy document to use as an inline session policy. You can also specify up +// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session +// policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM user +// policies and the session policies that you pass. This gives you a way to further +// restrict the permissions for a federated user. You cannot use session policies +// to grant more permissions than those that are defined in the permissions policy +// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For +// information about using GetFederationToken to create temporary security +// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker]. +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session in the +// Principal element of the policy, the session has the permissions allowed by the +// policy. These permissions are granted in addition to the permissions granted by +// the session policies. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. +// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This means +// that you cannot have separate Department and department tag keys. Assume that +// the user that you are federating has the Department = Marketing tag and you +// pass the department = engineering session tag. Department and department are +// not saved as separate tags, and the session tag passed in the request takes +// precedence over the user tag. 
+// +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito]: http://aws.amazon.com/cognito/ +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { + if params == nil { + params = &GetFederationTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetFederationTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetFederationTokenInput struct { + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob ). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon S3 + // bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // This member is required. + Name *string + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using root user credentials are restricted to a maximum of 3,600 seconds (one + // hour). If the specified duration is longer than one hour, the session obtained + // by using root user credentials defaults to one hour. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. 
+ // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as a managed session policy. The policies must exist in the same account as + // the IAM user that is requesting federated access. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. The plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. You can provide up to 10 managed policy + // ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General + // Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. 
If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + PolicyArns []types.PolicyDescriptorType + + // A list of session tags. Each session tag consists of a key name and an + // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User + // Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user tag + // with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + Tags []types.Tag + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetFederationToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. +type GetFederationTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. 
+ Credentials *types.Credentials + + // Identifiers for the federated user associated with the credentials (such as + // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use + // the federated user's ARN in your resource-based policies, such as an Amazon S3 + // bucket policy. + FederatedUser *types.FederatedUser + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetFederationToken"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetFederationToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go new file mode 100644 index 00000000..903d151c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -0,0 +1,245 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary credentials for an Amazon Web Services account or +// IAM user. The credentials consist of an access key ID, a secret access key, and +// a security token. Typically, you use GetSessionToken if you want to use MFA to +// protect programmatic calls to specific Amazon Web Services API operations like +// Amazon EC2 StopInstances . +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is +// associated with their MFA device. Using the temporary security credentials that +// the call returns, IAM users can then make programmatic calls to API operations +// that require MFA authentication. An incorrect MFA code causes the API to return +// an access denied error. For a comparison of GetSessionToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +// +// No permissions are required for users to perform this operation. The purpose of +// the sts:GetSessionToken operation is to authenticate the user using MFA. You +// cannot use policies to control authentication operations. For more information, +// see [Permissions for GetSessionToken]in the IAM User Guide. +// +// # Session Duration +// +// The GetSessionToken operation must be called by using the long-term Amazon Web +// Services security credentials of an IAM user. Credentials that are created by +// IAM users are valid for the duration that you specify. This duration can range +// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours), +// with a default of 43,200 seconds (12 hours). Credentials based on account +// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1 +// hour), with a default of 1 hour. +// +// # Permissions +// +// The temporary security credentials created by GetSessionToken can be used to +// make API calls to any Amazon Web Services service with the following exceptions: +// +// - You cannot call any IAM API operations unless MFA authentication +// information is included in the request. +// +// - You cannot call any STS API except AssumeRole or GetCallerIdentity . +// +// The credentials that GetSessionToken returns are based on permissions +// associated with the IAM user whose credentials were used to call the operation. +// The temporary credentials have the same permissions as the IAM user. 
+// +// Although it is possible to call GetSessionToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user, we do not +// recommend it. If GetSessionToken is called using root user credentials, the +// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in +// the IAM User Guide +// +// For more information about using GetSessionToken to create temporary +// credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide. +// +// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html +// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { + if params == nil { + params = &GetSessionTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSessionTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSessionTokenInput struct { + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for + // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds + // (one hour). If the duration is longer than one hour, the session for Amazon Web + // Services account owners defaults to one hour. + DurationSeconds *int32 + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM user + // has a policy that requires MFA authentication. The value is either the serial + // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name + // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You + // can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- + SerialNumber *string + + // The value provided by the MFA device, if MFA is required. If any policy + // requires the IAM user to submit an MFA code, specify this value. If MFA + // authentication is required, the user must provide a code when requesting a set + // of temporary security credentials. 
A user who fails to provide the code receives + // an "access denied" response when requesting resources that require MFA + // authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetSessionToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. +type GetSessionTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. + Credentials *types.Credentials + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionToken"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != 
nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetSessionToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go new file mode 100644 index 00000000..a90b2b73 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go @@ -0,0 +1,325 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. 
+ Operation string + + // The region in which the operation is being invoked. + Region string +} + +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthParamsRegion(ctx, params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. +type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + "AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, + + "AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + {SchemeID: smithyauth.SchemeIDAnonymous}, + } + }, +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "sts") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func 
newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + + span.End() + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { + options Options +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go new file mode 100644 index 00000000..59349890 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -0,0 +1,2719 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strconv" + "strings" + "time" +) + +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + +type awsAwsquery_deserializeOpAssumeRole struct { +} + +func (*awsAwsquery_deserializeOpAssumeRole) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata) + } + output := &AssumeRoleOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err 
:= io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata) + } + output := &AssumeRoleWithSAMLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithSAMLResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, 
decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata) + } + output := &AssumeRoleWithWebIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) 
+ if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPCommunicationError", errorCode): + return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoot struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoot) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, 
metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoot(response, &metadata) + } + output := &AssumeRootOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRootResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRootOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoot(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata) + } + output := &DecodeAuthorizationMessageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("DecodeAuthorizationMessageResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidAuthorizationMessageException", errorCode): + return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata) + } + output := &GetAccessKeyInfoOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetAccessKeyInfoResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out 
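
Each operation gets one of these HandleDeserialize middlewares at the Deserialize step of the smithy middleware stack, which means callers can layer their own middleware around them. A sketch of a hypothetical status-logging deserialize middleware, assuming the client's usual APIOptions hook for wiring it in (statusLogger and addStatusLogger are illustrative names, not SDK API):

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// statusLogger is a hypothetical middleware that runs around the generated
// operation deserializers and reports the raw HTTP status code.
type statusLogger struct{}

func (*statusLogger) ID() string { return "statusLogger" }

func (*statusLogger) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	middleware.DeserializeOutput, middleware.Metadata, error,
) {
	out, metadata, err := next.HandleDeserialize(ctx, in)
	if resp, ok := out.RawResponse.(*smithyhttp.Response); ok {
		fmt.Println("deserialized response with status:", resp.StatusCode)
	}
	return out, metadata, err
}

// addStatusLogger registers the middleware on a stack; it would typically be
// appended to the client Options' APIOptions.
func addStatusLogger(stack *middleware.Stack) error {
	return stack.Deserialize.Add(&statusLogger{}, middleware.After)
}

func main() {
	fmt.Println("register addStatusLogger through the client Options' APIOptions")
}
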
middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata) + } + output := &GetCallerIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetCallerIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetFederationToken struct { +} + +func (*awsAwsquery_deserializeOpGetFederationToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + 
return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata) + } + output := &GetFederationTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetFederationTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetSessionToken struct { +} + +func (*awsAwsquery_deserializeOpGetSessionToken) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata) + } + output := &GetSessionTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetSessionTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + 
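
A recurring detail in these deserializers is the io.TeeReader into a small ring buffer: everything the XML decoder reads is also copied aside, so a decode failure can attach a snapshot of the bytes seen so far to the DeserializationError. A stdlib-only sketch of the same idea, using an unbounded bytes.Buffer in place of smithy's bounded ring buffer (decodeWithSnapshot is an illustrative helper, not SDK API):

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// decodeWithSnapshot tees everything read from body into a side buffer so
// that a decode failure can carry the bytes consumed so far for debugging.
func decodeWithSnapshot(body io.Reader, out any) error {
	var snapshot bytes.Buffer
	dec := xml.NewDecoder(io.TeeReader(body, &snapshot))
	if err := dec.Decode(out); err != nil {
		return fmt.Errorf("failed to decode response body, %w (snapshot: %q)", err, snapshot.Bytes())
	}
	return nil
}

func main() {
	var v struct {
		Account string `xml:"GetAccessKeyInfoResult>Account"`
	}
	// A truncated body exercises the snapshot-carrying error path.
	err := decodeWithSnapshot(strings.NewReader("<GetAccessKeyInfoResponse><GetAccessKeyInfoResult><Acc"), &v)
	fmt.Println(err)
}
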
ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPCommunicationErrorException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPRejectedClaimException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = 
awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidAuthorizationMessageException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidIdentityTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.MalformedPolicyDocumentException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.PackedPolicyTooLargeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.RegionDisabledException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func 
awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AssumedRoleUser + if *v == nil { + sv = &types.AssumedRoleUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("AssumedRoleId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AssumedRoleId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Credentials + if *v == nil { + sv = &types.Credentials{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessKeyId = ptr.String(xtv) + } + + case strings.EqualFold("Expiration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Expiration = ptr.Time(t) + } + + case strings.EqualFold("SecretAccessKey", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SecretAccessKey = ptr.String(xtv) + } + + case strings.EqualFold("SessionToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SessionToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + 
err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FederatedUser + if *v == nil { + sv = &types.FederatedUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("FederatedUserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.FederatedUserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPCommunicationErrorException + if *v == nil { + sv = &types.IDPCommunicationErrorException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPRejectedClaimException + if *v == nil { + sv = &types.IDPRejectedClaimException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidAuthorizationMessageException + if *v == nil { + sv = &types.InvalidAuthorizationMessageException{} + } else { + sv = *v + } + 
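
The document deserializers in this file all share one token loop: read the next child element, match its local name case-insensitively, decode the value for known fields, and Skip anything unexpected so unknown tags are tolerated. A compact stdlib-only version of that loop, without the smithy node decoders (decodeMessage is an illustrative helper, not SDK code):

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// decodeMessage walks the children of an already-opened <Error> element,
// matching the "message" tag case-insensitively and skipping unknown tags,
// mirroring the generated token loop.
func decodeMessage(dec *xml.Decoder) (string, error) {
	var msg string
	for {
		tok, err := dec.Token()
		if err != nil {
			return msg, err
		}
		switch t := tok.(type) {
		case xml.StartElement:
			if strings.EqualFold("message", t.Name.Local) {
				if err := dec.DecodeElement(&msg, &t); err != nil {
					return msg, err
				}
			} else if err := dec.Skip(); err != nil { // ignore the unexpected tag element
				return msg, err
			}
		case xml.EndElement:
			return msg, nil // closing </Error>
		}
	}
}

func main() {
	dec := xml.NewDecoder(strings.NewReader(`<Error><Type>Sender</Type><Message>token expired</Message></Error>`))
	if _, err := dec.Token(); err != nil { // consume the opening <Error>
		panic(err)
	}
	msg, err := decodeMessage(dec)
	fmt.Println(msg, err)
}
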
+ for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidIdentityTokenException + if *v == nil { + sv = &types.InvalidIdentityTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PackedPolicyTooLargeException + if *v == nil { + sv = &types.PackedPolicyTooLargeException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RegionDisabledException + if *v == nil { + sv = &types.RegionDisabledException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleOutput + if *v == nil { + sv = &AssumeRoleOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithSAMLOutput + if *v == nil { + sv = &AssumeRoleWithSAMLOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { 
+ break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Issuer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Issuer = ptr.String(xtv) + } + + case strings.EqualFold("NameQualifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NameQualifier = ptr.String(xtv) + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("Subject", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Subject = ptr.String(xtv) + } + + case strings.EqualFold("SubjectType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectType = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithWebIdentityOutput + if *v == nil { + sv = &AssumeRoleWithWebIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("Provider", t.Name.Local): + val, err := decoder.Value() + if err != nil { + 
return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Provider = ptr.String(xtv) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectFromWebIdentityToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRootOutput + if *v == nil { + sv = &AssumeRootOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DecodeAuthorizationMessageOutput + if *v == nil { + sv = &DecodeAuthorizationMessageOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DecodedMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DecodedMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetAccessKeyInfoOutput + if *v == nil { + sv = &GetAccessKeyInfoOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + 
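
The Output document deserializers above walk the wrapped <OperationResponse><OperationResult>...</OperationResult> structure field by field. For readers who just want to see the wire shape, a stdlib-only sketch that decodes an AssumeRoot-style response with struct tags instead of token walking (field layout and values here are illustrative):

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
	"time"
)

// assumeRootResponse mirrors the awsquery response shape handled above; the
// generated code decodes it token by token rather than with struct tags.
type assumeRootResponse struct {
	SourceIdentity string `xml:"AssumeRootResult>SourceIdentity"`
	Credentials    struct {
		AccessKeyId     string `xml:"AccessKeyId"`
		SecretAccessKey string `xml:"SecretAccessKey"`
		SessionToken    string `xml:"SessionToken"`
		Expiration      string `xml:"Expiration"`
	} `xml:"AssumeRootResult>Credentials"`
	RequestID string `xml:"ResponseMetadata>RequestId"`
}

func main() {
	body := `<AssumeRootResponse>
  <AssumeRootResult>
    <SourceIdentity>Alice</SourceIdentity>
    <Credentials>
      <AccessKeyId>ASIAEXAMPLE</AccessKeyId>
      <SecretAccessKey>example-secret</SecretAccessKey>
      <SessionToken>example-token</SessionToken>
      <Expiration>2025-01-01T00:00:00Z</Expiration>
    </Credentials>
  </AssumeRootResult>
  <ResponseMetadata><RequestId>abc-123</RequestId></ResponseMetadata>
</AssumeRootResponse>`

	var resp assumeRootResponse
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&resp); err != nil {
		panic(err)
	}
	// The generated code parses Expiration with smithytime.ParseDateTime;
	// time.RFC3339 covers the same common case in this sketch.
	exp, err := time.Parse(time.RFC3339, resp.Credentials.Expiration)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Credentials.AccessKeyId, exp, resp.RequestID)
}
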
break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetCallerIdentityOutput + if *v == nil { + sv = &GetCallerIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("UserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetFederationTokenOutput + if *v == nil { + sv = &GetFederationTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("FederatedUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetSessionTokenOutput + if *v == nil { + sv = &GetSessionTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + 
decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go new file mode 100644 index 00000000..cbb19c7f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -0,0 +1,13 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sts provides the API client, operations, and parameter types for AWS +// Security Token Service. +// +// # Security Token Service +// +// Security Token Service (STS) enables you to request temporary, +// limited-privilege credentials for users. This guide provides descriptions of the +// STS API. For more information about using this service, see [Temporary Security Credentials]. +// +// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html +package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go new file mode 100644 index 00000000..dca2ce35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -0,0 +1,1136 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. 
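
A short usage sketch of the STS client whose deserializers and endpoint resolution are vendored here: it builds a client from the default config and overrides the endpoint through BaseEndpoint, the newer equivalent of the legacy EndpointResolverFromURL path declared below (the localhost URL is a hypothetical stub and error handling is minimal):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// BaseEndpoint overrides where requests are sent; resolveBaseEndpoint
	// below picks up the same override from AWS_ENDPOINT_URL_STS. The URL
	// here is a hypothetical local stub.
	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.BaseEndpoint = aws.String("http://localhost:4566")
	})

	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("caller ARN:", aws.ToString(out.Arn))
}
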
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "sts" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type 
awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. 
+ // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string + + // Whether the global endpoint should be used, rather then the regional endpoint + // for us-east-1. + // + // Defaults to false if no value is + // provided. + // + // AWS::STS::UseGlobalEndpoint + UseGlobalEndpoint *bool +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + if p.UseGlobalEndpoint == nil { + return fmt.Errorf("parameter UseGlobalEndpoint is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + + if p.UseGlobalEndpoint == nil { + p.UseGlobalEndpoint = ptr.Bool(false) + } + return p +} + +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. 
+func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + _UseGlobalEndpoint := *params.UseGlobalEndpoint + + if _UseGlobalEndpoint == true { + if !(params.Endpoint != nil) { + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == false { + if _UseDualStack == false { + if _Region == "ap-northeast-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-south-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-southeast-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-southeast-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + 
smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "aws-global" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "ca-central-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-central-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-north-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + 
smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-3" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "sa-east-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "us-east-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "us-east-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + 
smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "us-west-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _Region == "us-west-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var 
out strings.Builder + out.WriteString("https://sts-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if _PartitionResult.SupportsFIPS == true { + if _PartitionResult.Name == "aws-us-gov" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + if _Region == "aws-global" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetSigV4SigningName(&sp, "sts") + smithyhttp.SetSigV4ASigningName(&sp, "sts") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = bindRegion(options.Region) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + span.End() + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json new file mode 100644 index 00000000..86bb3b79 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -0,0 +1,43 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", + "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + 
"api_client_test.go", + "api_op_AssumeRole.go", + "api_op_AssumeRoleWithSAML.go", + "api_op_AssumeRoleWithWebIdentity.go", + "api_op_AssumeRoot.go", + "api_op_DecodeAuthorizationMessage.go", + "api_op_GetAccessKeyInfo.go", + "api_op_GetCallerIdentity.go", + "api_op_GetFederationToken.go", + "api_op_GetSessionToken.go", + "auth.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "options.go", + "protocol_test.go", + "serializers.go", + "snapshot_test.go", + "sra_operation_order_test.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.22", + "module": "github.com/aws/aws-sdk-go-v2/service/sts", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go new file mode 100644 index 00000000..a984a2a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package sts + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.33.19" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go new file mode 100644 index 00000000..8ee3eed8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -0,0 +1,557 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver STS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + 
endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + 
{ + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "eu-isoe-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: 
[]string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isof-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isof-south-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go new file mode 100644 index 00000000..e1398f3b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -0,0 +1,232 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. 
+ AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The client meter provider. + MeterProvider metrics.MeterProvider + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The client tracer provider. + TracerProvider tracing.TracerProvider + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. 
+ // + // Currently does not support per operation call overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. 
+func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go new file mode 100644 index 00000000..96b22213 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -0,0 +1,1005 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsquery_serializeOpAssumeRole struct { +} + +func (*awsAwsquery_serializeOpAssumeRole) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRole") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = 
request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithSAML") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithWebIdentity") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoot struct { +} + +func (*awsAwsquery_serializeOpAssumeRoot) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRootInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + 
body.Key("Action").String("AssumeRoot") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRootInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DecodeAuthorizationMessage") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, 
in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetAccessKeyInfo") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCallerIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + 
httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetCallerIdentity") + body.Key("Version").String("2011-06-15") + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetFederationToken struct { +} + +func (*awsAwsquery_serializeOpGetFederationToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetFederationTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetFederationToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetSessionToken struct { +} 
+ +func (*awsAwsquery_serializeOpGetSessionToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSessionTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetSessionToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { + object := value.Object() + _ = object + + if v.Arn != nil { + objectKey := object.Key("arn") + objectKey.String(*v.Arn) + } + + return nil +} + +func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error { + object := value.Object() + _ = object + + if v.ContextAssertion != nil { + objectKey := object.Key("ContextAssertion") + objectKey.String(*v.ContextAssertion) + } + + if v.ProviderArn != nil { + objectKey := object.Key("ProviderArn") + objectKey.String(*v.ProviderArn) + } + + return nil +} + +func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error { + array := value.Array("member") + + for i 
:= range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { + object := value.Object() + _ = object + + if v.Key != nil { + objectKey := object.Key("Key") + objectKey.String(*v.Key) + } + + if v.Value != nil { + objectKey := object.Key("Value") + objectKey.String(*v.Value) + } + + return nil +} + +func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.ExternalId != nil { + objectKey := object.Key("ExternalId") + objectKey.String(*v.ExternalId) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProvidedContexts != nil { + objectKey := object.Key("ProvidedContexts") + if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil { + return err + } + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.SourceIdentity != nil { + objectKey := object.Key("SourceIdentity") + objectKey.String(*v.SourceIdentity) + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + if v.TransitiveTagKeys != nil { + objectKey := object.Key("TransitiveTagKeys") + if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.PrincipalArn != nil { + objectKey := object.Key("PrincipalArn") + objectKey.String(*v.PrincipalArn) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + 
} + + if v.SAMLAssertion != nil { + objectKey := object.Key("SAMLAssertion") + objectKey.String(*v.SAMLAssertion) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProviderId != nil { + objectKey := object.Key("ProviderId") + objectKey.String(*v.ProviderId) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.WebIdentityToken != nil { + objectKey := object.Key("WebIdentityToken") + objectKey.String(*v.WebIdentityToken) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.TargetPrincipal != nil { + objectKey := object.Key("TargetPrincipal") + objectKey.String(*v.TargetPrincipal) + } + + if v.TaskPolicyArn != nil { + objectKey := object.Key("TaskPolicyArn") + if err := awsAwsquery_serializeDocumentPolicyDescriptorType(v.TaskPolicyArn, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { + object := value.Object() + _ = object + + if v.EncodedMessage != nil { + objectKey := object.Key("EncodedMessage") + objectKey.String(*v.EncodedMessage) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { + object := value.Object() + _ = object + + if v.AccessKeyId != nil { + objectKey := object.Key("AccessKeyId") + objectKey.String(*v.AccessKeyId) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + return nil +} + +func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Name != nil { + objectKey := object.Key("Name") + objectKey.String(*v.Name) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { + object := value.Object() + _ = 
object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go new file mode 100644 index 00000000..041629bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -0,0 +1,248 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The web identity token that was passed is expired or is not valid. Get a new +// identity token from the identity provider and then retry the request. +type ExpiredTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request could not be fulfilled because the identity provider (IDP) that was +// asked to verify the incoming identity token could not be reached. This is often +// a transient error caused by network conditions. Retry the request a limited +// number of times so that you don't exceed the request rate. If the error +// persists, the identity provider might be down or not responding. +type IDPCommunicationErrorException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IDPCommunicationErrorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPCommunicationErrorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPCommunicationErrorException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IDPCommunicationError" + } + return *e.ErrorCodeOverride +} +func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The identity provider (IdP) reported that authentication failed. This might be +// because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it can +// also mean that the claim has expired or has been explicitly revoked. 
+type IDPRejectedClaimException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IDPRejectedClaimException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPRejectedClaimException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPRejectedClaimException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IDPRejectedClaim" + } + return *e.ErrorCodeOverride +} +func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as line +// breaks, or if the message has expired. +type InvalidAuthorizationMessageException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidAuthorizationMessageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAuthorizationMessageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAuthorizationMessageException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidAuthorizationMessageException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry the +// request. +type InvalidIdentityTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidIdentityTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidIdentityTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidIdentityTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidIdentityToken" + } + return *e.ErrorCodeOverride +} +func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +type MalformedPolicyDocumentException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *MalformedPolicyDocumentException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MalformedPolicyDocumentException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MalformedPolicyDocumentException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "MalformedPolicyDocument" + } + return *e.ErrorCodeOverride +} +func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session tags +// into a packed binary format that has a separate limit. 
The error message +// indicates by percentage how close the policies and tags are to the upper size +// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide. +// +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length +type PackedPolicyTooLargeException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PackedPolicyTooLargeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PackedPolicyTooLargeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PackedPolicyTooLargeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PackedPolicyTooLarge" + } + return *e.ErrorCodeOverride +} +func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see [Activating and Deactivating STS in an Amazon Web Services Region]in the IAM +// User Guide. +// +// [Activating and Deactivating STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html +type RegionDisabledException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RegionDisabledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RegionDisabledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RegionDisabledException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RegionDisabledException" + } + return *e.ErrorCodeOverride +} +func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go new file mode 100644 index 00000000..dff7a3c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -0,0 +1,144 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + + // The ARN of the temporary security credentials that are returned from the AssumeRole + // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in + // the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html + // + // This member is required. + Arn *string + + // A unique identifier that contains the role ID and the role session name of the + // role that is being assumed. The role ID is generated by Amazon Web Services when + // the role is created. 
+ // + // This member is required. + AssumedRoleId *string + + noSmithyDocumentSerde +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + + // The access key ID that identifies the temporary security credentials. + // + // This member is required. + AccessKeyId *string + + // The date on which the current credentials expire. + // + // This member is required. + Expiration *time.Time + + // The secret access key that can be used to sign requests. + // + // This member is required. + SecretAccessKey *string + + // The token that users must pass to the service API to use the temporary + // credentials. + // + // This member is required. + SessionToken *string + + noSmithyDocumentSerde +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + + // The ARN that specifies the federated user that is associated with the + // credentials. For more information about ARNs and how to use them in policies, + // see [IAM Identifiers]in the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html + // + // This member is required. + Arn *string + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // This member is required. + FederatedUserId *string + + noSmithyDocumentSerde +} + +// A reference to the IAM managed policy that is passed as a session policy for a +// role session or a federated user session. +type PolicyDescriptorType struct { + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web + // Services General Reference. + // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + Arn *string + + noSmithyDocumentSerde +} + +// Contains information about the provided context. This includes the signed and +// encrypted trusted context assertion and the context provider ARN from which the +// trusted context assertion was generated. +type ProvidedContext struct { + + // The signed and encrypted trusted context assertion generated by the context + // provider. The trusted context assertion is signed and encrypted by Amazon Web + // Services STS. + ContextAssertion *string + + // The context provider ARN from which the trusted context assertion was generated. + ProviderArn *string + + noSmithyDocumentSerde +} + +// You can pass custom key-value pair attributes when you assume a role or +// federate a user. These are called session tags. You can then use the session +// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User +// Guide. +// +// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +type Tag struct { + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + // + // This member is required. 
+ Key *string + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go new file mode 100644 index 00000000..1026e221 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -0,0 +1,347 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAssumeRole struct { +} + +func (*validateOpAssumeRole) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithSAML struct { +} + +func (*validateOpAssumeRoleWithSAML) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithWebIdentity struct { +} + +func (*validateOpAssumeRoleWithWebIdentity) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoot struct { +} + +func (*validateOpAssumeRoot) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoot) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRootInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", 
in.Parameters) + } + if err := validateOpAssumeRootInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecodeAuthorizationMessage struct { +} + +func (*validateOpDecodeAuthorizationMessage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetAccessKeyInfo struct { +} + +func (*validateOpGetAccessKeyInfo) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetAccessKeyInfoInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetFederationToken struct { +} + +func (*validateOpGetFederationToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetFederationTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetFederationTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) +} + +func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) +} + +func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) +} + +func addOpAssumeRootValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoot{}, middleware.After) +} + +func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) +} + +func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) +} + +func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagListType(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagListType"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleInput(v *AssumeRoleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.PrincipalArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) + } + if v.SAMLAssertion == nil { + invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.WebIdentityToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRootInput(v *AssumeRootInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRootInput"} + if v.TargetPrincipal == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetPrincipal")) + } + if v.TaskPolicyArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskPolicyArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} + if v.EncodedMessage == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} + if v.AccessKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) + } + if invalidParams.Len() > 0 { + return 
invalidParams + } else { + return nil + } +} + +func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index f6bb14d0..c3516e01 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -74,7 +74,9 @@ const ( ) // AWS ISOE (Europe) partition's regions. -const () +const ( + EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West. +) // AWS ISOF partition's regions. const () @@ -244,13 +246,6 @@ var awsPartition = partition{ }, }, Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "access-analyzer": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -298,6 +293,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -331,6 +332,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -661,6 +671,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -694,6 +713,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -789,30 +817,60 @@ var awsPartition = partition{ }, "airflow": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + 
}: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -822,6 +880,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -831,6 +898,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -847,6 +917,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -905,6 +978,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -963,6 +1039,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -1018,18 +1097,33 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -1882,6 +1976,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1906,6 +2003,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -3758,6 +3858,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4017,15 +4126,75 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: 
"auditmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "autoscaling": service{ @@ -4327,6 +4496,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4444,91 +4616,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -4693,9 +4780,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "bedrock-ap-northeast-1", }: endpoint{ @@ -4704,6 +4797,14 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + endpointKey{ + Region: "bedrock-ap-south-1", + }: endpoint{ + Hostname: "bedrock.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: 
"bedrock-ap-southeast-1", }: endpoint{ @@ -4712,6 +4813,22 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-ca-central-1", + }: endpoint{ + Hostname: "bedrock.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-eu-central-1", }: endpoint{ @@ -4720,6 +4837,38 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "bedrock-eu-west-1", + }: endpoint{ + Hostname: "bedrock.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-2", + }: endpoint{ + Hostname: "bedrock.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-3", + }: endpoint{ + Hostname: "bedrock.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-fips-us-east-1", }: endpoint{ @@ -4744,6 +4893,14 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-ap-south-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "bedrock-runtime-ap-southeast-1", }: endpoint{ @@ -4752,6 +4909,22 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-eu-central-1", }: endpoint{ @@ -4760,6 +4933,38 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "bedrock-runtime-eu-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-3", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-fips-us-east-1", }: endpoint{ @@ -4776,6 +4981,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-runtime-sa-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, 
endpointKey{ Region: "bedrock-runtime-us-east-1", }: endpoint{ @@ -4792,6 +5005,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-sa-east-1", + }: endpoint{ + Hostname: "bedrock.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-us-east-1", }: endpoint{ @@ -4808,9 +5029,24 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -4835,6 +5071,9 @@ var awsPartition = partition{ }, "braket": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -4865,6 +5104,12 @@ var awsPartition = partition{ }, "cases": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5079,69 +5324,262 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + 
Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.api.aws", + }, }, }, "cloudcontrolapi": service{ @@ -5149,78 +5587,216 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.af-south-1.api.aws", + }, endpointKey{ Region: 
"ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-west-1.api.aws", + }, endpointKey{ Region: "ca-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: 
"eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -5278,51 +5854,123 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + 
Hostname: "cloudcontrolapi-fips.us-west-2.api.aws", + }, }, }, "clouddirectory": service{ @@ -5853,6 +6501,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6747,6 +7398,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6759,6 +7413,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6768,18 +7425,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6828,6 +7497,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6877,6 +7549,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6889,6 +7564,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6898,18 +7576,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6958,6 +7648,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7137,12 +7830,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7781,6 +8489,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8777,6 +9503,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8980,9 +9712,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9007,6 +9751,24 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -10263,6 +11025,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ec2-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10302,6 +11073,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11310,6 +12090,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11490,6 +12279,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -12276,12 +13074,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -12430,6 +13234,9 @@ var awsPartition = 
partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13374,6 +14181,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -13500,6 +14316,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -13885,6 +14710,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -13918,6 +14752,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-ca-central-1", }: endpoint{ @@ -13927,6 +14770,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-prod-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-us-east-1", }: endpoint{ @@ -14026,6 +14878,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "prod-ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "prod-us-east-1", }: endpoint{ @@ -14399,6 +15269,18 @@ var awsPartition = partition{ }, }, }, + "globalaccelerator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "glue": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -14437,6 +15319,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14866,6 +15751,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15044,13 +15932,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -15155,6 +16036,9 @@ var awsPartition = 
partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15170,6 +16054,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15182,6 +16069,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16198,16 +17088,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "iotroborunner": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "iotsecuredtunneling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -16875,6 +17755,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kafka-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -16908,6 +17797,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17063,12 +17961,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17488,6 +18401,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18163,6 +19079,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18636,6 +19555,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18935,6 +19857,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19096,66 +20021,222 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, 
+ endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -19195,18 +20276,48 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -19216,6 +20327,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -19225,6 +20342,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -19234,6 +20357,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -19350,6 +20479,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -19389,6 +20521,9 @@ var awsPartition = partition{ Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -19600,12 +20735,30 @@ var awsPartition = partition{ }, "media-pipelines-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -19809,6 +20962,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + 
endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -19858,15 +21014,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19909,6 +21077,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -20148,6 +21319,9 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -20166,6 +21340,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21024,6 +22213,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21339,6 +22531,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21668,6 +22863,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -21692,6 +22895,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -21700,6 +22911,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -21732,6 +22951,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -22053,12 +23280,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + 
Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -22329,91 +23562,490 @@ var awsPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.af-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-4", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-4.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + Protocols: []string{"https"}, 
+ }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-north-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-3.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: 
"pi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.il-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "sa-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.sa-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + 
Hostname: "pi.us-west-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -22811,6 +24443,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "portal.sso.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -22835,6 +24475,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -22843,6 +24491,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -22875,6 +24531,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "portal.sso.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -22965,6 +24629,19 @@ var awsPartition = partition{ }, }, }, + "private-networks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "profile": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -23365,6 +25042,9 @@ var awsPartition = partition{ }, "quicksight": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -23380,15 +25060,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "api", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24427,12 +26119,24 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24442,18 +26146,93 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", 
+ CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + }, }, }, "rekognition": service{ @@ -24777,156 +26556,273 @@ var awsPartition = partition{ }, }, "resource-explorer-2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, + }, + "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.af-south-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-east-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-northeast-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-northeast-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", - }: endpoint{ - Hostname: "resource-explorer-2.ap-northeast-3.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-south-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-south-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-south-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-3", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-3.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-4", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-4.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ca-central-1", - }: endpoint{ - Hostname: "resource-explorer-2.ca-central-1.api.aws", - }, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-central-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-central-2", - }: endpoint{ - Hostname: "resource-explorer-2.eu-central-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-north-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-north-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-south-1.api.aws", - }, + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-west-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-west-2", - }: endpoint{ - Hostname: "resource-explorer-2.eu-west-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", }: endpoint{ - Hostname: "resource-explorer-2.eu-west-3.api.aws", + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "il-central-1", + Region: "fips-us-east-2", }: endpoint{ - Hostname: "resource-explorer-2.il-central-1.api.aws", + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "me-central-1", + Region: "fips-us-west-1", }: endpoint{ - Hostname: 
"resource-explorer-2.me-central-1.api.aws", + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "me-south-1", + Region: "fips-us-west-2", }: endpoint{ - Hostname: "resource-explorer-2.me-south-1.api.aws", + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.sa-east-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, }: endpoint{ - Hostname: "resource-explorer-2.us-east-1.api.aws", + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, }: endpoint{ - Hostname: "resource-explorer-2.us-east-2.api.aws", + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, }: endpoint{ - Hostname: "resource-explorer-2.us-west-1.api.aws", + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, }: endpoint{ - Hostname: "resource-explorer-2.us-west-2.api.aws", + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", }, }, }, - "resource-groups": service{ + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -24991,179 +26887,6 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - 
}: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - }, - }, - }, - "robomaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "rolesanywhere": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -25399,33 +27122,81 @@ var awsPartition = partition{ }, "rum": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ 
Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -26195,6 +27966,44 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "s3-control.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "s3-control.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -26271,6 +28080,25 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "s3-control.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -26309,6 +28137,44 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "s3-control.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "s3-control.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -26358,6 +28224,55 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "s3-control.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + 
endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -26377,6 +28292,25 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "s3-control.eu-central-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -26396,6 +28330,44 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "s3-control.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "s3-control.eu-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -26453,6 +28425,63 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "s3-control.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "s3-control.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + 
Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "s3-control.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -27765,21 +29794,85 @@ var awsPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "servicecatalog": service{ @@ -28227,6 +30320,36 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29041,18 +31164,36 @@ var awsPartition = partition{ }, "sms-voice": 
service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -29062,15 +31203,33 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -29089,6 +31248,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -29098,6 +31275,18 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -29107,6 +31296,24 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -29378,6 +31585,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -30344,6 +32554,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -30353,9 +32566,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30368,6 +32587,9 @@ var 
awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -30607,6 +32829,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31404,41 +33644,115 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "tax": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "tax.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "textract": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -31487,39 +33801,87 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: 
fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.api.aws", + }, }, }, "thinclient": service{ @@ -31926,6 +34288,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "transfer-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31959,6 +34330,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -32127,6 +34507,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -32185,6 +34580,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -32209,6 +34619,63 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-ca-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -32221,15 +34688,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + }, }, }, "voice-chime": service{ @@ -32389,6 +34880,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -32410,12 +34907,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -33418,6 +35924,23 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "wafv2.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -33662,6 +36185,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", 
}: endpoint{ @@ -34503,6 +37035,21 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "airflow": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34726,16 +37273,6 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, "batch": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34789,9 +37326,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "cloudformation": service{ @@ -35263,6 +37812,19 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "entitlement.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35450,6 +38012,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -35690,7 +38262,7 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-northwest-1", }: endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, @@ -35762,6 +38334,16 @@ var awscnPartition = partition{ }, }, }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "oam": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35817,10 +38399,28 @@ var awscnPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, endpointKey{ Region: "cn-northwest-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, }, }, "pipes": service{ @@ -35937,31 +38537,9 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, - }, - }, - "resource-explorer-2": service{ - Defaults: endpointDefaults{ - 
defaultKey{}: endpoint{ - DNSSuffix: "api.amazonwebservices.com.cn", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn", - }, endpointKey{ Region: "cn-northwest-1", - }: endpoint{ - Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn", - }, + }: endpoint{}, }, }, "resource-groups": service{ @@ -37432,13 +40010,37 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, }, }, }, @@ -37462,16 +40064,6 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -37522,6 +40114,38 @@ var awsusgovPartition = partition{ }, "bedrock": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "bedrock-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -37606,21 +40230,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-west-1.api.aws", + }, endpointKey{ Region: 
"us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws", + }, }, }, "clouddirectory": service{ @@ -38137,9 +40785,39 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "data-ats.iot": service{ @@ -38320,20 +40998,40 @@ var awsusgovPartition = partition{ "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", }, }, }, @@ -39019,6 +41717,15 @@ var awsusgovPartition = partition{ }, "email": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -39028,6 +41735,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -39041,12 +41757,82 @@ var awsusgovPartition = partition{ }, "emr-containers": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + }, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + }, }, }, "es": service{ @@ -40232,6 +43018,62 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40447,6 +43289,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40653,6 +43505,13 @@ var awsusgovPartition = partition{ }, }, }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -40986,12 +43845,76 @@ var awsusgovPartition = 
partition{ }, "pi": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -41379,31 +44302,6 @@ var awsusgovPartition = partition{ }, }, }, - "resource-explorer-2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.us-gov-east-1.api.aws", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "resource-explorer-2.us-gov-west-1.api.aws", - }, - }, - }, "resource-groups": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -41567,6 +44465,13 @@ var awsusgovPartition = partition{ }, }, }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "runtime.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -41974,6 +44879,46 @@ var awsusgovPartition = partition{ }, }, }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: 
"securitylake.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "serverlessrepo": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -42225,6 +45170,78 @@ var awsusgovPartition = partition{ }, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "simspaceweaver": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -42289,6 +45306,15 @@ var awsusgovPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -42298,6 +45324,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -42887,21 +45922,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"textract.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.api.aws", + }, }, }, "transcribe": service{ @@ -43032,6 +46091,46 @@ var awsusgovPartition = partition{ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "waf-regional": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43296,6 +46395,20 @@ var awsisoPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43308,6 +46421,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "appconfig": service{ @@ -43345,6 +46461,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43770,6 +46896,55 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, "glacier": service{ Endpoints: 
serviceEndpoints{ endpointKey{ @@ -43952,42 +47127,12 @@ var awsisoPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - }, }, }, "rbin": service{ @@ -44032,37 +47177,10 @@ var awsisoPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-iso-east-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-iso-west-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-iso-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -44071,16 +47189,7 @@ var awsisoPartition = partition{ endpointKey{ Region: "rds.us-iso-west-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -44093,12 +47202,12 @@ var awsisoPartition = partition{ Region: "us-iso-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -44111,12 +47220,12 @@ var awsisoPartition = partition{ Region: "us-iso-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-west-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -44127,40 +47236,20 @@ var awsisoPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-iso-east-1", + Region: "us-iso-east-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "redshift.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, - 
Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-iso-west-1", + Region: "us-iso-west-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "redshift.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", }, }, }, @@ -44269,6 +47358,131 @@ var awsisoPartition = partition{ }, }, }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "s3-control.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "s3-control.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44284,6 +47498,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sns": service{ @@ -44400,6 +47617,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "transcribe": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -44492,6 +47716,20 @@ var awsisobPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44499,6 +47737,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44525,6 +47770,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -44774,6 +48026,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44861,6 +48120,20 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -44898,24 +48171,9 @@ var awsisobPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - }, }, }, "rbin": service{ @@ -44942,28 +48200,10 @@ var awsisobPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-isob-east-1", - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-isob-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -44976,12 +48216,12 @@ var awsisobPartition = partition{ Region: "us-isob-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", }, endpointKey{ Region: "us-isob-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -44992,22 +48232,12 @@ var awsisobPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-isob-east-1", + Region: "us-isob-east-1", }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", }, }, }, @@ -45080,6 +48310,82 @@ var awsisobPartition = partition{ }, }, }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -45133,6 +48439,37 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "streams.dynamodb": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -45229,7 +48566,11 @@ var awsisoePartition = partition{ SignatureVersions: []string{"v4"}, }, }, - Regions: regions{}, + Regions: regions{ + "eu-isoe-west-1": region{ + Description: "EU ISOE West", + }, + }, Services: services{}, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go index 4601f883..992ed046 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -256,8 +256,17 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro s := a.Expected.(int) result = s == req.HTTPResponse.StatusCode case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) + switch ex := a.Expected.(type) { + case string: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == ex + } + case bool: + if ex { + result = err != nil + } else { + result = err == nil + } } default: waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index f3ce8183..2945185b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -349,7 +349,7 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s if cfg.hasSSOTokenProviderConfiguration() { skippedFiles = 0 for _, f := range files { - section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)) if ok { var ssoSession ssoSession ssoSession.setFromIniSection(section) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 0dfd96d5..e72db4de 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.50.6" +const SDKVersion = "1.55.7" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 05833405..2ca0b19d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri } func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { + // If it's empty, and not ec2, generate an empty value + if !value.IsNil() && value.Len() == 0 && !q.isEC2 { v.Set(prefix, "") return 
nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/api.go b/vendor/github.com/aws/aws-sdk-go/service/athena/api.go deleted file mode 100644 index 1cf9a2c9..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/api.go +++ /dev/null @@ -1,19271 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package athena - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opBatchGetNamedQuery = "BatchGetNamedQuery" - -// BatchGetNamedQueryRequest generates a "aws/request.Request" representing the -// client's request for the BatchGetNamedQuery operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchGetNamedQuery for more information on using the BatchGetNamedQuery -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the BatchGetNamedQueryRequest method. -// req, resp := client.BatchGetNamedQueryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetNamedQuery -func (c *Athena) BatchGetNamedQueryRequest(input *BatchGetNamedQueryInput) (req *request.Request, output *BatchGetNamedQueryOutput) { - op := &request.Operation{ - Name: opBatchGetNamedQuery, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchGetNamedQueryInput{} - } - - output = &BatchGetNamedQueryOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchGetNamedQuery API operation for Amazon Athena. -// -// Returns the details of a single named query or a list of up to 50 queries, -// which you provide as an array of query ID strings. Requires you to have access -// to the workgroup in which the queries were saved. Use ListNamedQueriesInput -// to get the list of named query IDs in the specified workgroup. If information -// could not be retrieved for a submitted query ID, information about the query -// ID submitted is listed under UnprocessedNamedQueryId. Named queries differ -// from executed queries. Use BatchGetQueryExecutionInput to get details about -// each unique query execution, and ListQueryExecutionsInput to get a list of -// query execution IDs. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation BatchGetNamedQuery for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetNamedQuery -func (c *Athena) BatchGetNamedQuery(input *BatchGetNamedQueryInput) (*BatchGetNamedQueryOutput, error) { - req, out := c.BatchGetNamedQueryRequest(input) - return out, req.Send() -} - -// BatchGetNamedQueryWithContext is the same as BatchGetNamedQuery with the addition of -// the ability to pass a context and additional request options. -// -// See BatchGetNamedQuery for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) BatchGetNamedQueryWithContext(ctx aws.Context, input *BatchGetNamedQueryInput, opts ...request.Option) (*BatchGetNamedQueryOutput, error) { - req, out := c.BatchGetNamedQueryRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opBatchGetPreparedStatement = "BatchGetPreparedStatement" - -// BatchGetPreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the BatchGetPreparedStatement operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchGetPreparedStatement for more information on using the BatchGetPreparedStatement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the BatchGetPreparedStatementRequest method. -// req, resp := client.BatchGetPreparedStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetPreparedStatement -func (c *Athena) BatchGetPreparedStatementRequest(input *BatchGetPreparedStatementInput) (req *request.Request, output *BatchGetPreparedStatementOutput) { - op := &request.Operation{ - Name: opBatchGetPreparedStatement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchGetPreparedStatementInput{} - } - - output = &BatchGetPreparedStatementOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchGetPreparedStatement API operation for Amazon Athena. -// -// Returns the details of a single prepared statement or a list of up to 256 -// prepared statements for the array of prepared statement names that you provide. -// Requires you to have access to the workgroup to which the prepared statements -// belong. If a prepared statement cannot be retrieved for the name specified, -// the statement is listed in UnprocessedPreparedStatementNames. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation BatchGetPreparedStatement for usage and error information. 
-// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetPreparedStatement -func (c *Athena) BatchGetPreparedStatement(input *BatchGetPreparedStatementInput) (*BatchGetPreparedStatementOutput, error) { - req, out := c.BatchGetPreparedStatementRequest(input) - return out, req.Send() -} - -// BatchGetPreparedStatementWithContext is the same as BatchGetPreparedStatement with the addition of -// the ability to pass a context and additional request options. -// -// See BatchGetPreparedStatement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) BatchGetPreparedStatementWithContext(ctx aws.Context, input *BatchGetPreparedStatementInput, opts ...request.Option) (*BatchGetPreparedStatementOutput, error) { - req, out := c.BatchGetPreparedStatementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opBatchGetQueryExecution = "BatchGetQueryExecution" - -// BatchGetQueryExecutionRequest generates a "aws/request.Request" representing the -// client's request for the BatchGetQueryExecution operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See BatchGetQueryExecution for more information on using the BatchGetQueryExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the BatchGetQueryExecutionRequest method. -// req, resp := client.BatchGetQueryExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetQueryExecution -func (c *Athena) BatchGetQueryExecutionRequest(input *BatchGetQueryExecutionInput) (req *request.Request, output *BatchGetQueryExecutionOutput) { - op := &request.Operation{ - Name: opBatchGetQueryExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &BatchGetQueryExecutionInput{} - } - - output = &BatchGetQueryExecutionOutput{} - req = c.newRequest(op, input, output) - return -} - -// BatchGetQueryExecution API operation for Amazon Athena. -// -// Returns the details of a single query execution or a list of up to 50 query -// executions, which you provide as an array of query execution ID strings. -// Requires you to have access to the workgroup in which the queries ran. To -// get a list of query execution IDs, use ListQueryExecutionsInput$WorkGroup. -// Query executions differ from named (saved) queries. Use BatchGetNamedQueryInput -// to get details about named queries. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation BatchGetQueryExecution for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetQueryExecution -func (c *Athena) BatchGetQueryExecution(input *BatchGetQueryExecutionInput) (*BatchGetQueryExecutionOutput, error) { - req, out := c.BatchGetQueryExecutionRequest(input) - return out, req.Send() -} - -// BatchGetQueryExecutionWithContext is the same as BatchGetQueryExecution with the addition of -// the ability to pass a context and additional request options. -// -// See BatchGetQueryExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) BatchGetQueryExecutionWithContext(ctx aws.Context, input *BatchGetQueryExecutionInput, opts ...request.Option) (*BatchGetQueryExecutionOutput, error) { - req, out := c.BatchGetQueryExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCancelCapacityReservation = "CancelCapacityReservation" - -// CancelCapacityReservationRequest generates a "aws/request.Request" representing the -// client's request for the CancelCapacityReservation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CancelCapacityReservation for more information on using the CancelCapacityReservation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CancelCapacityReservationRequest method. -// req, resp := client.CancelCapacityReservationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CancelCapacityReservation -func (c *Athena) CancelCapacityReservationRequest(input *CancelCapacityReservationInput) (req *request.Request, output *CancelCapacityReservationOutput) { - op := &request.Operation{ - Name: opCancelCapacityReservation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CancelCapacityReservationInput{} - } - - output = &CancelCapacityReservationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// CancelCapacityReservation API operation for Amazon Athena. 
-// -// Cancels the capacity reservation with the specified name. Cancelled reservations -// remain in your account and will be deleted 45 days after cancellation. During -// the 45 days, you cannot re-purpose or reuse a reservation that has been cancelled, -// but you can refer to its tags and view it for historical reference. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation CancelCapacityReservation for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CancelCapacityReservation -func (c *Athena) CancelCapacityReservation(input *CancelCapacityReservationInput) (*CancelCapacityReservationOutput, error) { - req, out := c.CancelCapacityReservationRequest(input) - return out, req.Send() -} - -// CancelCapacityReservationWithContext is the same as CancelCapacityReservation with the addition of -// the ability to pass a context and additional request options. -// -// See CancelCapacityReservation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) CancelCapacityReservationWithContext(ctx aws.Context, input *CancelCapacityReservationInput, opts ...request.Option) (*CancelCapacityReservationOutput, error) { - req, out := c.CancelCapacityReservationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateCapacityReservation = "CreateCapacityReservation" - -// CreateCapacityReservationRequest generates a "aws/request.Request" representing the -// client's request for the CreateCapacityReservation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateCapacityReservation for more information on using the CreateCapacityReservation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CreateCapacityReservationRequest method. 
-// req, resp := client.CreateCapacityReservationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateCapacityReservation -func (c *Athena) CreateCapacityReservationRequest(input *CreateCapacityReservationInput) (req *request.Request, output *CreateCapacityReservationOutput) { - op := &request.Operation{ - Name: opCreateCapacityReservation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateCapacityReservationInput{} - } - - output = &CreateCapacityReservationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// CreateCapacityReservation API operation for Amazon Athena. -// -// Creates a capacity reservation with the specified name and number of requested -// data processing units. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation CreateCapacityReservation for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateCapacityReservation -func (c *Athena) CreateCapacityReservation(input *CreateCapacityReservationInput) (*CreateCapacityReservationOutput, error) { - req, out := c.CreateCapacityReservationRequest(input) - return out, req.Send() -} - -// CreateCapacityReservationWithContext is the same as CreateCapacityReservation with the addition of -// the ability to pass a context and additional request options. -// -// See CreateCapacityReservation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) CreateCapacityReservationWithContext(ctx aws.Context, input *CreateCapacityReservationInput, opts ...request.Option) (*CreateCapacityReservationOutput, error) { - req, out := c.CreateCapacityReservationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateDataCatalog = "CreateDataCatalog" - -// CreateDataCatalogRequest generates a "aws/request.Request" representing the -// client's request for the CreateDataCatalog operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateDataCatalog for more information on using the CreateDataCatalog -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// // Example sending a request using the CreateDataCatalogRequest method. -// req, resp := client.CreateDataCatalogRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateDataCatalog -func (c *Athena) CreateDataCatalogRequest(input *CreateDataCatalogInput) (req *request.Request, output *CreateDataCatalogOutput) { - op := &request.Operation{ - Name: opCreateDataCatalog, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateDataCatalogInput{} - } - - output = &CreateDataCatalogOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// CreateDataCatalog API operation for Amazon Athena. -// -// Creates (registers) a data catalog with the specified name and properties. -// Catalogs created are visible to all users of the same Amazon Web Services -// account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation CreateDataCatalog for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateDataCatalog -func (c *Athena) CreateDataCatalog(input *CreateDataCatalogInput) (*CreateDataCatalogOutput, error) { - req, out := c.CreateDataCatalogRequest(input) - return out, req.Send() -} - -// CreateDataCatalogWithContext is the same as CreateDataCatalog with the addition of -// the ability to pass a context and additional request options. -// -// See CreateDataCatalog for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) CreateDataCatalogWithContext(ctx aws.Context, input *CreateDataCatalogInput, opts ...request.Option) (*CreateDataCatalogOutput, error) { - req, out := c.CreateDataCatalogRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateNamedQuery = "CreateNamedQuery" - -// CreateNamedQueryRequest generates a "aws/request.Request" representing the -// client's request for the CreateNamedQuery operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateNamedQuery for more information on using the CreateNamedQuery -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// // Example sending a request using the CreateNamedQueryRequest method. -// req, resp := client.CreateNamedQueryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNamedQuery -func (c *Athena) CreateNamedQueryRequest(input *CreateNamedQueryInput) (req *request.Request, output *CreateNamedQueryOutput) { - op := &request.Operation{ - Name: opCreateNamedQuery, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateNamedQueryInput{} - } - - output = &CreateNamedQueryOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateNamedQuery API operation for Amazon Athena. -// -// Creates a named query in the specified workgroup. Requires that you have -// access to the workgroup. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation CreateNamedQuery for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNamedQuery -func (c *Athena) CreateNamedQuery(input *CreateNamedQueryInput) (*CreateNamedQueryOutput, error) { - req, out := c.CreateNamedQueryRequest(input) - return out, req.Send() -} - -// CreateNamedQueryWithContext is the same as CreateNamedQuery with the addition of -// the ability to pass a context and additional request options. -// -// See CreateNamedQuery for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) CreateNamedQueryWithContext(ctx aws.Context, input *CreateNamedQueryInput, opts ...request.Option) (*CreateNamedQueryOutput, error) { - req, out := c.CreateNamedQueryRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateNotebook = "CreateNotebook" - -// CreateNotebookRequest generates a "aws/request.Request" representing the -// client's request for the CreateNotebook operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateNotebook for more information on using the CreateNotebook -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CreateNotebookRequest method. 
-// req, resp := client.CreateNotebookRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNotebook -func (c *Athena) CreateNotebookRequest(input *CreateNotebookInput) (req *request.Request, output *CreateNotebookOutput) { - op := &request.Operation{ - Name: opCreateNotebook, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateNotebookInput{} - } - - output = &CreateNotebookOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateNotebook API operation for Amazon Athena. -// -// Creates an empty ipynb file in the specified Apache Spark enabled workgroup. -// Throws an error if a file in the workgroup with the same name already exists. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation CreateNotebook for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNotebook -func (c *Athena) CreateNotebook(input *CreateNotebookInput) (*CreateNotebookOutput, error) { - req, out := c.CreateNotebookRequest(input) - return out, req.Send() -} - -// CreateNotebookWithContext is the same as CreateNotebook with the addition of -// the ability to pass a context and additional request options. -// -// See CreateNotebook for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) CreateNotebookWithContext(ctx aws.Context, input *CreateNotebookInput, opts ...request.Option) (*CreateNotebookOutput, error) { - req, out := c.CreateNotebookRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreatePreparedStatement = "CreatePreparedStatement" - -// CreatePreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the CreatePreparedStatement operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreatePreparedStatement for more information on using the CreatePreparedStatement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CreatePreparedStatementRequest method. 
-// req, resp := client.CreatePreparedStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePreparedStatement -func (c *Athena) CreatePreparedStatementRequest(input *CreatePreparedStatementInput) (req *request.Request, output *CreatePreparedStatementOutput) { - op := &request.Operation{ - Name: opCreatePreparedStatement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreatePreparedStatementInput{} - } - - output = &CreatePreparedStatementOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// CreatePreparedStatement API operation for Amazon Athena. -// -// Creates a prepared statement for use with SQL queries in Athena. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation CreatePreparedStatement for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePreparedStatement -func (c *Athena) CreatePreparedStatement(input *CreatePreparedStatementInput) (*CreatePreparedStatementOutput, error) { - req, out := c.CreatePreparedStatementRequest(input) - return out, req.Send() -} - -// CreatePreparedStatementWithContext is the same as CreatePreparedStatement with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePreparedStatement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) CreatePreparedStatementWithContext(ctx aws.Context, input *CreatePreparedStatementInput, opts ...request.Option) (*CreatePreparedStatementOutput, error) { - req, out := c.CreatePreparedStatementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreatePresignedNotebookUrl = "CreatePresignedNotebookUrl" - -// CreatePresignedNotebookUrlRequest generates a "aws/request.Request" representing the -// client's request for the CreatePresignedNotebookUrl operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreatePresignedNotebookUrl for more information on using the CreatePresignedNotebookUrl -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// // Example sending a request using the CreatePresignedNotebookUrlRequest method. -// req, resp := client.CreatePresignedNotebookUrlRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePresignedNotebookUrl -func (c *Athena) CreatePresignedNotebookUrlRequest(input *CreatePresignedNotebookUrlInput) (req *request.Request, output *CreatePresignedNotebookUrlOutput) { - op := &request.Operation{ - Name: opCreatePresignedNotebookUrl, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreatePresignedNotebookUrlInput{} - } - - output = &CreatePresignedNotebookUrlOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreatePresignedNotebookUrl API operation for Amazon Athena. -// -// Gets an authentication token and the URL at which the notebook can be accessed. -// During programmatic access, CreatePresignedNotebookUrl must be called every -// 10 minutes to refresh the authentication token. For information about granting -// programmatic access, see Grant programmatic access (https://docs.aws.amazon.com/athena/latest/ug/setting-up.html#setting-up-grant-programmatic-access). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation CreatePresignedNotebookUrl for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePresignedNotebookUrl -func (c *Athena) CreatePresignedNotebookUrl(input *CreatePresignedNotebookUrlInput) (*CreatePresignedNotebookUrlOutput, error) { - req, out := c.CreatePresignedNotebookUrlRequest(input) - return out, req.Send() -} - -// CreatePresignedNotebookUrlWithContext is the same as CreatePresignedNotebookUrl with the addition of -// the ability to pass a context and additional request options. -// -// See CreatePresignedNotebookUrl for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) CreatePresignedNotebookUrlWithContext(ctx aws.Context, input *CreatePresignedNotebookUrlInput, opts ...request.Option) (*CreatePresignedNotebookUrlOutput, error) { - req, out := c.CreatePresignedNotebookUrlRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateWorkGroup = "CreateWorkGroup" - -// CreateWorkGroupRequest generates a "aws/request.Request" representing the -// client's request for the CreateWorkGroup operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateWorkGroup for more information on using the CreateWorkGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the CreateWorkGroupRequest method. -// req, resp := client.CreateWorkGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateWorkGroup -func (c *Athena) CreateWorkGroupRequest(input *CreateWorkGroupInput) (req *request.Request, output *CreateWorkGroupOutput) { - op := &request.Operation{ - Name: opCreateWorkGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateWorkGroupInput{} - } - - output = &CreateWorkGroupOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// CreateWorkGroup API operation for Amazon Athena. -// -// Creates a workgroup with the specified name. A workgroup can be an Apache -// Spark enabled workgroup or an Athena SQL workgroup. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation CreateWorkGroup for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateWorkGroup -func (c *Athena) CreateWorkGroup(input *CreateWorkGroupInput) (*CreateWorkGroupOutput, error) { - req, out := c.CreateWorkGroupRequest(input) - return out, req.Send() -} - -// CreateWorkGroupWithContext is the same as CreateWorkGroup with the addition of -// the ability to pass a context and additional request options. -// -// See CreateWorkGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) CreateWorkGroupWithContext(ctx aws.Context, input *CreateWorkGroupInput, opts ...request.Option) (*CreateWorkGroupOutput, error) { - req, out := c.CreateWorkGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteCapacityReservation = "DeleteCapacityReservation" - -// DeleteCapacityReservationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCapacityReservation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See DeleteCapacityReservation for more information on using the DeleteCapacityReservation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteCapacityReservationRequest method. -// req, resp := client.DeleteCapacityReservationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteCapacityReservation -func (c *Athena) DeleteCapacityReservationRequest(input *DeleteCapacityReservationInput) (req *request.Request, output *DeleteCapacityReservationOutput) { - op := &request.Operation{ - Name: opDeleteCapacityReservation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteCapacityReservationInput{} - } - - output = &DeleteCapacityReservationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteCapacityReservation API operation for Amazon Athena. -// -// Deletes a cancelled capacity reservation. A reservation must be cancelled -// before it can be deleted. A deleted reservation is immediately removed from -// your account and can no longer be referenced, including by its ARN. A deleted -// reservation cannot be called by GetCapacityReservation, and deleted reservations -// do not appear in the output of ListCapacityReservations. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation DeleteCapacityReservation for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteCapacityReservation -func (c *Athena) DeleteCapacityReservation(input *DeleteCapacityReservationInput) (*DeleteCapacityReservationOutput, error) { - req, out := c.DeleteCapacityReservationRequest(input) - return out, req.Send() -} - -// DeleteCapacityReservationWithContext is the same as DeleteCapacityReservation with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteCapacityReservation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) DeleteCapacityReservationWithContext(ctx aws.Context, input *DeleteCapacityReservationInput, opts ...request.Option) (*DeleteCapacityReservationOutput, error) { - req, out := c.DeleteCapacityReservationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteDataCatalog = "DeleteDataCatalog" - -// DeleteDataCatalogRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDataCatalog operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteDataCatalog for more information on using the DeleteDataCatalog -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteDataCatalogRequest method. -// req, resp := client.DeleteDataCatalogRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteDataCatalog -func (c *Athena) DeleteDataCatalogRequest(input *DeleteDataCatalogInput) (req *request.Request, output *DeleteDataCatalogOutput) { - op := &request.Operation{ - Name: opDeleteDataCatalog, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteDataCatalogInput{} - } - - output = &DeleteDataCatalogOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteDataCatalog API operation for Amazon Athena. -// -// Deletes a data catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation DeleteDataCatalog for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteDataCatalog -func (c *Athena) DeleteDataCatalog(input *DeleteDataCatalogInput) (*DeleteDataCatalogOutput, error) { - req, out := c.DeleteDataCatalogRequest(input) - return out, req.Send() -} - -// DeleteDataCatalogWithContext is the same as DeleteDataCatalog with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteDataCatalog for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) DeleteDataCatalogWithContext(ctx aws.Context, input *DeleteDataCatalogInput, opts ...request.Option) (*DeleteDataCatalogOutput, error) { - req, out := c.DeleteDataCatalogRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteNamedQuery = "DeleteNamedQuery" - -// DeleteNamedQueryRequest generates a "aws/request.Request" representing the -// client's request for the DeleteNamedQuery operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteNamedQuery for more information on using the DeleteNamedQuery -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteNamedQueryRequest method. -// req, resp := client.DeleteNamedQueryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNamedQuery -func (c *Athena) DeleteNamedQueryRequest(input *DeleteNamedQueryInput) (req *request.Request, output *DeleteNamedQueryOutput) { - op := &request.Operation{ - Name: opDeleteNamedQuery, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteNamedQueryInput{} - } - - output = &DeleteNamedQueryOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteNamedQuery API operation for Amazon Athena. -// -// Deletes the named query if you have access to the workgroup in which the -// query was saved. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation DeleteNamedQuery for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNamedQuery -func (c *Athena) DeleteNamedQuery(input *DeleteNamedQueryInput) (*DeleteNamedQueryOutput, error) { - req, out := c.DeleteNamedQueryRequest(input) - return out, req.Send() -} - -// DeleteNamedQueryWithContext is the same as DeleteNamedQuery with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteNamedQuery for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) DeleteNamedQueryWithContext(ctx aws.Context, input *DeleteNamedQueryInput, opts ...request.Option) (*DeleteNamedQueryOutput, error) { - req, out := c.DeleteNamedQueryRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeleteNotebook = "DeleteNotebook" - -// DeleteNotebookRequest generates a "aws/request.Request" representing the -// client's request for the DeleteNotebook operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteNotebook for more information on using the DeleteNotebook -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteNotebookRequest method. -// req, resp := client.DeleteNotebookRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNotebook -func (c *Athena) DeleteNotebookRequest(input *DeleteNotebookInput) (req *request.Request, output *DeleteNotebookOutput) { - op := &request.Operation{ - Name: opDeleteNotebook, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteNotebookInput{} - } - - output = &DeleteNotebookOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteNotebook API operation for Amazon Athena. -// -// Deletes the specified notebook. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation DeleteNotebook for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNotebook -func (c *Athena) DeleteNotebook(input *DeleteNotebookInput) (*DeleteNotebookOutput, error) { - req, out := c.DeleteNotebookRequest(input) - return out, req.Send() -} - -// DeleteNotebookWithContext is the same as DeleteNotebook with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteNotebook for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) DeleteNotebookWithContext(ctx aws.Context, input *DeleteNotebookInput, opts ...request.Option) (*DeleteNotebookOutput, error) { - req, out := c.DeleteNotebookRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opDeletePreparedStatement = "DeletePreparedStatement" - -// DeletePreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the DeletePreparedStatement operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeletePreparedStatement for more information on using the DeletePreparedStatement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeletePreparedStatementRequest method. -// req, resp := client.DeletePreparedStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeletePreparedStatement -func (c *Athena) DeletePreparedStatementRequest(input *DeletePreparedStatementInput) (req *request.Request, output *DeletePreparedStatementOutput) { - op := &request.Operation{ - Name: opDeletePreparedStatement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeletePreparedStatementInput{} - } - - output = &DeletePreparedStatementOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeletePreparedStatement API operation for Amazon Athena. -// -// Deletes the prepared statement with the specified name from the specified -// workgroup. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation DeletePreparedStatement for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeletePreparedStatement -func (c *Athena) DeletePreparedStatement(input *DeletePreparedStatementInput) (*DeletePreparedStatementOutput, error) { - req, out := c.DeletePreparedStatementRequest(input) - return out, req.Send() -} - -// DeletePreparedStatementWithContext is the same as DeletePreparedStatement with the addition of -// the ability to pass a context and additional request options. -// -// See DeletePreparedStatement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
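(Reviewer aside, not part of the removed file: the generated doc comments above repeatedly describe the v1 error contract — operations return an awserr.Error whose Code and Message are inspected via a runtime type assertion. A minimal caller-side sketch of that pattern for DeletePreparedStatement, with hypothetical statement and workgroup names, for comparison against whatever client replaces this one.)

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	// Usual v1 construction: a shared session feeding the service client.
	svc := athena.New(session.Must(session.NewSession()))

	_, err := svc.DeletePreparedStatement(&athena.DeletePreparedStatementInput{
		StatementName: aws.String("my_statement"), // hypothetical name
		WorkGroup:     aws.String("primary"),
	})
	if err != nil {
		// Runtime type assertion on awserr.Error, as the generated docs describe.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case athena.ErrCodeResourceNotFoundException:
				log.Printf("not found: %s", aerr.Message())
			case athena.ErrCodeInvalidRequestException, athena.ErrCodeInternalServerException:
				log.Printf("request failed: %s", aerr.Message())
			default:
				log.Printf("unexpected error: %v", aerr)
			}
			return
		}
		log.Fatal(err)
	}
	fmt.Println("prepared statement deleted")
}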
-func (c *Athena) DeletePreparedStatementWithContext(ctx aws.Context, input *DeletePreparedStatementInput, opts ...request.Option) (*DeletePreparedStatementOutput, error) { - req, out := c.DeletePreparedStatementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteWorkGroup = "DeleteWorkGroup" - -// DeleteWorkGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteWorkGroup operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteWorkGroup for more information on using the DeleteWorkGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the DeleteWorkGroupRequest method. -// req, resp := client.DeleteWorkGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteWorkGroup -func (c *Athena) DeleteWorkGroupRequest(input *DeleteWorkGroupInput) (req *request.Request, output *DeleteWorkGroupOutput) { - op := &request.Operation{ - Name: opDeleteWorkGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteWorkGroupInput{} - } - - output = &DeleteWorkGroupOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteWorkGroup API operation for Amazon Athena. -// -// Deletes the workgroup with the specified name. The primary workgroup cannot -// be deleted. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation DeleteWorkGroup for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteWorkGroup -func (c *Athena) DeleteWorkGroup(input *DeleteWorkGroupInput) (*DeleteWorkGroupOutput, error) { - req, out := c.DeleteWorkGroupRequest(input) - return out, req.Send() -} - -// DeleteWorkGroupWithContext is the same as DeleteWorkGroup with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteWorkGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Athena) DeleteWorkGroupWithContext(ctx aws.Context, input *DeleteWorkGroupInput, opts ...request.Option) (*DeleteWorkGroupOutput, error) { - req, out := c.DeleteWorkGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opExportNotebook = "ExportNotebook" - -// ExportNotebookRequest generates a "aws/request.Request" representing the -// client's request for the ExportNotebook operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ExportNotebook for more information on using the ExportNotebook -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ExportNotebookRequest method. -// req, resp := client.ExportNotebookRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ExportNotebook -func (c *Athena) ExportNotebookRequest(input *ExportNotebookInput) (req *request.Request, output *ExportNotebookOutput) { - op := &request.Operation{ - Name: opExportNotebook, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ExportNotebookInput{} - } - - output = &ExportNotebookOutput{} - req = c.newRequest(op, input, output) - return -} - -// ExportNotebook API operation for Amazon Athena. -// -// Exports the specified notebook and its metadata. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ExportNotebook for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ExportNotebook -func (c *Athena) ExportNotebook(input *ExportNotebookInput) (*ExportNotebookOutput, error) { - req, out := c.ExportNotebookRequest(input) - return out, req.Send() -} - -// ExportNotebookWithContext is the same as ExportNotebook with the addition of -// the ability to pass a context and additional request options. -// -// See ExportNotebook for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ExportNotebookWithContext(ctx aws.Context, input *ExportNotebookInput, opts ...request.Option) (*ExportNotebookOutput, error) { - req, out := c.ExportNotebookRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opGetCalculationExecution = "GetCalculationExecution" - -// GetCalculationExecutionRequest generates a "aws/request.Request" representing the -// client's request for the GetCalculationExecution operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCalculationExecution for more information on using the GetCalculationExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetCalculationExecutionRequest method. -// req, resp := client.GetCalculationExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecution -func (c *Athena) GetCalculationExecutionRequest(input *GetCalculationExecutionInput) (req *request.Request, output *GetCalculationExecutionOutput) { - op := &request.Operation{ - Name: opGetCalculationExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCalculationExecutionInput{} - } - - output = &GetCalculationExecutionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCalculationExecution API operation for Amazon Athena. -// -// Describes a previously submitted calculation execution. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetCalculationExecution for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecution -func (c *Athena) GetCalculationExecution(input *GetCalculationExecutionInput) (*GetCalculationExecutionOutput, error) { - req, out := c.GetCalculationExecutionRequest(input) - return out, req.Send() -} - -// GetCalculationExecutionWithContext is the same as GetCalculationExecution with the addition of -// the ability to pass a context and additional request options. -// -// See GetCalculationExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
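(Reviewer aside, not part of the removed file: the comment blocks above spell out the contract of the *WithContext variants — a non-nil context used for cancellation, plus optional request.Option values. A short usage sketch under those rules, using GetCalculationExecution with a hypothetical execution ID.)

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	svc := athena.New(session.Must(session.NewSession()))

	// The context must be non-nil; a nil context panics, per the generated docs.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.GetCalculationExecutionWithContext(ctx, &athena.GetCalculationExecutionInput{
		CalculationExecutionId: aws.String("abc123-example"), // hypothetical ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // response is only valid when err is nil
}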
-func (c *Athena) GetCalculationExecutionWithContext(ctx aws.Context, input *GetCalculationExecutionInput, opts ...request.Option) (*GetCalculationExecutionOutput, error) { - req, out := c.GetCalculationExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCalculationExecutionCode = "GetCalculationExecutionCode" - -// GetCalculationExecutionCodeRequest generates a "aws/request.Request" representing the -// client's request for the GetCalculationExecutionCode operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCalculationExecutionCode for more information on using the GetCalculationExecutionCode -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetCalculationExecutionCodeRequest method. -// req, resp := client.GetCalculationExecutionCodeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecutionCode -func (c *Athena) GetCalculationExecutionCodeRequest(input *GetCalculationExecutionCodeInput) (req *request.Request, output *GetCalculationExecutionCodeOutput) { - op := &request.Operation{ - Name: opGetCalculationExecutionCode, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCalculationExecutionCodeInput{} - } - - output = &GetCalculationExecutionCodeOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCalculationExecutionCode API operation for Amazon Athena. -// -// Retrieves the unencrypted code that was executed for the calculation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetCalculationExecutionCode for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecutionCode -func (c *Athena) GetCalculationExecutionCode(input *GetCalculationExecutionCodeInput) (*GetCalculationExecutionCodeOutput, error) { - req, out := c.GetCalculationExecutionCodeRequest(input) - return out, req.Send() -} - -// GetCalculationExecutionCodeWithContext is the same as GetCalculationExecutionCode with the addition of -// the ability to pass a context and additional request options. -// -// See GetCalculationExecutionCode for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetCalculationExecutionCodeWithContext(ctx aws.Context, input *GetCalculationExecutionCodeInput, opts ...request.Option) (*GetCalculationExecutionCodeOutput, error) { - req, out := c.GetCalculationExecutionCodeRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCalculationExecutionStatus = "GetCalculationExecutionStatus" - -// GetCalculationExecutionStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetCalculationExecutionStatus operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCalculationExecutionStatus for more information on using the GetCalculationExecutionStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetCalculationExecutionStatusRequest method. -// req, resp := client.GetCalculationExecutionStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecutionStatus -func (c *Athena) GetCalculationExecutionStatusRequest(input *GetCalculationExecutionStatusInput) (req *request.Request, output *GetCalculationExecutionStatusOutput) { - op := &request.Operation{ - Name: opGetCalculationExecutionStatus, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCalculationExecutionStatusInput{} - } - - output = &GetCalculationExecutionStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCalculationExecutionStatus API operation for Amazon Athena. -// -// Gets the status of a current calculation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetCalculationExecutionStatus for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecutionStatus -func (c *Athena) GetCalculationExecutionStatus(input *GetCalculationExecutionStatusInput) (*GetCalculationExecutionStatusOutput, error) { - req, out := c.GetCalculationExecutionStatusRequest(input) - return out, req.Send() -} - -// GetCalculationExecutionStatusWithContext is the same as GetCalculationExecutionStatus with the addition of -// the ability to pass a context and additional request options. 
-// -// See GetCalculationExecutionStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetCalculationExecutionStatusWithContext(ctx aws.Context, input *GetCalculationExecutionStatusInput, opts ...request.Option) (*GetCalculationExecutionStatusOutput, error) { - req, out := c.GetCalculationExecutionStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCapacityAssignmentConfiguration = "GetCapacityAssignmentConfiguration" - -// GetCapacityAssignmentConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetCapacityAssignmentConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCapacityAssignmentConfiguration for more information on using the GetCapacityAssignmentConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetCapacityAssignmentConfigurationRequest method. -// req, resp := client.GetCapacityAssignmentConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCapacityAssignmentConfiguration -func (c *Athena) GetCapacityAssignmentConfigurationRequest(input *GetCapacityAssignmentConfigurationInput) (req *request.Request, output *GetCapacityAssignmentConfigurationOutput) { - op := &request.Operation{ - Name: opGetCapacityAssignmentConfiguration, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCapacityAssignmentConfigurationInput{} - } - - output = &GetCapacityAssignmentConfigurationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCapacityAssignmentConfiguration API operation for Amazon Athena. -// -// Gets the capacity assignment configuration for a capacity reservation, if -// one exists. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetCapacityAssignmentConfiguration for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCapacityAssignmentConfiguration -func (c *Athena) GetCapacityAssignmentConfiguration(input *GetCapacityAssignmentConfigurationInput) (*GetCapacityAssignmentConfigurationOutput, error) { - req, out := c.GetCapacityAssignmentConfigurationRequest(input) - return out, req.Send() -} - -// GetCapacityAssignmentConfigurationWithContext is the same as GetCapacityAssignmentConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See GetCapacityAssignmentConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetCapacityAssignmentConfigurationWithContext(ctx aws.Context, input *GetCapacityAssignmentConfigurationInput, opts ...request.Option) (*GetCapacityAssignmentConfigurationOutput, error) { - req, out := c.GetCapacityAssignmentConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetCapacityReservation = "GetCapacityReservation" - -// GetCapacityReservationRequest generates a "aws/request.Request" representing the -// client's request for the GetCapacityReservation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetCapacityReservation for more information on using the GetCapacityReservation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetCapacityReservationRequest method. -// req, resp := client.GetCapacityReservationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCapacityReservation -func (c *Athena) GetCapacityReservationRequest(input *GetCapacityReservationInput) (req *request.Request, output *GetCapacityReservationOutput) { - op := &request.Operation{ - Name: opGetCapacityReservation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetCapacityReservationInput{} - } - - output = &GetCapacityReservationOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetCapacityReservation API operation for Amazon Athena. -// -// Returns information about the capacity reservation with the specified name. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetCapacityReservation for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCapacityReservation -func (c *Athena) GetCapacityReservation(input *GetCapacityReservationInput) (*GetCapacityReservationOutput, error) { - req, out := c.GetCapacityReservationRequest(input) - return out, req.Send() -} - -// GetCapacityReservationWithContext is the same as GetCapacityReservation with the addition of -// the ability to pass a context and additional request options. -// -// See GetCapacityReservation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetCapacityReservationWithContext(ctx aws.Context, input *GetCapacityReservationInput, opts ...request.Option) (*GetCapacityReservationOutput, error) { - req, out := c.GetCapacityReservationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetDataCatalog = "GetDataCatalog" - -// GetDataCatalogRequest generates a "aws/request.Request" representing the -// client's request for the GetDataCatalog operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetDataCatalog for more information on using the GetDataCatalog -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetDataCatalogRequest method. -// req, resp := client.GetDataCatalogRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDataCatalog -func (c *Athena) GetDataCatalogRequest(input *GetDataCatalogInput) (req *request.Request, output *GetDataCatalogOutput) { - op := &request.Operation{ - Name: opGetDataCatalog, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetDataCatalogInput{} - } - - output = &GetDataCatalogOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetDataCatalog API operation for Amazon Athena. -// -// Returns the specified data catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetDataCatalog for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDataCatalog -func (c *Athena) GetDataCatalog(input *GetDataCatalogInput) (*GetDataCatalogOutput, error) { - req, out := c.GetDataCatalogRequest(input) - return out, req.Send() -} - -// GetDataCatalogWithContext is the same as GetDataCatalog with the addition of -// the ability to pass a context and additional request options. -// -// See GetDataCatalog for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetDataCatalogWithContext(ctx aws.Context, input *GetDataCatalogInput, opts ...request.Option) (*GetDataCatalogOutput, error) { - req, out := c.GetDataCatalogRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetDatabase = "GetDatabase" - -// GetDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the GetDatabase operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetDatabase for more information on using the GetDatabase -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetDatabaseRequest method. -// req, resp := client.GetDatabaseRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDatabase -func (c *Athena) GetDatabaseRequest(input *GetDatabaseInput) (req *request.Request, output *GetDatabaseOutput) { - op := &request.Operation{ - Name: opGetDatabase, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetDatabaseInput{} - } - - output = &GetDatabaseOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetDatabase API operation for Amazon Athena. -// -// Returns a database object for the specified database and data catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetDatabase for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - MetadataException -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDatabase -func (c *Athena) GetDatabase(input *GetDatabaseInput) (*GetDatabaseOutput, error) { - req, out := c.GetDatabaseRequest(input) - return out, req.Send() -} - -// GetDatabaseWithContext is the same as GetDatabase with the addition of -// the ability to pass a context and additional request options. -// -// See GetDatabase for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetDatabaseWithContext(ctx aws.Context, input *GetDatabaseInput, opts ...request.Option) (*GetDatabaseOutput, error) { - req, out := c.GetDatabaseRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetNamedQuery = "GetNamedQuery" - -// GetNamedQueryRequest generates a "aws/request.Request" representing the -// client's request for the GetNamedQuery operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetNamedQuery for more information on using the GetNamedQuery -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetNamedQueryRequest method. -// req, resp := client.GetNamedQueryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNamedQuery -func (c *Athena) GetNamedQueryRequest(input *GetNamedQueryInput) (req *request.Request, output *GetNamedQueryOutput) { - op := &request.Operation{ - Name: opGetNamedQuery, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetNamedQueryInput{} - } - - output = &GetNamedQueryOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetNamedQuery API operation for Amazon Athena. -// -// Returns information about a single query. Requires that you have access to -// the workgroup in which the query was saved. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetNamedQuery for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNamedQuery -func (c *Athena) GetNamedQuery(input *GetNamedQueryInput) (*GetNamedQueryOutput, error) { - req, out := c.GetNamedQueryRequest(input) - return out, req.Send() -} - -// GetNamedQueryWithContext is the same as GetNamedQuery with the addition of -// the ability to pass a context and additional request options. -// -// See GetNamedQuery for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetNamedQueryWithContext(ctx aws.Context, input *GetNamedQueryInput, opts ...request.Option) (*GetNamedQueryOutput, error) { - req, out := c.GetNamedQueryRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetNotebookMetadata = "GetNotebookMetadata" - -// GetNotebookMetadataRequest generates a "aws/request.Request" representing the -// client's request for the GetNotebookMetadata operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetNotebookMetadata for more information on using the GetNotebookMetadata -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetNotebookMetadataRequest method. -// req, resp := client.GetNotebookMetadataRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNotebookMetadata -func (c *Athena) GetNotebookMetadataRequest(input *GetNotebookMetadataInput) (req *request.Request, output *GetNotebookMetadataOutput) { - op := &request.Operation{ - Name: opGetNotebookMetadata, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetNotebookMetadataInput{} - } - - output = &GetNotebookMetadataOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetNotebookMetadata API operation for Amazon Athena. -// -// Retrieves notebook metadata for the specified notebook ID. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetNotebookMetadata for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNotebookMetadata -func (c *Athena) GetNotebookMetadata(input *GetNotebookMetadataInput) (*GetNotebookMetadataOutput, error) { - req, out := c.GetNotebookMetadataRequest(input) - return out, req.Send() -} - -// GetNotebookMetadataWithContext is the same as GetNotebookMetadata with the addition of -// the ability to pass a context and additional request options. -// -// See GetNotebookMetadata for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetNotebookMetadataWithContext(ctx aws.Context, input *GetNotebookMetadataInput, opts ...request.Option) (*GetNotebookMetadataOutput, error) { - req, out := c.GetNotebookMetadataRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetPreparedStatement = "GetPreparedStatement" - -// GetPreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the GetPreparedStatement operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetPreparedStatement for more information on using the GetPreparedStatement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetPreparedStatementRequest method. -// req, resp := client.GetPreparedStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetPreparedStatement -func (c *Athena) GetPreparedStatementRequest(input *GetPreparedStatementInput) (req *request.Request, output *GetPreparedStatementOutput) { - op := &request.Operation{ - Name: opGetPreparedStatement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetPreparedStatementInput{} - } - - output = &GetPreparedStatementOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetPreparedStatement API operation for Amazon Athena. -// -// Retrieves the prepared statement with the specified name from the specified -// workgroup. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetPreparedStatement for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetPreparedStatement -func (c *Athena) GetPreparedStatement(input *GetPreparedStatementInput) (*GetPreparedStatementOutput, error) { - req, out := c.GetPreparedStatementRequest(input) - return out, req.Send() -} - -// GetPreparedStatementWithContext is the same as GetPreparedStatement with the addition of -// the ability to pass a context and additional request options. -// -// See GetPreparedStatement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetPreparedStatementWithContext(ctx aws.Context, input *GetPreparedStatementInput, opts ...request.Option) (*GetPreparedStatementOutput, error) { - req, out := c.GetPreparedStatementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetQueryExecution = "GetQueryExecution" - -// GetQueryExecutionRequest generates a "aws/request.Request" representing the -// client's request for the GetQueryExecution operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetQueryExecution for more information on using the GetQueryExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetQueryExecutionRequest method. -// req, resp := client.GetQueryExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryExecution -func (c *Athena) GetQueryExecutionRequest(input *GetQueryExecutionInput) (req *request.Request, output *GetQueryExecutionOutput) { - op := &request.Operation{ - Name: opGetQueryExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetQueryExecutionInput{} - } - - output = &GetQueryExecutionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetQueryExecution API operation for Amazon Athena. -// -// Returns information about a single execution of a query if you have access -// to the workgroup in which the query ran. Each time a query executes, information -// about the query execution is saved with a unique ID. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetQueryExecution for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryExecution -func (c *Athena) GetQueryExecution(input *GetQueryExecutionInput) (*GetQueryExecutionOutput, error) { - req, out := c.GetQueryExecutionRequest(input) - return out, req.Send() -} - -// GetQueryExecutionWithContext is the same as GetQueryExecution with the addition of -// the ability to pass a context and additional request options. -// -// See GetQueryExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetQueryExecutionWithContext(ctx aws.Context, input *GetQueryExecutionInput, opts ...request.Option) (*GetQueryExecutionOutput, error) { - req, out := c.GetQueryExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetQueryResults = "GetQueryResults" - -// GetQueryResultsRequest generates a "aws/request.Request" representing the -// client's request for the GetQueryResults operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetQueryResults for more information on using the GetQueryResults -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetQueryResultsRequest method. -// req, resp := client.GetQueryResultsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryResults -func (c *Athena) GetQueryResultsRequest(input *GetQueryResultsInput) (req *request.Request, output *GetQueryResultsOutput) { - op := &request.Operation{ - Name: opGetQueryResults, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetQueryResultsInput{} - } - - output = &GetQueryResultsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetQueryResults API operation for Amazon Athena. -// -// Streams the results of a single query execution specified by QueryExecutionId -// from the Athena query results location in Amazon S3. For more information, -// see Working with query results, recent queries, and output files (https://docs.aws.amazon.com/athena/latest/ug/querying.html) -// in the Amazon Athena User Guide. This request does not execute the query -// but returns results. Use StartQueryExecution to run a query. -// -// To stream query results successfully, the IAM principal with permission to -// call GetQueryResults also must have permissions to the Amazon S3 GetObject -// action for the Athena query results location. 
-// -// IAM principals with permission to the Amazon S3 GetObject action for the -// query results location are able to retrieve query results from Amazon S3 -// even if permission to the GetQueryResults action is denied. To restrict user -// or role access, ensure that Amazon S3 permissions to the Athena query location -// are denied. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetQueryResults for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryResults -func (c *Athena) GetQueryResults(input *GetQueryResultsInput) (*GetQueryResultsOutput, error) { - req, out := c.GetQueryResultsRequest(input) - return out, req.Send() -} - -// GetQueryResultsWithContext is the same as GetQueryResults with the addition of -// the ability to pass a context and additional request options. -// -// See GetQueryResults for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetQueryResultsWithContext(ctx aws.Context, input *GetQueryResultsInput, opts ...request.Option) (*GetQueryResultsOutput, error) { - req, out := c.GetQueryResultsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// GetQueryResultsPages iterates over the pages of a GetQueryResults operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetQueryResults method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetQueryResults operation. -// pageNum := 0 -// err := client.GetQueryResultsPages(params, -// func(page *athena.GetQueryResultsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) GetQueryResultsPages(input *GetQueryResultsInput, fn func(*GetQueryResultsOutput, bool) bool) error { - return c.GetQueryResultsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetQueryResultsPagesWithContext same as GetQueryResultsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
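For orientation while this vendored v1 client is deleted: a minimal sketch, assuming an existing AWS session, a hard-coded region, and a placeholder query execution ID, of how the helpers documented above (GetQueryExecutionWithContext and GetQueryResultsPagesWithContext) are typically combined to poll a query and then stream its result pages. None of the identifiers or values below come from this repository.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	ctx := context.Background()

	// Placeholder region and query execution ID, for illustration only.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := athena.New(sess)
	queryID := "11111111-2222-3333-4444-555555555555"

	// Check the state of the execution (e.g. SUCCEEDED, FAILED, CANCELLED).
	exec, err := client.GetQueryExecutionWithContext(ctx, &athena.GetQueryExecutionInput{
		QueryExecutionId: aws.String(queryID),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state:", aws.StringValue(exec.QueryExecution.Status.State))

	// Stream result pages; returning false from the callback stops iteration.
	err = client.GetQueryResultsPagesWithContext(ctx,
		&athena.GetQueryResultsInput{QueryExecutionId: aws.String(queryID)},
		func(page *athena.GetQueryResultsOutput, lastPage bool) bool {
			fmt.Printf("rows in page: %d (last page: %v)\n", len(page.ResultSet.Rows), lastPage)
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}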
-func (c *Athena) GetQueryResultsPagesWithContext(ctx aws.Context, input *GetQueryResultsInput, fn func(*GetQueryResultsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetQueryResultsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetQueryResultsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*GetQueryResultsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opGetQueryRuntimeStatistics = "GetQueryRuntimeStatistics" - -// GetQueryRuntimeStatisticsRequest generates a "aws/request.Request" representing the -// client's request for the GetQueryRuntimeStatistics operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetQueryRuntimeStatistics for more information on using the GetQueryRuntimeStatistics -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetQueryRuntimeStatisticsRequest method. -// req, resp := client.GetQueryRuntimeStatisticsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatistics -func (c *Athena) GetQueryRuntimeStatisticsRequest(input *GetQueryRuntimeStatisticsInput) (req *request.Request, output *GetQueryRuntimeStatisticsOutput) { - op := &request.Operation{ - Name: opGetQueryRuntimeStatistics, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetQueryRuntimeStatisticsInput{} - } - - output = &GetQueryRuntimeStatisticsOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetQueryRuntimeStatistics API operation for Amazon Athena. -// -// Returns query execution runtime statistics related to a single execution -// of a query if you have access to the workgroup in which the query ran. Query -// execution runtime statistics are returned only when QueryExecutionStatus$State -// is in a SUCCEEDED or FAILED state. Stage-level input and output row count -// and data size statistics are not shown when a query has row-level filters -// defined in Lake Formation. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetQueryRuntimeStatistics for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatistics -func (c *Athena) GetQueryRuntimeStatistics(input *GetQueryRuntimeStatisticsInput) (*GetQueryRuntimeStatisticsOutput, error) { - req, out := c.GetQueryRuntimeStatisticsRequest(input) - return out, req.Send() -} - -// GetQueryRuntimeStatisticsWithContext is the same as GetQueryRuntimeStatistics with the addition of -// the ability to pass a context and additional request options. -// -// See GetQueryRuntimeStatistics for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetQueryRuntimeStatisticsWithContext(ctx aws.Context, input *GetQueryRuntimeStatisticsInput, opts ...request.Option) (*GetQueryRuntimeStatisticsOutput, error) { - req, out := c.GetQueryRuntimeStatisticsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSession = "GetSession" - -// GetSessionRequest generates a "aws/request.Request" representing the -// client's request for the GetSession operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSession for more information on using the GetSession -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetSessionRequest method. -// req, resp := client.GetSessionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetSession -func (c *Athena) GetSessionRequest(input *GetSessionInput) (req *request.Request, output *GetSessionOutput) { - op := &request.Operation{ - Name: opGetSession, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionInput{} - } - - output = &GetSessionOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSession API operation for Amazon Athena. -// -// Gets the full details of a previously created session, including the session -// status and configuration. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetSession for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetSession -func (c *Athena) GetSession(input *GetSessionInput) (*GetSessionOutput, error) { - req, out := c.GetSessionRequest(input) - return out, req.Send() -} - -// GetSessionWithContext is the same as GetSession with the addition of -// the ability to pass a context and additional request options. -// -// See GetSession for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetSessionWithContext(ctx aws.Context, input *GetSessionInput, opts ...request.Option) (*GetSessionOutput, error) { - req, out := c.GetSessionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetSessionStatus = "GetSessionStatus" - -// GetSessionStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetSessionStatus operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetSessionStatus for more information on using the GetSessionStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetSessionStatusRequest method. -// req, resp := client.GetSessionStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetSessionStatus -func (c *Athena) GetSessionStatusRequest(input *GetSessionStatusInput) (req *request.Request, output *GetSessionStatusOutput) { - op := &request.Operation{ - Name: opGetSessionStatus, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetSessionStatusInput{} - } - - output = &GetSessionStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetSessionStatus API operation for Amazon Athena. -// -// Gets the current status of a session. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetSessionStatus for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetSessionStatus -func (c *Athena) GetSessionStatus(input *GetSessionStatusInput) (*GetSessionStatusOutput, error) { - req, out := c.GetSessionStatusRequest(input) - return out, req.Send() -} - -// GetSessionStatusWithContext is the same as GetSessionStatus with the addition of -// the ability to pass a context and additional request options. -// -// See GetSessionStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetSessionStatusWithContext(ctx aws.Context, input *GetSessionStatusInput, opts ...request.Option) (*GetSessionStatusOutput, error) { - req, out := c.GetSessionStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetTableMetadata = "GetTableMetadata" - -// GetTableMetadataRequest generates a "aws/request.Request" representing the -// client's request for the GetTableMetadata operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetTableMetadata for more information on using the GetTableMetadata -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetTableMetadataRequest method. -// req, resp := client.GetTableMetadataRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetTableMetadata -func (c *Athena) GetTableMetadataRequest(input *GetTableMetadataInput) (req *request.Request, output *GetTableMetadataOutput) { - op := &request.Operation{ - Name: opGetTableMetadata, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetTableMetadataInput{} - } - - output = &GetTableMetadataOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetTableMetadata API operation for Amazon Athena. -// -// Returns table metadata for the specified catalog, database, and table. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetTableMetadata for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - MetadataException -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). 
For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetTableMetadata -func (c *Athena) GetTableMetadata(input *GetTableMetadataInput) (*GetTableMetadataOutput, error) { - req, out := c.GetTableMetadataRequest(input) - return out, req.Send() -} - -// GetTableMetadataWithContext is the same as GetTableMetadata with the addition of -// the ability to pass a context and additional request options. -// -// See GetTableMetadata for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetTableMetadataWithContext(ctx aws.Context, input *GetTableMetadataInput, opts ...request.Option) (*GetTableMetadataOutput, error) { - req, out := c.GetTableMetadataRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetWorkGroup = "GetWorkGroup" - -// GetWorkGroupRequest generates a "aws/request.Request" representing the -// client's request for the GetWorkGroup operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetWorkGroup for more information on using the GetWorkGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the GetWorkGroupRequest method. -// req, resp := client.GetWorkGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetWorkGroup -func (c *Athena) GetWorkGroupRequest(input *GetWorkGroupInput) (req *request.Request, output *GetWorkGroupOutput) { - op := &request.Operation{ - Name: opGetWorkGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetWorkGroupInput{} - } - - output = &GetWorkGroupOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetWorkGroup API operation for Amazon Athena. -// -// Returns information about the workgroup with the specified name. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation GetWorkGroup for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetWorkGroup -func (c *Athena) GetWorkGroup(input *GetWorkGroupInput) (*GetWorkGroupOutput, error) { - req, out := c.GetWorkGroupRequest(input) - return out, req.Send() -} - -// GetWorkGroupWithContext is the same as GetWorkGroup with the addition of -// the ability to pass a context and additional request options. -// -// See GetWorkGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) GetWorkGroupWithContext(ctx aws.Context, input *GetWorkGroupInput, opts ...request.Option) (*GetWorkGroupOutput, error) { - req, out := c.GetWorkGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opImportNotebook = "ImportNotebook" - -// ImportNotebookRequest generates a "aws/request.Request" representing the -// client's request for the ImportNotebook operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ImportNotebook for more information on using the ImportNotebook -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ImportNotebookRequest method. -// req, resp := client.ImportNotebookRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ImportNotebook -func (c *Athena) ImportNotebookRequest(input *ImportNotebookInput) (req *request.Request, output *ImportNotebookOutput) { - op := &request.Operation{ - Name: opImportNotebook, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ImportNotebookInput{} - } - - output = &ImportNotebookOutput{} - req = c.newRequest(op, input, output) - return -} - -// ImportNotebook API operation for Amazon Athena. -// -// Imports a single ipynb file to a Spark enabled workgroup. To import the notebook, -// the request must specify a value for either Payload or NoteBookS3LocationUri. -// If neither is specified or both are specified, an InvalidRequestException -// occurs. The maximum file size that can be imported is 10 megabytes. If an -// ipynb file with the same name already exists in the workgroup, throws an -// error. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ImportNotebook for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ImportNotebook -func (c *Athena) ImportNotebook(input *ImportNotebookInput) (*ImportNotebookOutput, error) { - req, out := c.ImportNotebookRequest(input) - return out, req.Send() -} - -// ImportNotebookWithContext is the same as ImportNotebook with the addition of -// the ability to pass a context and additional request options. -// -// See ImportNotebook for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ImportNotebookWithContext(ctx aws.Context, input *ImportNotebookInput, opts ...request.Option) (*ImportNotebookOutput, error) { - req, out := c.ImportNotebookRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListApplicationDPUSizes = "ListApplicationDPUSizes" - -// ListApplicationDPUSizesRequest generates a "aws/request.Request" representing the -// client's request for the ListApplicationDPUSizes operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListApplicationDPUSizes for more information on using the ListApplicationDPUSizes -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListApplicationDPUSizesRequest method. -// req, resp := client.ListApplicationDPUSizesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListApplicationDPUSizes -func (c *Athena) ListApplicationDPUSizesRequest(input *ListApplicationDPUSizesInput) (req *request.Request, output *ListApplicationDPUSizesOutput) { - op := &request.Operation{ - Name: opListApplicationDPUSizes, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListApplicationDPUSizesInput{} - } - - output = &ListApplicationDPUSizesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListApplicationDPUSizes API operation for Amazon Athena. -// -// Returns the supported DPU sizes for the supported application runtimes (for -// example, Athena notebook version 1). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListApplicationDPUSizes for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. 
-// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListApplicationDPUSizes -func (c *Athena) ListApplicationDPUSizes(input *ListApplicationDPUSizesInput) (*ListApplicationDPUSizesOutput, error) { - req, out := c.ListApplicationDPUSizesRequest(input) - return out, req.Send() -} - -// ListApplicationDPUSizesWithContext is the same as ListApplicationDPUSizes with the addition of -// the ability to pass a context and additional request options. -// -// See ListApplicationDPUSizes for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListApplicationDPUSizesWithContext(ctx aws.Context, input *ListApplicationDPUSizesInput, opts ...request.Option) (*ListApplicationDPUSizesOutput, error) { - req, out := c.ListApplicationDPUSizesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListApplicationDPUSizesPages iterates over the pages of a ListApplicationDPUSizes operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListApplicationDPUSizes method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListApplicationDPUSizes operation. -// pageNum := 0 -// err := client.ListApplicationDPUSizesPages(params, -// func(page *athena.ListApplicationDPUSizesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListApplicationDPUSizesPages(input *ListApplicationDPUSizesInput, fn func(*ListApplicationDPUSizesOutput, bool) bool) error { - return c.ListApplicationDPUSizesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListApplicationDPUSizesPagesWithContext same as ListApplicationDPUSizesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListApplicationDPUSizesPagesWithContext(ctx aws.Context, input *ListApplicationDPUSizesInput, fn func(*ListApplicationDPUSizesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListApplicationDPUSizesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListApplicationDPUSizesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListApplicationDPUSizesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListCalculationExecutions = "ListCalculationExecutions" - -// ListCalculationExecutionsRequest generates a "aws/request.Request" representing the -// client's request for the ListCalculationExecutions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListCalculationExecutions for more information on using the ListCalculationExecutions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListCalculationExecutionsRequest method. -// req, resp := client.ListCalculationExecutionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListCalculationExecutions -func (c *Athena) ListCalculationExecutionsRequest(input *ListCalculationExecutionsInput) (req *request.Request, output *ListCalculationExecutionsOutput) { - op := &request.Operation{ - Name: opListCalculationExecutions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListCalculationExecutionsInput{} - } - - output = &ListCalculationExecutionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListCalculationExecutions API operation for Amazon Athena. -// -// Lists the calculations that have been submitted to a session in descending -// order. Newer calculations are listed first; older calculations are listed -// later. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListCalculationExecutions for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListCalculationExecutions -func (c *Athena) ListCalculationExecutions(input *ListCalculationExecutionsInput) (*ListCalculationExecutionsOutput, error) { - req, out := c.ListCalculationExecutionsRequest(input) - return out, req.Send() -} - -// ListCalculationExecutionsWithContext is the same as ListCalculationExecutions with the addition of -// the ability to pass a context and additional request options. -// -// See ListCalculationExecutions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListCalculationExecutionsWithContext(ctx aws.Context, input *ListCalculationExecutionsInput, opts ...request.Option) (*ListCalculationExecutionsOutput, error) { - req, out := c.ListCalculationExecutionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListCalculationExecutionsPages iterates over the pages of a ListCalculationExecutions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListCalculationExecutions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListCalculationExecutions operation. -// pageNum := 0 -// err := client.ListCalculationExecutionsPages(params, -// func(page *athena.ListCalculationExecutionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListCalculationExecutionsPages(input *ListCalculationExecutionsInput, fn func(*ListCalculationExecutionsOutput, bool) bool) error { - return c.ListCalculationExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListCalculationExecutionsPagesWithContext same as ListCalculationExecutionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListCalculationExecutionsPagesWithContext(ctx aws.Context, input *ListCalculationExecutionsInput, fn func(*ListCalculationExecutionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListCalculationExecutionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListCalculationExecutionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListCalculationExecutionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListCapacityReservations = "ListCapacityReservations" - -// ListCapacityReservationsRequest generates a "aws/request.Request" representing the -// client's request for the ListCapacityReservations operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListCapacityReservations for more information on using the ListCapacityReservations -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListCapacityReservationsRequest method. 
-// req, resp := client.ListCapacityReservationsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListCapacityReservations -func (c *Athena) ListCapacityReservationsRequest(input *ListCapacityReservationsInput) (req *request.Request, output *ListCapacityReservationsOutput) { - op := &request.Operation{ - Name: opListCapacityReservations, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListCapacityReservationsInput{} - } - - output = &ListCapacityReservationsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListCapacityReservations API operation for Amazon Athena. -// -// Lists the capacity reservations for the current account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListCapacityReservations for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListCapacityReservations -func (c *Athena) ListCapacityReservations(input *ListCapacityReservationsInput) (*ListCapacityReservationsOutput, error) { - req, out := c.ListCapacityReservationsRequest(input) - return out, req.Send() -} - -// ListCapacityReservationsWithContext is the same as ListCapacityReservations with the addition of -// the ability to pass a context and additional request options. -// -// See ListCapacityReservations for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListCapacityReservationsWithContext(ctx aws.Context, input *ListCapacityReservationsInput, opts ...request.Option) (*ListCapacityReservationsOutput, error) { - req, out := c.ListCapacityReservationsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListCapacityReservationsPages iterates over the pages of a ListCapacityReservations operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListCapacityReservations method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListCapacityReservations operation. 
-// pageNum := 0 -// err := client.ListCapacityReservationsPages(params, -// func(page *athena.ListCapacityReservationsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListCapacityReservationsPages(input *ListCapacityReservationsInput, fn func(*ListCapacityReservationsOutput, bool) bool) error { - return c.ListCapacityReservationsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListCapacityReservationsPagesWithContext same as ListCapacityReservationsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListCapacityReservationsPagesWithContext(ctx aws.Context, input *ListCapacityReservationsInput, fn func(*ListCapacityReservationsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListCapacityReservationsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListCapacityReservationsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListCapacityReservationsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListDataCatalogs = "ListDataCatalogs" - -// ListDataCatalogsRequest generates a "aws/request.Request" representing the -// client's request for the ListDataCatalogs operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListDataCatalogs for more information on using the ListDataCatalogs -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListDataCatalogsRequest method. -// req, resp := client.ListDataCatalogsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDataCatalogs -func (c *Athena) ListDataCatalogsRequest(input *ListDataCatalogsInput) (req *request.Request, output *ListDataCatalogsOutput) { - op := &request.Operation{ - Name: opListDataCatalogs, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListDataCatalogsInput{} - } - - output = &ListDataCatalogsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListDataCatalogs API operation for Amazon Athena. -// -// Lists the data catalogs in the current Amazon Web Services account. -// -// In the Athena console, data catalogs are listed as "data sources" on the -// Data sources page under the Data source name column. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListDataCatalogs for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDataCatalogs -func (c *Athena) ListDataCatalogs(input *ListDataCatalogsInput) (*ListDataCatalogsOutput, error) { - req, out := c.ListDataCatalogsRequest(input) - return out, req.Send() -} - -// ListDataCatalogsWithContext is the same as ListDataCatalogs with the addition of -// the ability to pass a context and additional request options. -// -// See ListDataCatalogs for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListDataCatalogsWithContext(ctx aws.Context, input *ListDataCatalogsInput, opts ...request.Option) (*ListDataCatalogsOutput, error) { - req, out := c.ListDataCatalogsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListDataCatalogsPages iterates over the pages of a ListDataCatalogs operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListDataCatalogs method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListDataCatalogs operation. -// pageNum := 0 -// err := client.ListDataCatalogsPages(params, -// func(page *athena.ListDataCatalogsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListDataCatalogsPages(input *ListDataCatalogsInput, fn func(*ListDataCatalogsOutput, bool) bool) error { - return c.ListDataCatalogsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDataCatalogsPagesWithContext same as ListDataCatalogsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListDataCatalogsPagesWithContext(ctx aws.Context, input *ListDataCatalogsInput, fn func(*ListDataCatalogsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDataCatalogsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDataCatalogsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDataCatalogsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListDatabases = "ListDatabases" - -// ListDatabasesRequest generates a "aws/request.Request" representing the -// client's request for the ListDatabases operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListDatabases for more information on using the ListDatabases -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListDatabasesRequest method. -// req, resp := client.ListDatabasesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDatabases -func (c *Athena) ListDatabasesRequest(input *ListDatabasesInput) (req *request.Request, output *ListDatabasesOutput) { - op := &request.Operation{ - Name: opListDatabases, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListDatabasesInput{} - } - - output = &ListDatabasesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListDatabases API operation for Amazon Athena. -// -// Lists the databases in the specified data catalog. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListDatabases for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - MetadataException -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDatabases -func (c *Athena) ListDatabases(input *ListDatabasesInput) (*ListDatabasesOutput, error) { - req, out := c.ListDatabasesRequest(input) - return out, req.Send() -} - -// ListDatabasesWithContext is the same as ListDatabases with the addition of -// the ability to pass a context and additional request options. -// -// See ListDatabases for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListDatabasesWithContext(ctx aws.Context, input *ListDatabasesInput, opts ...request.Option) (*ListDatabasesOutput, error) { - req, out := c.ListDatabasesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListDatabasesPages iterates over the pages of a ListDatabases operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListDatabases method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListDatabases operation. -// pageNum := 0 -// err := client.ListDatabasesPages(params, -// func(page *athena.ListDatabasesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListDatabasesPages(input *ListDatabasesInput, fn func(*ListDatabasesOutput, bool) bool) error { - return c.ListDatabasesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDatabasesPagesWithContext same as ListDatabasesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListDatabasesPagesWithContext(ctx aws.Context, input *ListDatabasesInput, fn func(*ListDatabasesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDatabasesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDatabasesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDatabasesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListEngineVersions = "ListEngineVersions" - -// ListEngineVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListEngineVersions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListEngineVersions for more information on using the ListEngineVersions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListEngineVersionsRequest method. 
-// req, resp := client.ListEngineVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListEngineVersions -func (c *Athena) ListEngineVersionsRequest(input *ListEngineVersionsInput) (req *request.Request, output *ListEngineVersionsOutput) { - op := &request.Operation{ - Name: opListEngineVersions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListEngineVersionsInput{} - } - - output = &ListEngineVersionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListEngineVersions API operation for Amazon Athena. -// -// Returns a list of engine versions that are available to choose from, including -// the Auto option. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListEngineVersions for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListEngineVersions -func (c *Athena) ListEngineVersions(input *ListEngineVersionsInput) (*ListEngineVersionsOutput, error) { - req, out := c.ListEngineVersionsRequest(input) - return out, req.Send() -} - -// ListEngineVersionsWithContext is the same as ListEngineVersions with the addition of -// the ability to pass a context and additional request options. -// -// See ListEngineVersions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListEngineVersionsWithContext(ctx aws.Context, input *ListEngineVersionsInput, opts ...request.Option) (*ListEngineVersionsOutput, error) { - req, out := c.ListEngineVersionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListEngineVersionsPages iterates over the pages of a ListEngineVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListEngineVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListEngineVersions operation. 
-// pageNum := 0 -// err := client.ListEngineVersionsPages(params, -// func(page *athena.ListEngineVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListEngineVersionsPages(input *ListEngineVersionsInput, fn func(*ListEngineVersionsOutput, bool) bool) error { - return c.ListEngineVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListEngineVersionsPagesWithContext same as ListEngineVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListEngineVersionsPagesWithContext(ctx aws.Context, input *ListEngineVersionsInput, fn func(*ListEngineVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListEngineVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListEngineVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListEngineVersionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListExecutors = "ListExecutors" - -// ListExecutorsRequest generates a "aws/request.Request" representing the -// client's request for the ListExecutors operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListExecutors for more information on using the ListExecutors -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListExecutorsRequest method. -// req, resp := client.ListExecutorsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListExecutors -func (c *Athena) ListExecutorsRequest(input *ListExecutorsInput) (req *request.Request, output *ListExecutorsOutput) { - op := &request.Operation{ - Name: opListExecutors, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListExecutorsInput{} - } - - output = &ListExecutorsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListExecutors API operation for Amazon Athena. -// -// Lists, in descending order, the executors that joined a session. Newer executors -// are listed first; older executors are listed later. The result can be optionally -// filtered by state. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Athena's -// API operation ListExecutors for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListExecutors -func (c *Athena) ListExecutors(input *ListExecutorsInput) (*ListExecutorsOutput, error) { - req, out := c.ListExecutorsRequest(input) - return out, req.Send() -} - -// ListExecutorsWithContext is the same as ListExecutors with the addition of -// the ability to pass a context and additional request options. -// -// See ListExecutors for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListExecutorsWithContext(ctx aws.Context, input *ListExecutorsInput, opts ...request.Option) (*ListExecutorsOutput, error) { - req, out := c.ListExecutorsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListExecutorsPages iterates over the pages of a ListExecutors operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListExecutors method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListExecutors operation. -// pageNum := 0 -// err := client.ListExecutorsPages(params, -// func(page *athena.ListExecutorsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListExecutorsPages(input *ListExecutorsInput, fn func(*ListExecutorsOutput, bool) bool) error { - return c.ListExecutorsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListExecutorsPagesWithContext same as ListExecutorsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListExecutorsPagesWithContext(ctx aws.Context, input *ListExecutorsInput, fn func(*ListExecutorsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListExecutorsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListExecutorsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListExecutorsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListNamedQueries = "ListNamedQueries" - -// ListNamedQueriesRequest generates a "aws/request.Request" representing the -// client's request for the ListNamedQueries operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListNamedQueries for more information on using the ListNamedQueries -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListNamedQueriesRequest method. -// req, resp := client.ListNamedQueriesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries -func (c *Athena) ListNamedQueriesRequest(input *ListNamedQueriesInput) (req *request.Request, output *ListNamedQueriesOutput) { - op := &request.Operation{ - Name: opListNamedQueries, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListNamedQueriesInput{} - } - - output = &ListNamedQueriesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListNamedQueries API operation for Amazon Athena. -// -// Provides a list of available query IDs only for queries saved in the specified -// workgroup. Requires that you have access to the specified workgroup. If a -// workgroup is not specified, lists the saved queries for the primary workgroup. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListNamedQueries for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries -func (c *Athena) ListNamedQueries(input *ListNamedQueriesInput) (*ListNamedQueriesOutput, error) { - req, out := c.ListNamedQueriesRequest(input) - return out, req.Send() -} - -// ListNamedQueriesWithContext is the same as ListNamedQueries with the addition of -// the ability to pass a context and additional request options. -// -// See ListNamedQueries for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListNamedQueriesWithContext(ctx aws.Context, input *ListNamedQueriesInput, opts ...request.Option) (*ListNamedQueriesOutput, error) { - req, out := c.ListNamedQueriesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -// ListNamedQueriesPages iterates over the pages of a ListNamedQueries operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListNamedQueries method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListNamedQueries operation. -// pageNum := 0 -// err := client.ListNamedQueriesPages(params, -// func(page *athena.ListNamedQueriesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListNamedQueriesPages(input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool) error { - return c.ListNamedQueriesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListNamedQueriesPagesWithContext same as ListNamedQueriesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListNamedQueriesPagesWithContext(ctx aws.Context, input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListNamedQueriesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListNamedQueriesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListNamedQueriesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListNotebookMetadata = "ListNotebookMetadata" - -// ListNotebookMetadataRequest generates a "aws/request.Request" representing the -// client's request for the ListNotebookMetadata operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListNotebookMetadata for more information on using the ListNotebookMetadata -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListNotebookMetadataRequest method. -// req, resp := client.ListNotebookMetadataRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNotebookMetadata -func (c *Athena) ListNotebookMetadataRequest(input *ListNotebookMetadataInput) (req *request.Request, output *ListNotebookMetadataOutput) { - op := &request.Operation{ - Name: opListNotebookMetadata, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListNotebookMetadataInput{} - } - - output = &ListNotebookMetadataOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListNotebookMetadata API operation for Amazon Athena. 
-// -// Displays the notebook files for the specified workgroup in paginated format. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListNotebookMetadata for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNotebookMetadata -func (c *Athena) ListNotebookMetadata(input *ListNotebookMetadataInput) (*ListNotebookMetadataOutput, error) { - req, out := c.ListNotebookMetadataRequest(input) - return out, req.Send() -} - -// ListNotebookMetadataWithContext is the same as ListNotebookMetadata with the addition of -// the ability to pass a context and additional request options. -// -// See ListNotebookMetadata for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListNotebookMetadataWithContext(ctx aws.Context, input *ListNotebookMetadataInput, opts ...request.Option) (*ListNotebookMetadataOutput, error) { - req, out := c.ListNotebookMetadataRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListNotebookSessions = "ListNotebookSessions" - -// ListNotebookSessionsRequest generates a "aws/request.Request" representing the -// client's request for the ListNotebookSessions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListNotebookSessions for more information on using the ListNotebookSessions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListNotebookSessionsRequest method. -// req, resp := client.ListNotebookSessionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNotebookSessions -func (c *Athena) ListNotebookSessionsRequest(input *ListNotebookSessionsInput) (req *request.Request, output *ListNotebookSessionsOutput) { - op := &request.Operation{ - Name: opListNotebookSessions, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListNotebookSessionsInput{} - } - - output = &ListNotebookSessionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListNotebookSessions API operation for Amazon Athena. 
-// -// Lists, in descending order, the sessions that have been created in a notebook -// that are in an active state like CREATING, CREATED, IDLE or BUSY. Newer sessions -// are listed first; older sessions are listed later. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListNotebookSessions for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNotebookSessions -func (c *Athena) ListNotebookSessions(input *ListNotebookSessionsInput) (*ListNotebookSessionsOutput, error) { - req, out := c.ListNotebookSessionsRequest(input) - return out, req.Send() -} - -// ListNotebookSessionsWithContext is the same as ListNotebookSessions with the addition of -// the ability to pass a context and additional request options. -// -// See ListNotebookSessions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListNotebookSessionsWithContext(ctx aws.Context, input *ListNotebookSessionsInput, opts ...request.Option) (*ListNotebookSessionsOutput, error) { - req, out := c.ListNotebookSessionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListPreparedStatements = "ListPreparedStatements" - -// ListPreparedStatementsRequest generates a "aws/request.Request" representing the -// client's request for the ListPreparedStatements operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListPreparedStatements for more information on using the ListPreparedStatements -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListPreparedStatementsRequest method. 
-// req, resp := client.ListPreparedStatementsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListPreparedStatements -func (c *Athena) ListPreparedStatementsRequest(input *ListPreparedStatementsInput) (req *request.Request, output *ListPreparedStatementsOutput) { - op := &request.Operation{ - Name: opListPreparedStatements, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListPreparedStatementsInput{} - } - - output = &ListPreparedStatementsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListPreparedStatements API operation for Amazon Athena. -// -// Lists the prepared statements in the specified workgroup. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListPreparedStatements for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListPreparedStatements -func (c *Athena) ListPreparedStatements(input *ListPreparedStatementsInput) (*ListPreparedStatementsOutput, error) { - req, out := c.ListPreparedStatementsRequest(input) - return out, req.Send() -} - -// ListPreparedStatementsWithContext is the same as ListPreparedStatements with the addition of -// the ability to pass a context and additional request options. -// -// See ListPreparedStatements for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListPreparedStatementsWithContext(ctx aws.Context, input *ListPreparedStatementsInput, opts ...request.Option) (*ListPreparedStatementsOutput, error) { - req, out := c.ListPreparedStatementsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListPreparedStatementsPages iterates over the pages of a ListPreparedStatements operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListPreparedStatements method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListPreparedStatements operation. 
-// pageNum := 0 -// err := client.ListPreparedStatementsPages(params, -// func(page *athena.ListPreparedStatementsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListPreparedStatementsPages(input *ListPreparedStatementsInput, fn func(*ListPreparedStatementsOutput, bool) bool) error { - return c.ListPreparedStatementsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListPreparedStatementsPagesWithContext same as ListPreparedStatementsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListPreparedStatementsPagesWithContext(ctx aws.Context, input *ListPreparedStatementsInput, fn func(*ListPreparedStatementsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListPreparedStatementsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListPreparedStatementsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListPreparedStatementsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListQueryExecutions = "ListQueryExecutions" - -// ListQueryExecutionsRequest generates a "aws/request.Request" representing the -// client's request for the ListQueryExecutions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListQueryExecutions for more information on using the ListQueryExecutions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListQueryExecutionsRequest method. -// req, resp := client.ListQueryExecutionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions -func (c *Athena) ListQueryExecutionsRequest(input *ListQueryExecutionsInput) (req *request.Request, output *ListQueryExecutionsOutput) { - op := &request.Operation{ - Name: opListQueryExecutions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListQueryExecutionsInput{} - } - - output = &ListQueryExecutionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListQueryExecutions API operation for Amazon Athena. -// -// Provides a list of available query execution IDs for the queries in the specified -// workgroup. Athena keeps a query history for 45 days. If a workgroup is not -// specified, returns a list of query execution IDs for the primary workgroup. -// Requires you to have access to the workgroup in which the queries ran. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListQueryExecutions for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions -func (c *Athena) ListQueryExecutions(input *ListQueryExecutionsInput) (*ListQueryExecutionsOutput, error) { - req, out := c.ListQueryExecutionsRequest(input) - return out, req.Send() -} - -// ListQueryExecutionsWithContext is the same as ListQueryExecutions with the addition of -// the ability to pass a context and additional request options. -// -// See ListQueryExecutions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListQueryExecutionsWithContext(ctx aws.Context, input *ListQueryExecutionsInput, opts ...request.Option) (*ListQueryExecutionsOutput, error) { - req, out := c.ListQueryExecutionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListQueryExecutionsPages iterates over the pages of a ListQueryExecutions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListQueryExecutions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListQueryExecutions operation. -// pageNum := 0 -// err := client.ListQueryExecutionsPages(params, -// func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListQueryExecutionsPages(input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool) error { - return c.ListQueryExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListQueryExecutionsPagesWithContext same as ListQueryExecutionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListQueryExecutionsPagesWithContext(ctx aws.Context, input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListQueryExecutionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListQueryExecutionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListQueryExecutionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListSessions = "ListSessions" - -// ListSessionsRequest generates a "aws/request.Request" representing the -// client's request for the ListSessions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListSessions for more information on using the ListSessions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListSessionsRequest method. -// req, resp := client.ListSessionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListSessions -func (c *Athena) ListSessionsRequest(input *ListSessionsInput) (req *request.Request, output *ListSessionsOutput) { - op := &request.Operation{ - Name: opListSessions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListSessionsInput{} - } - - output = &ListSessionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListSessions API operation for Amazon Athena. -// -// Lists the sessions in a workgroup that are in an active state like CREATING, -// CREATED, IDLE, or BUSY. Newer sessions are listed first; older sessions are -// listed later. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListSessions for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListSessions -func (c *Athena) ListSessions(input *ListSessionsInput) (*ListSessionsOutput, error) { - req, out := c.ListSessionsRequest(input) - return out, req.Send() -} - -// ListSessionsWithContext is the same as ListSessions with the addition of -// the ability to pass a context and additional request options. -// -// See ListSessions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Athena) ListSessionsWithContext(ctx aws.Context, input *ListSessionsInput, opts ...request.Option) (*ListSessionsOutput, error) { - req, out := c.ListSessionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListSessionsPages iterates over the pages of a ListSessions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListSessions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListSessions operation. -// pageNum := 0 -// err := client.ListSessionsPages(params, -// func(page *athena.ListSessionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListSessionsPages(input *ListSessionsInput, fn func(*ListSessionsOutput, bool) bool) error { - return c.ListSessionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListSessionsPagesWithContext same as ListSessionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListSessionsPagesWithContext(ctx aws.Context, input *ListSessionsInput, fn func(*ListSessionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListSessionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListSessionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListSessionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTableMetadata = "ListTableMetadata" - -// ListTableMetadataRequest generates a "aws/request.Request" representing the -// client's request for the ListTableMetadata operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTableMetadata for more information on using the ListTableMetadata -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListTableMetadataRequest method. 
-// req, resp := client.ListTableMetadataRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTableMetadata -func (c *Athena) ListTableMetadataRequest(input *ListTableMetadataInput) (req *request.Request, output *ListTableMetadataOutput) { - op := &request.Operation{ - Name: opListTableMetadata, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListTableMetadataInput{} - } - - output = &ListTableMetadataOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListTableMetadata API operation for Amazon Athena. -// -// Lists the metadata for the tables in the specified data catalog database. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListTableMetadata for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - MetadataException -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTableMetadata -func (c *Athena) ListTableMetadata(input *ListTableMetadataInput) (*ListTableMetadataOutput, error) { - req, out := c.ListTableMetadataRequest(input) - return out, req.Send() -} - -// ListTableMetadataWithContext is the same as ListTableMetadata with the addition of -// the ability to pass a context and additional request options. -// -// See ListTableMetadata for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListTableMetadataWithContext(ctx aws.Context, input *ListTableMetadataInput, opts ...request.Option) (*ListTableMetadataOutput, error) { - req, out := c.ListTableMetadataRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListTableMetadataPages iterates over the pages of a ListTableMetadata operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTableMetadata method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTableMetadata operation. 
-// pageNum := 0 -// err := client.ListTableMetadataPages(params, -// func(page *athena.ListTableMetadataOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListTableMetadataPages(input *ListTableMetadataInput, fn func(*ListTableMetadataOutput, bool) bool) error { - return c.ListTableMetadataPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTableMetadataPagesWithContext same as ListTableMetadataPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListTableMetadataPagesWithContext(ctx aws.Context, input *ListTableMetadataInput, fn func(*ListTableMetadataOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTableMetadataInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTableMetadataRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTableMetadataOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTagsForResource = "ListTagsForResource" - -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTagsForResource for more information on using the ListTagsForResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource -func (c *Athena) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { - op := &request.Operation{ - Name: opListTagsForResource, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListTagsForResourceInput{} - } - - output = &ListTagsForResourceOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListTagsForResource API operation for Amazon Athena. -// -// Lists the tags associated with an Athena resource. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListTagsForResource for usage and error information. 
-// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource -func (c *Athena) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) - return out, req.Send() -} - -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of -// the ability to pass a context and additional request options. -// -// See ListTagsForResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTagsForResource method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTagsForResource operation. -// pageNum := 0 -// err := client.ListTagsForResourcePages(params, -// func(page *athena.ListTagsForResourceOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { - return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTagsForResourceInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTagsForResourceRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListWorkGroups = "ListWorkGroups" - -// ListWorkGroupsRequest generates a "aws/request.Request" representing the -// client's request for the ListWorkGroups operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListWorkGroups for more information on using the ListWorkGroups -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the ListWorkGroupsRequest method. -// req, resp := client.ListWorkGroupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListWorkGroups -func (c *Athena) ListWorkGroupsRequest(input *ListWorkGroupsInput) (req *request.Request, output *ListWorkGroupsOutput) { - op := &request.Operation{ - Name: opListWorkGroups, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &ListWorkGroupsInput{} - } - - output = &ListWorkGroupsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListWorkGroups API operation for Amazon Athena. -// -// Lists available workgroups for the account. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation ListWorkGroups for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListWorkGroups -func (c *Athena) ListWorkGroups(input *ListWorkGroupsInput) (*ListWorkGroupsOutput, error) { - req, out := c.ListWorkGroupsRequest(input) - return out, req.Send() -} - -// ListWorkGroupsWithContext is the same as ListWorkGroups with the addition of -// the ability to pass a context and additional request options. -// -// See ListWorkGroups for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListWorkGroupsWithContext(ctx aws.Context, input *ListWorkGroupsInput, opts ...request.Option) (*ListWorkGroupsOutput, error) { - req, out := c.ListWorkGroupsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -// ListWorkGroupsPages iterates over the pages of a ListWorkGroups operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListWorkGroups method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListWorkGroups operation. -// pageNum := 0 -// err := client.ListWorkGroupsPages(params, -// func(page *athena.ListWorkGroupsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -func (c *Athena) ListWorkGroupsPages(input *ListWorkGroupsInput, fn func(*ListWorkGroupsOutput, bool) bool) error { - return c.ListWorkGroupsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListWorkGroupsPagesWithContext same as ListWorkGroupsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListWorkGroupsPagesWithContext(ctx aws.Context, input *ListWorkGroupsInput, fn func(*ListWorkGroupsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListWorkGroupsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListWorkGroupsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListWorkGroupsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opPutCapacityAssignmentConfiguration = "PutCapacityAssignmentConfiguration" - -// PutCapacityAssignmentConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutCapacityAssignmentConfiguration operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutCapacityAssignmentConfiguration for more information on using the PutCapacityAssignmentConfiguration -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the PutCapacityAssignmentConfigurationRequest method. 
-// req, resp := client.PutCapacityAssignmentConfigurationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/PutCapacityAssignmentConfiguration -func (c *Athena) PutCapacityAssignmentConfigurationRequest(input *PutCapacityAssignmentConfigurationInput) (req *request.Request, output *PutCapacityAssignmentConfigurationOutput) { - op := &request.Operation{ - Name: opPutCapacityAssignmentConfiguration, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutCapacityAssignmentConfigurationInput{} - } - - output = &PutCapacityAssignmentConfigurationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutCapacityAssignmentConfiguration API operation for Amazon Athena. -// -// Puts a new capacity assignment configuration for a specified capacity reservation. -// If a capacity assignment configuration already exists for the capacity reservation, -// replaces the existing capacity assignment configuration. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation PutCapacityAssignmentConfiguration for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/PutCapacityAssignmentConfiguration -func (c *Athena) PutCapacityAssignmentConfiguration(input *PutCapacityAssignmentConfigurationInput) (*PutCapacityAssignmentConfigurationOutput, error) { - req, out := c.PutCapacityAssignmentConfigurationRequest(input) - return out, req.Send() -} - -// PutCapacityAssignmentConfigurationWithContext is the same as PutCapacityAssignmentConfiguration with the addition of -// the ability to pass a context and additional request options. -// -// See PutCapacityAssignmentConfiguration for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) PutCapacityAssignmentConfigurationWithContext(ctx aws.Context, input *PutCapacityAssignmentConfigurationInput, opts ...request.Option) (*PutCapacityAssignmentConfigurationOutput, error) { - req, out := c.PutCapacityAssignmentConfigurationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartCalculationExecution = "StartCalculationExecution" - -// StartCalculationExecutionRequest generates a "aws/request.Request" representing the -// client's request for the StartCalculationExecution operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. 
-// the "output" return value is not valid until after Send returns without error. -// -// See StartCalculationExecution for more information on using the StartCalculationExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the StartCalculationExecutionRequest method. -// req, resp := client.StartCalculationExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartCalculationExecution -func (c *Athena) StartCalculationExecutionRequest(input *StartCalculationExecutionInput) (req *request.Request, output *StartCalculationExecutionOutput) { - op := &request.Operation{ - Name: opStartCalculationExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartCalculationExecutionInput{} - } - - output = &StartCalculationExecutionOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartCalculationExecution API operation for Amazon Athena. -// -// Submits calculations for execution within a session. You can supply the code -// to run as an inline code block within the request. -// -// The request syntax requires the StartCalculationExecutionRequest$CodeBlock -// parameter or the CalculationConfiguration$CodeBlock parameter, but not both. -// Because CalculationConfiguration$CodeBlock is deprecated, use the StartCalculationExecutionRequest$CodeBlock -// parameter instead. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation StartCalculationExecution for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartCalculationExecution -func (c *Athena) StartCalculationExecution(input *StartCalculationExecutionInput) (*StartCalculationExecutionOutput, error) { - req, out := c.StartCalculationExecutionRequest(input) - return out, req.Send() -} - -// StartCalculationExecutionWithContext is the same as StartCalculationExecution with the addition of -// the ability to pass a context and additional request options. -// -// See StartCalculationExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) StartCalculationExecutionWithContext(ctx aws.Context, input *StartCalculationExecutionInput, opts ...request.Option) (*StartCalculationExecutionOutput, error) { - req, out := c.StartCalculationExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opStartQueryExecution = "StartQueryExecution" - -// StartQueryExecutionRequest generates a "aws/request.Request" representing the -// client's request for the StartQueryExecution operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartQueryExecution for more information on using the StartQueryExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the StartQueryExecutionRequest method. -// req, resp := client.StartQueryExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartQueryExecution -func (c *Athena) StartQueryExecutionRequest(input *StartQueryExecutionInput) (req *request.Request, output *StartQueryExecutionOutput) { - op := &request.Operation{ - Name: opStartQueryExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartQueryExecutionInput{} - } - - output = &StartQueryExecutionOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartQueryExecution API operation for Amazon Athena. -// -// Runs the SQL query statements contained in the Query. Requires you to have -// access to the workgroup in which the query ran. Running queries against an -// external catalog requires GetDataCatalog permission to the catalog. For code -// samples using the Amazon Web Services SDK for Java, see Examples and Code -// Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in -// the Amazon Athena User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation StartQueryExecution for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartQueryExecution -func (c *Athena) StartQueryExecution(input *StartQueryExecutionInput) (*StartQueryExecutionOutput, error) { - req, out := c.StartQueryExecutionRequest(input) - return out, req.Send() -} - -// StartQueryExecutionWithContext is the same as StartQueryExecution with the addition of -// the ability to pass a context and additional request options. -// -// See StartQueryExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
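// A minimal sketch, assuming the aws-sdk-go-v2 athena module that go.mod now
// requires: the StartQueryExecution / StartQueryExecutionWithContext pair being
// removed here collapses into a single context-aware call. The workgroup and
// database names below are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/athena"
	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

func main() {
	ctx := context.Background()

	// Default credential/region resolution in the v2 SDK.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := athena.NewFromConfig(cfg)

	// The context argument replaces the v1 aws.Context / WithContext variants.
	out, err := client.StartQueryExecution(ctx, &athena.StartQueryExecutionInput{
		QueryString: aws.String("SELECT 1"),
		WorkGroup:   aws.String("primary"),
		QueryExecutionContext: &types.QueryExecutionContext{
			Database: aws.String("default"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(out.QueryExecutionId))
}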
-func (c *Athena) StartQueryExecutionWithContext(ctx aws.Context, input *StartQueryExecutionInput, opts ...request.Option) (*StartQueryExecutionOutput, error) { - req, out := c.StartQueryExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStartSession = "StartSession" - -// StartSessionRequest generates a "aws/request.Request" representing the -// client's request for the StartSession operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StartSession for more information on using the StartSession -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the StartSessionRequest method. -// req, resp := client.StartSessionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartSession -func (c *Athena) StartSessionRequest(input *StartSessionInput) (req *request.Request, output *StartSessionOutput) { - op := &request.Operation{ - Name: opStartSession, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StartSessionInput{} - } - - output = &StartSessionOutput{} - req = c.newRequest(op, input, output) - return -} - -// StartSession API operation for Amazon Athena. -// -// Creates a session for running calculations within a workgroup. The session -// is ready when it reaches an IDLE state. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation StartSession for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// - SessionAlreadyExistsException -// The specified session already exists. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartSession -func (c *Athena) StartSession(input *StartSessionInput) (*StartSessionOutput, error) { - req, out := c.StartSessionRequest(input) - return out, req.Send() -} - -// StartSessionWithContext is the same as StartSession with the addition of -// the ability to pass a context and additional request options. -// -// See StartSession for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
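// A minimal sketch, assuming the same aws-sdk-go-v2 athena client as in the
// sketch above: the ListWorkGroupsPages / ListWorkGroupsPagesWithContext
// callback helpers removed earlier in this file correspond to a generated
// paginator in the v2 SDK.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/athena"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := athena.NewFromConfig(cfg)

	// One paginator object replaces the fn(page, lastPage) callback style.
	p := athena.NewListWorkGroupsPaginator(client, &athena.ListWorkGroupsInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, wg := range page.WorkGroups {
			fmt.Println(aws.ToString(wg.Name))
		}
	}
}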
-func (c *Athena) StartSessionWithContext(ctx aws.Context, input *StartSessionInput, opts ...request.Option) (*StartSessionOutput, error) { - req, out := c.StartSessionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopCalculationExecution = "StopCalculationExecution" - -// StopCalculationExecutionRequest generates a "aws/request.Request" representing the -// client's request for the StopCalculationExecution operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopCalculationExecution for more information on using the StopCalculationExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the StopCalculationExecutionRequest method. -// req, resp := client.StopCalculationExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopCalculationExecution -func (c *Athena) StopCalculationExecutionRequest(input *StopCalculationExecutionInput) (req *request.Request, output *StopCalculationExecutionOutput) { - op := &request.Operation{ - Name: opStopCalculationExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopCalculationExecutionInput{} - } - - output = &StopCalculationExecutionOutput{} - req = c.newRequest(op, input, output) - return -} - -// StopCalculationExecution API operation for Amazon Athena. -// -// Requests the cancellation of a calculation. A StopCalculationExecution call -// on a calculation that is already in a terminal state (for example, STOPPED, -// FAILED, or COMPLETED) succeeds but has no effect. -// -// Cancelling a calculation is done on a best effort basis. If a calculation -// cannot be cancelled, you can be charged for its completion. If you are concerned -// about being charged for a calculation that cannot be cancelled, consider -// terminating the session in which the calculation is running. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation StopCalculationExecution for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopCalculationExecution -func (c *Athena) StopCalculationExecution(input *StopCalculationExecutionInput) (*StopCalculationExecutionOutput, error) { - req, out := c.StopCalculationExecutionRequest(input) - return out, req.Send() -} - -// StopCalculationExecutionWithContext is the same as StopCalculationExecution with the addition of -// the ability to pass a context and additional request options. -// -// See StopCalculationExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) StopCalculationExecutionWithContext(ctx aws.Context, input *StopCalculationExecutionInput, opts ...request.Option) (*StopCalculationExecutionOutput, error) { - req, out := c.StopCalculationExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opStopQueryExecution = "StopQueryExecution" - -// StopQueryExecutionRequest generates a "aws/request.Request" representing the -// client's request for the StopQueryExecution operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See StopQueryExecution for more information on using the StopQueryExecution -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the StopQueryExecutionRequest method. -// req, resp := client.StopQueryExecutionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopQueryExecution -func (c *Athena) StopQueryExecutionRequest(input *StopQueryExecutionInput) (req *request.Request, output *StopQueryExecutionOutput) { - op := &request.Operation{ - Name: opStopQueryExecution, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &StopQueryExecutionInput{} - } - - output = &StopQueryExecutionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// StopQueryExecution API operation for Amazon Athena. -// -// Stops a query execution. Requires you to have access to the workgroup in -// which the query ran. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation StopQueryExecution for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopQueryExecution -func (c *Athena) StopQueryExecution(input *StopQueryExecutionInput) (*StopQueryExecutionOutput, error) { - req, out := c.StopQueryExecutionRequest(input) - return out, req.Send() -} - -// StopQueryExecutionWithContext is the same as StopQueryExecution with the addition of -// the ability to pass a context and additional request options. -// -// See StopQueryExecution for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) StopQueryExecutionWithContext(ctx aws.Context, input *StopQueryExecutionInput, opts ...request.Option) (*StopQueryExecutionOutput, error) { - req, out := c.StopQueryExecutionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTagResource = "TagResource" - -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TagResource for more information on using the TagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TagResource -func (c *Athena) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { - op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TagResourceInput{} - } - - output = &TagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// TagResource API operation for Amazon Athena. -// -// Adds one or more tags to an Athena resource. A tag is a label that you assign -// to a resource. Each tag consists of a key and an optional value, both of -// which you define. For example, you can use tags to categorize Athena workgroups, -// data catalogs, or capacity reservations by purpose, owner, or environment. -// Use a consistent set of tag keys to make it easier to search and filter the -// resources in your account. For best practices, see Tagging Best Practices -// (https://docs.aws.amazon.com/whitepapers/latest/tagging-best-practices/tagging-best-practices.html). -// Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can -// be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers -// representable in UTF-8, and the following characters: + - = . _ : / @. Tag -// keys and values are case-sensitive. Tag keys must be unique per resource. 
-// If you specify more than one tag, separate them by commas. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation TagResource for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TagResource -func (c *Athena) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - return out, req.Send() -} - -// TagResourceWithContext is the same as TagResource with the addition of -// the ability to pass a context and additional request options. -// -// See TagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTerminateSession = "TerminateSession" - -// TerminateSessionRequest generates a "aws/request.Request" representing the -// client's request for the TerminateSession operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See TerminateSession for more information on using the TerminateSession -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the TerminateSessionRequest method. -// req, resp := client.TerminateSessionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TerminateSession -func (c *Athena) TerminateSessionRequest(input *TerminateSessionInput) (req *request.Request, output *TerminateSessionOutput) { - op := &request.Operation{ - Name: opTerminateSession, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TerminateSessionInput{} - } - - output = &TerminateSessionOutput{} - req = c.newRequest(op, input, output) - return -} - -// TerminateSession API operation for Amazon Athena. -// -// Terminates an active session. A TerminateSession call on a session that is -// already inactive (for example, in a FAILED, TERMINATED or TERMINATING state) -// succeeds but has no effect. 
Calculations running in the session when TerminateSession -// is called are forcefully stopped, but may display as FAILED instead of STOPPED. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation TerminateSession for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TerminateSession -func (c *Athena) TerminateSession(input *TerminateSessionInput) (*TerminateSessionOutput, error) { - req, out := c.TerminateSessionRequest(input) - return out, req.Send() -} - -// TerminateSessionWithContext is the same as TerminateSession with the addition of -// the ability to pass a context and additional request options. -// -// See TerminateSession for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) TerminateSessionWithContext(ctx aws.Context, input *TerminateSessionInput, opts ...request.Option) (*TerminateSessionOutput, error) { - req, out := c.TerminateSessionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUntagResource = "UntagResource" - -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UntagResource for more information on using the UntagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource -func (c *Athena) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { - op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UntagResourceInput{} - } - - output = &UntagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UntagResource API operation for Amazon Athena. -// -// Removes one or more tags from an Athena resource. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation UntagResource for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource -func (c *Athena) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - return out, req.Send() -} - -// UntagResourceWithContext is the same as UntagResource with the addition of -// the ability to pass a context and additional request options. -// -// See UntagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateCapacityReservation = "UpdateCapacityReservation" - -// UpdateCapacityReservationRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCapacityReservation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateCapacityReservation for more information on using the UpdateCapacityReservation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateCapacityReservationRequest method. -// req, resp := client.UpdateCapacityReservationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateCapacityReservation -func (c *Athena) UpdateCapacityReservationRequest(input *UpdateCapacityReservationInput) (req *request.Request, output *UpdateCapacityReservationOutput) { - op := &request.Operation{ - Name: opUpdateCapacityReservation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateCapacityReservationInput{} - } - - output = &UpdateCapacityReservationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateCapacityReservation API operation for Amazon Athena. 
-// -// Updates the number of requested data processing units for the capacity reservation -// with the specified name. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation UpdateCapacityReservation for usage and error information. -// -// Returned Error Types: -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateCapacityReservation -func (c *Athena) UpdateCapacityReservation(input *UpdateCapacityReservationInput) (*UpdateCapacityReservationOutput, error) { - req, out := c.UpdateCapacityReservationRequest(input) - return out, req.Send() -} - -// UpdateCapacityReservationWithContext is the same as UpdateCapacityReservation with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateCapacityReservation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) UpdateCapacityReservationWithContext(ctx aws.Context, input *UpdateCapacityReservationInput, opts ...request.Option) (*UpdateCapacityReservationOutput, error) { - req, out := c.UpdateCapacityReservationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateDataCatalog = "UpdateDataCatalog" - -// UpdateDataCatalogRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDataCatalog operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateDataCatalog for more information on using the UpdateDataCatalog -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateDataCatalogRequest method. -// req, resp := client.UpdateDataCatalogRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateDataCatalog -func (c *Athena) UpdateDataCatalogRequest(input *UpdateDataCatalogInput) (req *request.Request, output *UpdateDataCatalogOutput) { - op := &request.Operation{ - Name: opUpdateDataCatalog, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateDataCatalogInput{} - } - - output = &UpdateDataCatalogOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateDataCatalog API operation for Amazon Athena. 
-// -// Updates the data catalog that has the specified name. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation UpdateDataCatalog for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateDataCatalog -func (c *Athena) UpdateDataCatalog(input *UpdateDataCatalogInput) (*UpdateDataCatalogOutput, error) { - req, out := c.UpdateDataCatalogRequest(input) - return out, req.Send() -} - -// UpdateDataCatalogWithContext is the same as UpdateDataCatalog with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateDataCatalog for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) UpdateDataCatalogWithContext(ctx aws.Context, input *UpdateDataCatalogInput, opts ...request.Option) (*UpdateDataCatalogOutput, error) { - req, out := c.UpdateDataCatalogRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateNamedQuery = "UpdateNamedQuery" - -// UpdateNamedQueryRequest generates a "aws/request.Request" representing the -// client's request for the UpdateNamedQuery operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateNamedQuery for more information on using the UpdateNamedQuery -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateNamedQueryRequest method. -// req, resp := client.UpdateNamedQueryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNamedQuery -func (c *Athena) UpdateNamedQueryRequest(input *UpdateNamedQueryInput) (req *request.Request, output *UpdateNamedQueryOutput) { - op := &request.Operation{ - Name: opUpdateNamedQuery, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateNamedQueryInput{} - } - - output = &UpdateNamedQueryOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateNamedQuery API operation for Amazon Athena. -// -// Updates a NamedQuery object. The database or workgroup cannot be updated. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation UpdateNamedQuery for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNamedQuery -func (c *Athena) UpdateNamedQuery(input *UpdateNamedQueryInput) (*UpdateNamedQueryOutput, error) { - req, out := c.UpdateNamedQueryRequest(input) - return out, req.Send() -} - -// UpdateNamedQueryWithContext is the same as UpdateNamedQuery with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateNamedQuery for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) UpdateNamedQueryWithContext(ctx aws.Context, input *UpdateNamedQueryInput, opts ...request.Option) (*UpdateNamedQueryOutput, error) { - req, out := c.UpdateNamedQueryRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateNotebook = "UpdateNotebook" - -// UpdateNotebookRequest generates a "aws/request.Request" representing the -// client's request for the UpdateNotebook operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateNotebook for more information on using the UpdateNotebook -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateNotebookRequest method. -// req, resp := client.UpdateNotebookRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNotebook -func (c *Athena) UpdateNotebookRequest(input *UpdateNotebookInput) (req *request.Request, output *UpdateNotebookOutput) { - op := &request.Operation{ - Name: opUpdateNotebook, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateNotebookInput{} - } - - output = &UpdateNotebookOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateNotebook API operation for Amazon Athena. -// -// Updates the contents of a Spark notebook. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Athena's -// API operation UpdateNotebook for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNotebook -func (c *Athena) UpdateNotebook(input *UpdateNotebookInput) (*UpdateNotebookOutput, error) { - req, out := c.UpdateNotebookRequest(input) - return out, req.Send() -} - -// UpdateNotebookWithContext is the same as UpdateNotebook with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateNotebook for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) UpdateNotebookWithContext(ctx aws.Context, input *UpdateNotebookInput, opts ...request.Option) (*UpdateNotebookOutput, error) { - req, out := c.UpdateNotebookRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateNotebookMetadata = "UpdateNotebookMetadata" - -// UpdateNotebookMetadataRequest generates a "aws/request.Request" representing the -// client's request for the UpdateNotebookMetadata operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateNotebookMetadata for more information on using the UpdateNotebookMetadata -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateNotebookMetadataRequest method. -// req, resp := client.UpdateNotebookMetadataRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNotebookMetadata -func (c *Athena) UpdateNotebookMetadataRequest(input *UpdateNotebookMetadataInput) (req *request.Request, output *UpdateNotebookMetadataOutput) { - op := &request.Operation{ - Name: opUpdateNotebookMetadata, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateNotebookMetadataInput{} - } - - output = &UpdateNotebookMetadataOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateNotebookMetadata API operation for Amazon Athena. -// -// Updates the metadata for a notebook. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
-// -// See the AWS API reference guide for Amazon Athena's -// API operation UpdateNotebookMetadata for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - TooManyRequestsException -// Indicates that the request was throttled. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNotebookMetadata -func (c *Athena) UpdateNotebookMetadata(input *UpdateNotebookMetadataInput) (*UpdateNotebookMetadataOutput, error) { - req, out := c.UpdateNotebookMetadataRequest(input) - return out, req.Send() -} - -// UpdateNotebookMetadataWithContext is the same as UpdateNotebookMetadata with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateNotebookMetadata for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) UpdateNotebookMetadataWithContext(ctx aws.Context, input *UpdateNotebookMetadataInput, opts ...request.Option) (*UpdateNotebookMetadataOutput, error) { - req, out := c.UpdateNotebookMetadataRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdatePreparedStatement = "UpdatePreparedStatement" - -// UpdatePreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePreparedStatement operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdatePreparedStatement for more information on using the UpdatePreparedStatement -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdatePreparedStatementRequest method. -// req, resp := client.UpdatePreparedStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdatePreparedStatement -func (c *Athena) UpdatePreparedStatementRequest(input *UpdatePreparedStatementInput) (req *request.Request, output *UpdatePreparedStatementOutput) { - op := &request.Operation{ - Name: opUpdatePreparedStatement, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdatePreparedStatementInput{} - } - - output = &UpdatePreparedStatementOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdatePreparedStatement API operation for Amazon Athena. -// -// Updates a prepared statement. -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation UpdatePreparedStatement for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// - ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdatePreparedStatement -func (c *Athena) UpdatePreparedStatement(input *UpdatePreparedStatementInput) (*UpdatePreparedStatementOutput, error) { - req, out := c.UpdatePreparedStatementRequest(input) - return out, req.Send() -} - -// UpdatePreparedStatementWithContext is the same as UpdatePreparedStatement with the addition of -// the ability to pass a context and additional request options. -// -// See UpdatePreparedStatement for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) UpdatePreparedStatementWithContext(ctx aws.Context, input *UpdatePreparedStatementInput, opts ...request.Option) (*UpdatePreparedStatementOutput, error) { - req, out := c.UpdatePreparedStatementRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateWorkGroup = "UpdateWorkGroup" - -// UpdateWorkGroupRequest generates a "aws/request.Request" representing the -// client's request for the UpdateWorkGroup operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateWorkGroup for more information on using the UpdateWorkGroup -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// // Example sending a request using the UpdateWorkGroupRequest method. -// req, resp := client.UpdateWorkGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateWorkGroup -func (c *Athena) UpdateWorkGroupRequest(input *UpdateWorkGroupInput) (req *request.Request, output *UpdateWorkGroupOutput) { - op := &request.Operation{ - Name: opUpdateWorkGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateWorkGroupInput{} - } - - output = &UpdateWorkGroupOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateWorkGroup API operation for Amazon Athena. -// -// Updates the workgroup with the specified name. The workgroup's name cannot -// be changed. Only ConfigurationUpdates can be specified. 
-// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon Athena's -// API operation UpdateWorkGroup for usage and error information. -// -// Returned Error Types: -// -// - InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. -// -// - InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateWorkGroup -func (c *Athena) UpdateWorkGroup(input *UpdateWorkGroupInput) (*UpdateWorkGroupOutput, error) { - req, out := c.UpdateWorkGroupRequest(input) - return out, req.Send() -} - -// UpdateWorkGroupWithContext is the same as UpdateWorkGroup with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateWorkGroup for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) UpdateWorkGroupWithContext(ctx aws.Context, input *UpdateWorkGroupInput, opts ...request.Option) (*UpdateWorkGroupOutput, error) { - req, out := c.UpdateWorkGroupRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Indicates that an Amazon S3 canned ACL should be set to control ownership -// of stored query results. When Athena stores query results in Amazon S3, the -// canned ACL is set with the x-amz-acl request header. For more information -// about S3 Object Ownership, see Object Ownership settings (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html#object-ownership-overview) -// in the Amazon S3 User Guide. -type AclConfiguration struct { - _ struct{} `type:"structure"` - - // The Amazon S3 canned ACL that Athena should specify when storing query results. - // Currently the only supported canned ACL is BUCKET_OWNER_FULL_CONTROL. If - // a query runs in a workgroup and the workgroup overrides client-side settings, - // then the Amazon S3 canned ACL specified in the workgroup's settings is used - // for all queries that run in the workgroup. For more information about Amazon - // S3 canned ACLs, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) - // in the Amazon S3 User Guide. - // - // S3AclOption is a required field - S3AclOption *string `type:"string" required:"true" enum:"S3AclOption"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AclConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
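// A minimal sketch, assuming aws-sdk-go-v2: the AclConfiguration shape removed
// here lives in the athena/types package, where S3AclOption is an enum value
// rather than a *string. The result bucket is a placeholder.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/athena"
	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := athena.NewFromConfig(cfg)

	// BUCKET_OWNER_FULL_CONTROL is currently the only canned ACL Athena accepts.
	_, err = client.StartQueryExecution(ctx, &athena.StartQueryExecutionInput{
		QueryString: aws.String("SELECT 1"),
		ResultConfiguration: &types.ResultConfiguration{
			OutputLocation: aws.String("s3://example-results-bucket/"),
			AclConfiguration: &types.AclConfiguration{
				S3AclOption: types.S3AclOptionBucketOwnerFullControl,
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}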
-func (s AclConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AclConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AclConfiguration"} - if s.S3AclOption == nil { - invalidParams.Add(request.NewErrParamRequired("S3AclOption")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetS3AclOption sets the S3AclOption field's value. -func (s *AclConfiguration) SetS3AclOption(v string) *AclConfiguration { - s.S3AclOption = &v - return s -} - -// Contains the application runtime IDs and their supported DPU sizes. -type ApplicationDPUSizes struct { - _ struct{} `type:"structure"` - - // The name of the supported application runtime (for example, Athena notebook - // version 1). - ApplicationRuntimeId *string `min:"1" type:"string"` - - // A list of the supported DPU sizes that the application runtime supports. - SupportedDPUSizes []*int64 `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ApplicationDPUSizes) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ApplicationDPUSizes) GoString() string { - return s.String() -} - -// SetApplicationRuntimeId sets the ApplicationRuntimeId field's value. -func (s *ApplicationDPUSizes) SetApplicationRuntimeId(v string) *ApplicationDPUSizes { - s.ApplicationRuntimeId = &v - return s -} - -// SetSupportedDPUSizes sets the SupportedDPUSizes field's value. -func (s *ApplicationDPUSizes) SetSupportedDPUSizes(v []*int64) *ApplicationDPUSizes { - s.SupportedDPUSizes = v - return s -} - -// Provides information about an Athena query error. The AthenaError feature -// provides standardized error information to help you understand failed queries -// and take steps after a query failure occurs. AthenaError includes an ErrorCategory -// field that specifies whether the cause of the failed query is due to system -// error, user error, or other error. -type AthenaError struct { - _ struct{} `type:"structure"` - - // An integer value that specifies the category of a query failure error. The - // following list shows the category for each integer value. - // - // 1 - System - // - // 2 - User - // - // 3 - Other - ErrorCategory *int64 `min:"1" type:"integer"` - - // Contains a short description of the error that occurred. - ErrorMessage *string `type:"string"` - - // An integer value that provides specific information about an Athena query - // error. For the meaning of specific values, see the Error Type Reference (https://docs.aws.amazon.com/athena/latest/ug/error-reference.html#error-reference-error-type-reference) - // in the Amazon Athena User Guide. - ErrorType *int64 `type:"integer"` - - // True if the query might succeed if resubmitted. - Retryable *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s AthenaError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AthenaError) GoString() string { - return s.String() -} - -// SetErrorCategory sets the ErrorCategory field's value. -func (s *AthenaError) SetErrorCategory(v int64) *AthenaError { - s.ErrorCategory = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *AthenaError) SetErrorMessage(v string) *AthenaError { - s.ErrorMessage = &v - return s -} - -// SetErrorType sets the ErrorType field's value. -func (s *AthenaError) SetErrorType(v int64) *AthenaError { - s.ErrorType = &v - return s -} - -// SetRetryable sets the Retryable field's value. -func (s *AthenaError) SetRetryable(v bool) *AthenaError { - s.Retryable = &v - return s -} - -// Contains an array of named query IDs. -type BatchGetNamedQueryInput struct { - _ struct{} `type:"structure"` - - // An array of query IDs. - // - // NamedQueryIds is a required field - NamedQueryIds []*string `min:"1" type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetNamedQueryInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetNamedQueryInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetNamedQueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetNamedQueryInput"} - if s.NamedQueryIds == nil { - invalidParams.Add(request.NewErrParamRequired("NamedQueryIds")) - } - if s.NamedQueryIds != nil && len(s.NamedQueryIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NamedQueryIds", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNamedQueryIds sets the NamedQueryIds field's value. -func (s *BatchGetNamedQueryInput) SetNamedQueryIds(v []*string) *BatchGetNamedQueryInput { - s.NamedQueryIds = v - return s -} - -type BatchGetNamedQueryOutput struct { - _ struct{} `type:"structure"` - - // Information about the named query IDs submitted. - NamedQueries []*NamedQuery `type:"list"` - - // Information about provided query IDs. - UnprocessedNamedQueryIds []*UnprocessedNamedQueryId `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetNamedQueryOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetNamedQueryOutput) GoString() string { - return s.String() -} - -// SetNamedQueries sets the NamedQueries field's value. -func (s *BatchGetNamedQueryOutput) SetNamedQueries(v []*NamedQuery) *BatchGetNamedQueryOutput { - s.NamedQueries = v - return s -} - -// SetUnprocessedNamedQueryIds sets the UnprocessedNamedQueryIds field's value. -func (s *BatchGetNamedQueryOutput) SetUnprocessedNamedQueryIds(v []*UnprocessedNamedQueryId) *BatchGetNamedQueryOutput { - s.UnprocessedNamedQueryIds = v - return s -} - -type BatchGetPreparedStatementInput struct { - _ struct{} `type:"structure"` - - // A list of prepared statement names to return. - // - // PreparedStatementNames is a required field - PreparedStatementNames []*string `type:"list" required:"true"` - - // The name of the workgroup to which the prepared statements belong. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetPreparedStatementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetPreparedStatementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetPreparedStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetPreparedStatementInput"} - if s.PreparedStatementNames == nil { - invalidParams.Add(request.NewErrParamRequired("PreparedStatementNames")) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPreparedStatementNames sets the PreparedStatementNames field's value. -func (s *BatchGetPreparedStatementInput) SetPreparedStatementNames(v []*string) *BatchGetPreparedStatementInput { - s.PreparedStatementNames = v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *BatchGetPreparedStatementInput) SetWorkGroup(v string) *BatchGetPreparedStatementInput { - s.WorkGroup = &v - return s -} - -type BatchGetPreparedStatementOutput struct { - _ struct{} `type:"structure"` - - // The list of prepared statements returned. - PreparedStatements []*PreparedStatement `type:"list"` - - // A list of one or more prepared statements that were requested but could not - // be returned. - UnprocessedPreparedStatementNames []*UnprocessedPreparedStatementName `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetPreparedStatementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetPreparedStatementOutput) GoString() string { - return s.String() -} - -// SetPreparedStatements sets the PreparedStatements field's value. -func (s *BatchGetPreparedStatementOutput) SetPreparedStatements(v []*PreparedStatement) *BatchGetPreparedStatementOutput { - s.PreparedStatements = v - return s -} - -// SetUnprocessedPreparedStatementNames sets the UnprocessedPreparedStatementNames field's value. -func (s *BatchGetPreparedStatementOutput) SetUnprocessedPreparedStatementNames(v []*UnprocessedPreparedStatementName) *BatchGetPreparedStatementOutput { - s.UnprocessedPreparedStatementNames = v - return s -} - -// Contains an array of query execution IDs. -type BatchGetQueryExecutionInput struct { - _ struct{} `type:"structure"` - - // An array of query execution IDs. - // - // QueryExecutionIds is a required field - QueryExecutionIds []*string `min:"1" type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetQueryExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetQueryExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetQueryExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetQueryExecutionInput"} - if s.QueryExecutionIds == nil { - invalidParams.Add(request.NewErrParamRequired("QueryExecutionIds")) - } - if s.QueryExecutionIds != nil && len(s.QueryExecutionIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryExecutionIds", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueryExecutionIds sets the QueryExecutionIds field's value. -func (s *BatchGetQueryExecutionInput) SetQueryExecutionIds(v []*string) *BatchGetQueryExecutionInput { - s.QueryExecutionIds = v - return s -} - -type BatchGetQueryExecutionOutput struct { - _ struct{} `type:"structure"` - - // Information about a query execution. - QueryExecutions []*QueryExecution `type:"list"` - - // Information about the query executions that failed to run. - UnprocessedQueryExecutionIds []*UnprocessedQueryExecutionId `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetQueryExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetQueryExecutionOutput) GoString() string { - return s.String() -} - -// SetQueryExecutions sets the QueryExecutions field's value. 
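A hedged sketch of the batch-get pattern these input/output shapes support: validate the IDs, call the corresponding operation on the v1 client (assumed here as BatchGetNamedQuery), and inspect the unprocessed IDs separately. The example query IDs are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	client := athena.New(session.Must(session.NewSession()))

	// NamedQueryIds must contain at least one ID (see Validate above).
	input := &athena.BatchGetNamedQueryInput{
		NamedQueryIds: aws.StringSlice([]string{"query-id-1", "query-id-2"}),
	}
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	out, err := client.BatchGetNamedQuery(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, q := range out.NamedQueries {
		fmt.Println("found:", aws.StringValue(q.Name))
	}
	for _, u := range out.UnprocessedNamedQueryIds {
		fmt.Println("unprocessed:", aws.StringValue(u.NamedQueryId))
	}
}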
-func (s *BatchGetQueryExecutionOutput) SetQueryExecutions(v []*QueryExecution) *BatchGetQueryExecutionOutput { - s.QueryExecutions = v - return s -} - -// SetUnprocessedQueryExecutionIds sets the UnprocessedQueryExecutionIds field's value. -func (s *BatchGetQueryExecutionOutput) SetUnprocessedQueryExecutionIds(v []*UnprocessedQueryExecutionId) *BatchGetQueryExecutionOutput { - s.UnprocessedQueryExecutionIds = v - return s -} - -// Contains configuration information for the calculation. -type CalculationConfiguration struct { - _ struct{} `type:"structure"` - - // A string that contains the code for the calculation. - CodeBlock *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationConfiguration) GoString() string { - return s.String() -} - -// SetCodeBlock sets the CodeBlock field's value. -func (s *CalculationConfiguration) SetCodeBlock(v string) *CalculationConfiguration { - s.CodeBlock = &v - return s -} - -// Contains information about an application-specific calculation result. -type CalculationResult struct { - _ struct{} `type:"structure"` - - // The Amazon S3 location of the folder for the calculation results. - ResultS3Uri *string `type:"string"` - - // The data format of the calculation result. - ResultType *string `min:"1" type:"string"` - - // The Amazon S3 location of the stderr error messages file for the calculation. - StdErrorS3Uri *string `type:"string"` - - // The Amazon S3 location of the stdout file for the calculation. - StdOutS3Uri *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationResult) GoString() string { - return s.String() -} - -// SetResultS3Uri sets the ResultS3Uri field's value. -func (s *CalculationResult) SetResultS3Uri(v string) *CalculationResult { - s.ResultS3Uri = &v - return s -} - -// SetResultType sets the ResultType field's value. -func (s *CalculationResult) SetResultType(v string) *CalculationResult { - s.ResultType = &v - return s -} - -// SetStdErrorS3Uri sets the StdErrorS3Uri field's value. -func (s *CalculationResult) SetStdErrorS3Uri(v string) *CalculationResult { - s.StdErrorS3Uri = &v - return s -} - -// SetStdOutS3Uri sets the StdOutS3Uri field's value. -func (s *CalculationResult) SetStdOutS3Uri(v string) *CalculationResult { - s.StdOutS3Uri = &v - return s -} - -// Contains statistics for a notebook calculation. 
-type CalculationStatistics struct { - _ struct{} `type:"structure"` - - // The data processing unit execution time in milliseconds for the calculation. - DpuExecutionInMillis *int64 `type:"long"` - - // The progress of the calculation. - Progress *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationStatistics) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationStatistics) GoString() string { - return s.String() -} - -// SetDpuExecutionInMillis sets the DpuExecutionInMillis field's value. -func (s *CalculationStatistics) SetDpuExecutionInMillis(v int64) *CalculationStatistics { - s.DpuExecutionInMillis = &v - return s -} - -// SetProgress sets the Progress field's value. -func (s *CalculationStatistics) SetProgress(v string) *CalculationStatistics { - s.Progress = &v - return s -} - -// Contains information about the status of a notebook calculation. -type CalculationStatus struct { - _ struct{} `type:"structure"` - - // The date and time the calculation completed processing. - CompletionDateTime *time.Time `type:"timestamp"` - - // The state of the calculation execution. A description of each state follows. - // - // CREATING - The calculation is in the process of being created. - // - // CREATED - The calculation has been created and is ready to run. - // - // QUEUED - The calculation has been queued for processing. - // - // RUNNING - The calculation is running. - // - // CANCELING - A request to cancel the calculation has been received and the - // system is working to stop it. - // - // CANCELED - The calculation is no longer running as the result of a cancel - // request. - // - // COMPLETED - The calculation has completed without error. - // - // FAILED - The calculation failed and is no longer running. - State *string `type:"string" enum:"CalculationExecutionState"` - - // The reason for the calculation state change (for example, the calculation - // was canceled because the session was terminated). - StateChangeReason *string `min:"1" type:"string"` - - // The date and time the calculation was submitted for processing. - SubmissionDateTime *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationStatus) GoString() string { - return s.String() -} - -// SetCompletionDateTime sets the CompletionDateTime field's value. -func (s *CalculationStatus) SetCompletionDateTime(v time.Time) *CalculationStatus { - s.CompletionDateTime = &v - return s -} - -// SetState sets the State field's value. 
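To make the state machine in the CalculationStatus comment concrete, a small illustrative helper that treats the documented terminal states as finished; the state strings are taken verbatim from the comment above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

// isCalculationDone reports whether a calculation has reached one of the
// terminal states documented above (COMPLETED, FAILED, CANCELED).
func isCalculationDone(status *athena.CalculationStatus) bool {
	switch aws.StringValue(status.State) {
	case "COMPLETED", "FAILED", "CANCELED":
		return true
	default: // CREATING, CREATED, QUEUED, RUNNING, CANCELING
		return false
	}
}

func main() {
	s := &athena.CalculationStatus{State: aws.String("RUNNING")}
	fmt.Println(isCalculationDone(s)) // false
}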
-func (s *CalculationStatus) SetState(v string) *CalculationStatus { - s.State = &v - return s -} - -// SetStateChangeReason sets the StateChangeReason field's value. -func (s *CalculationStatus) SetStateChangeReason(v string) *CalculationStatus { - s.StateChangeReason = &v - return s -} - -// SetSubmissionDateTime sets the SubmissionDateTime field's value. -func (s *CalculationStatus) SetSubmissionDateTime(v time.Time) *CalculationStatus { - s.SubmissionDateTime = &v - return s -} - -// Summary information for a notebook calculation. -type CalculationSummary struct { - _ struct{} `type:"structure"` - - // The calculation execution UUID. - CalculationExecutionId *string `min:"1" type:"string"` - - // A description of the calculation. - Description *string `min:"1" type:"string"` - - // Contains information about the status of the calculation. - Status *CalculationStatus `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CalculationSummary) GoString() string { - return s.String() -} - -// SetCalculationExecutionId sets the CalculationExecutionId field's value. -func (s *CalculationSummary) SetCalculationExecutionId(v string) *CalculationSummary { - s.CalculationExecutionId = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CalculationSummary) SetDescription(v string) *CalculationSummary { - s.Description = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *CalculationSummary) SetStatus(v *CalculationStatus) *CalculationSummary { - s.Status = v - return s -} - -type CancelCapacityReservationInput struct { - _ struct{} `type:"structure"` - - // The name of the capacity reservation to cancel. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelCapacityReservationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelCapacityReservationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CancelCapacityReservationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CancelCapacityReservationInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *CancelCapacityReservationInput) SetName(v string) *CancelCapacityReservationInput { - s.Name = &v - return s -} - -type CancelCapacityReservationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelCapacityReservationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelCapacityReservationOutput) GoString() string { - return s.String() -} - -// Contains the submission time of a single allocation request for a capacity -// reservation and the most recent status of the attempted allocation. -type CapacityAllocation struct { - _ struct{} `type:"structure"` - - // The time when the capacity allocation request was completed. - RequestCompletionTime *time.Time `type:"timestamp"` - - // The time when the capacity allocation was requested. - // - // RequestTime is a required field - RequestTime *time.Time `type:"timestamp" required:"true"` - - // The status of the capacity allocation. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"CapacityAllocationStatus"` - - // The status message of the capacity allocation. - StatusMessage *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CapacityAllocation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CapacityAllocation) GoString() string { - return s.String() -} - -// SetRequestCompletionTime sets the RequestCompletionTime field's value. -func (s *CapacityAllocation) SetRequestCompletionTime(v time.Time) *CapacityAllocation { - s.RequestCompletionTime = &v - return s -} - -// SetRequestTime sets the RequestTime field's value. -func (s *CapacityAllocation) SetRequestTime(v time.Time) *CapacityAllocation { - s.RequestTime = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *CapacityAllocation) SetStatus(v string) *CapacityAllocation { - s.Status = &v - return s -} - -// SetStatusMessage sets the StatusMessage field's value. 
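A minimal sketch of cancelling a capacity reservation with the removed v1 types; the reservation name is a placeholder, and the CancelCapacityReservation call on the client is assumed to exist alongside the input/output shapes above.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	client := athena.New(session.Must(session.NewSession()))

	// Build the input via its setter and check the min-length constraint first.
	input := (&athena.CancelCapacityReservationInput{}).SetName("example-reservation")
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	if _, err := client.CancelCapacityReservation(input); err != nil {
		log.Fatal(err)
	}
	log.Println("cancellation requested")
}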
-func (s *CapacityAllocation) SetStatusMessage(v string) *CapacityAllocation { - s.StatusMessage = &v - return s -} - -// A mapping between one or more workgroups and a capacity reservation. -type CapacityAssignment struct { - _ struct{} `type:"structure"` - - // The list of workgroup names for the capacity assignment. - WorkGroupNames []*string `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CapacityAssignment) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CapacityAssignment) GoString() string { - return s.String() -} - -// SetWorkGroupNames sets the WorkGroupNames field's value. -func (s *CapacityAssignment) SetWorkGroupNames(v []*string) *CapacityAssignment { - s.WorkGroupNames = v - return s -} - -// Assigns Athena workgroups (and hence their queries) to capacity reservations. -// A capacity reservation can have only one capacity assignment configuration, -// but the capacity assignment configuration can be made up of multiple individual -// assignments. Each assignment specifies how Athena queries can consume capacity -// from the capacity reservation that their workgroup is mapped to. -type CapacityAssignmentConfiguration struct { - _ struct{} `type:"structure"` - - // The list of assignments that make up the capacity assignment configuration. - CapacityAssignments []*CapacityAssignment `type:"list"` - - // The name of the reservation that the capacity assignment configuration is - // for. - CapacityReservationName *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CapacityAssignmentConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CapacityAssignmentConfiguration) GoString() string { - return s.String() -} - -// SetCapacityAssignments sets the CapacityAssignments field's value. -func (s *CapacityAssignmentConfiguration) SetCapacityAssignments(v []*CapacityAssignment) *CapacityAssignmentConfiguration { - s.CapacityAssignments = v - return s -} - -// SetCapacityReservationName sets the CapacityReservationName field's value. -func (s *CapacityAssignmentConfiguration) SetCapacityReservationName(v string) *CapacityAssignmentConfiguration { - s.CapacityReservationName = &v - return s -} - -// A reservation for a specified number of data processing units (DPUs). When -// a reservation is initially created, it has no DPUs. Athena allocates DPUs -// until the allocated amount equals the requested amount. -type CapacityReservation struct { - _ struct{} `type:"structure"` - - // The number of data processing units currently allocated. 
- // - // AllocatedDpus is a required field - AllocatedDpus *int64 `type:"integer" required:"true"` - - // The time in UTC epoch millis when the capacity reservation was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" required:"true"` - - // Contains the submission time of a single allocation request for a capacity - // reservation and the most recent status of the attempted allocation. - LastAllocation *CapacityAllocation `type:"structure"` - - // The time of the most recent capacity allocation that succeeded. - LastSuccessfulAllocationTime *time.Time `type:"timestamp"` - - // The name of the capacity reservation. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The status of the capacity reservation. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"CapacityReservationStatus"` - - // The number of data processing units requested. - // - // TargetDpus is a required field - TargetDpus *int64 `min:"24" type:"integer" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CapacityReservation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CapacityReservation) GoString() string { - return s.String() -} - -// SetAllocatedDpus sets the AllocatedDpus field's value. -func (s *CapacityReservation) SetAllocatedDpus(v int64) *CapacityReservation { - s.AllocatedDpus = &v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *CapacityReservation) SetCreationTime(v time.Time) *CapacityReservation { - s.CreationTime = &v - return s -} - -// SetLastAllocation sets the LastAllocation field's value. -func (s *CapacityReservation) SetLastAllocation(v *CapacityAllocation) *CapacityReservation { - s.LastAllocation = v - return s -} - -// SetLastSuccessfulAllocationTime sets the LastSuccessfulAllocationTime field's value. -func (s *CapacityReservation) SetLastSuccessfulAllocationTime(v time.Time) *CapacityReservation { - s.LastSuccessfulAllocationTime = &v - return s -} - -// SetName sets the Name field's value. -func (s *CapacityReservation) SetName(v string) *CapacityReservation { - s.Name = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *CapacityReservation) SetStatus(v string) *CapacityReservation { - s.Status = &v - return s -} - -// SetTargetDpus sets the TargetDpus field's value. -func (s *CapacityReservation) SetTargetDpus(v int64) *CapacityReservation { - s.TargetDpus = &v - return s -} - -// Contains metadata for a column in a table. -type Column struct { - _ struct{} `type:"structure"` - - // Optional information about the column. - Comment *string `type:"string"` - - // The name of the column. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The data type of the column. - Type *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s Column) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Column) GoString() string { - return s.String() -} - -// SetComment sets the Comment field's value. -func (s *Column) SetComment(v string) *Column { - s.Comment = &v - return s -} - -// SetName sets the Name field's value. -func (s *Column) SetName(v string) *Column { - s.Name = &v - return s -} - -// SetType sets the Type field's value. -func (s *Column) SetType(v string) *Column { - s.Type = &v - return s -} - -// Information about the columns in a query execution result. -type ColumnInfo struct { - _ struct{} `type:"structure"` - - // Indicates whether values in the column are case-sensitive. - CaseSensitive *bool `type:"boolean"` - - // The catalog to which the query results belong. - CatalogName *string `type:"string"` - - // A column label. - Label *string `type:"string"` - - // The name of the column. - // - // Name is a required field - Name *string `type:"string" required:"true"` - - // Unsupported constraint. This value always shows as UNKNOWN. - Nullable *string `type:"string" enum:"ColumnNullable"` - - // For DECIMAL data types, specifies the total number of digits, up to 38. For - // performance reasons, we recommend up to 18 digits. - Precision *int64 `type:"integer"` - - // For DECIMAL data types, specifies the total number of digits in the fractional - // part of the value. Defaults to 0. - Scale *int64 `type:"integer"` - - // The schema name (database name) to which the query results belong. - SchemaName *string `type:"string"` - - // The table name for the query results. - TableName *string `type:"string"` - - // The data type of the column. - // - // Type is a required field - Type *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ColumnInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ColumnInfo) GoString() string { - return s.String() -} - -// SetCaseSensitive sets the CaseSensitive field's value. -func (s *ColumnInfo) SetCaseSensitive(v bool) *ColumnInfo { - s.CaseSensitive = &v - return s -} - -// SetCatalogName sets the CatalogName field's value. -func (s *ColumnInfo) SetCatalogName(v string) *ColumnInfo { - s.CatalogName = &v - return s -} - -// SetLabel sets the Label field's value. -func (s *ColumnInfo) SetLabel(v string) *ColumnInfo { - s.Label = &v - return s -} - -// SetName sets the Name field's value. -func (s *ColumnInfo) SetName(v string) *ColumnInfo { - s.Name = &v - return s -} - -// SetNullable sets the Nullable field's value. -func (s *ColumnInfo) SetNullable(v string) *ColumnInfo { - s.Nullable = &v - return s -} - -// SetPrecision sets the Precision field's value. 
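To illustrate how the ColumnInfo metadata above is typically consumed, a small sketch that prints name, type, and the DECIMAL precision/scale fields; how the slice is obtained (for example from query results metadata) is left out, and the lowercase "decimal" type string is an assumption.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

// describeColumns prints one line per result column, adding the DECIMAL
// precision/scale documented above when they apply.
func describeColumns(cols []*athena.ColumnInfo) {
	for _, c := range cols {
		fmt.Printf("%s %s", aws.StringValue(c.Name), aws.StringValue(c.Type))
		if aws.StringValue(c.Type) == "decimal" {
			fmt.Printf(" (precision=%d, scale=%d)", aws.Int64Value(c.Precision), aws.Int64Value(c.Scale))
		}
		fmt.Println()
	}
}

func main() {
	describeColumns([]*athena.ColumnInfo{
		{Name: aws.String("amount"), Type: aws.String("decimal"), Precision: aws.Int64(18), Scale: aws.Int64(2)},
	})
}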
-func (s *ColumnInfo) SetPrecision(v int64) *ColumnInfo { - s.Precision = &v - return s -} - -// SetScale sets the Scale field's value. -func (s *ColumnInfo) SetScale(v int64) *ColumnInfo { - s.Scale = &v - return s -} - -// SetSchemaName sets the SchemaName field's value. -func (s *ColumnInfo) SetSchemaName(v string) *ColumnInfo { - s.SchemaName = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *ColumnInfo) SetTableName(v string) *ColumnInfo { - s.TableName = &v - return s -} - -// SetType sets the Type field's value. -func (s *ColumnInfo) SetType(v string) *ColumnInfo { - s.Type = &v - return s -} - -type CreateCapacityReservationInput struct { - _ struct{} `type:"structure"` - - // The name of the capacity reservation to create. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The tags for the capacity reservation. - Tags []*Tag `type:"list"` - - // The number of requested data processing units. - // - // TargetDpus is a required field - TargetDpus *int64 `min:"24" type:"integer" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCapacityReservationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCapacityReservationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCapacityReservationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCapacityReservationInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.TargetDpus == nil { - invalidParams.Add(request.NewErrParamRequired("TargetDpus")) - } - if s.TargetDpus != nil && *s.TargetDpus < 24 { - invalidParams.Add(request.NewErrParamMinValue("TargetDpus", 24)) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *CreateCapacityReservationInput) SetName(v string) *CreateCapacityReservationInput { - s.Name = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateCapacityReservationInput) SetTags(v []*Tag) *CreateCapacityReservationInput { - s.Tags = v - return s -} - -// SetTargetDpus sets the TargetDpus field's value. -func (s *CreateCapacityReservationInput) SetTargetDpus(v int64) *CreateCapacityReservationInput { - s.TargetDpus = &v - return s -} - -type CreateCapacityReservationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCapacityReservationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCapacityReservationOutput) GoString() string { - return s.String() -} - -type CreateDataCatalogInput struct { - _ struct{} `type:"structure"` - - // A description of the data catalog to be created. - Description *string `min:"1" type:"string"` - - // The name of the data catalog to create. The catalog name must be unique for - // the Amazon Web Services account and can use a maximum of 127 alphanumeric, - // underscore, at sign, or hyphen characters. The remainder of the length constraint - // of 256 is reserved for use by Athena. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Specifies the Lambda function or functions to use for creating the data catalog. - // This is a mapping whose values depend on the catalog type. - // - // * For the HIVE data catalog type, use the following syntax. The metadata-function - // parameter is required. The sdk-version parameter is optional and defaults - // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number - // - // * For the LAMBDA data catalog type, use one of the following sets of required - // parameters, but not both. If you have one Lambda function that processes - // metadata and another for reading the actual data, use the following syntax. - // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn - // If you have a composite Lambda function that processes both metadata and - // data, use the following syntax to specify your Lambda function. function=lambda_arn - // - // * The GLUE type takes a catalog ID parameter and is required. The catalog_id - // is the account ID of the Amazon Web Services account to which the Glue - // Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type - // also applies to the default AwsDataCatalog that already exists in your - // account, of which you can have only one and cannot modify. - Parameters map[string]*string `type:"map"` - - // A list of comma separated tags to add to the data catalog that is created. - Tags []*Tag `type:"list"` - - // The type of data catalog to create: LAMBDA for a federated catalog, HIVE - // for an external hive metastore, or GLUE for an Glue Data Catalog. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"DataCatalogType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateDataCatalogInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateDataCatalogInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
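The Parameters mapping described above is easiest to see in code. A hedged sketch that registers a LAMBDA catalog backed by a single composite function, per the "function=lambda_arn" form in the comment; the function ARN and catalog name are placeholders, and the CreateDataCatalog call on the client is assumed alongside the shapes shown here.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	client := athena.New(session.Must(session.NewSession()))

	// For the LAMBDA catalog type with one composite function, the mapping
	// uses the single "function" key, per the comment above.
	input := &athena.CreateDataCatalogInput{
		Name: aws.String("my_lambda_catalog"),
		Type: aws.String("LAMBDA"),
		Parameters: map[string]*string{
			"function": aws.String("arn:aws:lambda:us-east-1:123456789012:function:my-catalog-fn"),
		},
	}
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	if _, err := client.CreateDataCatalog(input); err != nil {
		log.Fatal(err)
	}
}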
-func (s *CreateDataCatalogInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDataCatalogInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *CreateDataCatalogInput) SetDescription(v string) *CreateDataCatalogInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateDataCatalogInput) SetName(v string) *CreateDataCatalogInput { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *CreateDataCatalogInput) SetParameters(v map[string]*string) *CreateDataCatalogInput { - s.Parameters = v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateDataCatalogInput) SetTags(v []*Tag) *CreateDataCatalogInput { - s.Tags = v - return s -} - -// SetType sets the Type field's value. -func (s *CreateDataCatalogInput) SetType(v string) *CreateDataCatalogInput { - s.Type = &v - return s -} - -type CreateDataCatalogOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateDataCatalogOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateDataCatalogOutput) GoString() string { - return s.String() -} - -type CreateNamedQueryInput struct { - _ struct{} `type:"structure"` - - // A unique case-sensitive string used to ensure the request to create the query - // is idempotent (executes only once). If another CreateNamedQuery request is - // received, the same response is returned and another query is not created. - // If a parameter has changed, for example, the QueryString, an error is returned. - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // users. If you are not using the Amazon Web Services SDK or the Amazon Web - // Services CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` - - // The database to which the query belongs. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` - - // The query description. - Description *string `min:"1" type:"string"` - - // The query name. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The contents of the query with all query statements. - // - // QueryString is a required field - QueryString *string `min:"1" type:"string" required:"true"` - - // The name of the workgroup in which the named query is being created. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateNamedQueryInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateNamedQueryInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateNamedQueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateNamedQueryInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) - } - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.QueryString == nil { - invalidParams.Add(request.NewErrParamRequired("QueryString")) - } - if s.QueryString != nil && len(*s.QueryString) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *CreateNamedQueryInput) SetClientRequestToken(v string) *CreateNamedQueryInput { - s.ClientRequestToken = &v - return s -} - -// SetDatabase sets the Database field's value. -func (s *CreateNamedQueryInput) SetDatabase(v string) *CreateNamedQueryInput { - s.Database = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateNamedQueryInput) SetDescription(v string) *CreateNamedQueryInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateNamedQueryInput) SetName(v string) *CreateNamedQueryInput { - s.Name = &v - return s -} - -// SetQueryString sets the QueryString field's value. -func (s *CreateNamedQueryInput) SetQueryString(v string) *CreateNamedQueryInput { - s.QueryString = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *CreateNamedQueryInput) SetWorkGroup(v string) *CreateNamedQueryInput { - s.WorkGroup = &v - return s -} - -type CreateNamedQueryOutput struct { - _ struct{} `type:"structure"` - - // The unique ID of the query. - NamedQueryId *string `min:"1" type:"string"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateNamedQueryOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateNamedQueryOutput) GoString() string { - return s.String() -} - -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *CreateNamedQueryOutput) SetNamedQueryId(v string) *CreateNamedQueryOutput { - s.NamedQueryId = &v - return s -} - -type CreateNotebookInput struct { - _ struct{} `type:"structure"` - - // A unique case-sensitive string used to ensure the request to create the notebook - // is idempotent (executes only once). - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // you. If you are not using the Amazon Web Services SDK or the Amazon Web Services - // CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"1" type:"string"` - - // The name of the ipynb file to be created in the Spark workgroup, without - // the .ipynb extension. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The name of the Spark enabled workgroup in which the notebook will be created. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateNotebookInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateNotebookInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateNotebookInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateNotebookInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *CreateNotebookInput) SetClientRequestToken(v string) *CreateNotebookInput { - s.ClientRequestToken = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateNotebookInput) SetName(v string) *CreateNotebookInput { - s.Name = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. 
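A minimal sketch of saving a named query with the input shape above; ClientRequestToken is left unset so the SDK's idempotency-token auto-generation described in the comment applies, and the database, workgroup, and SQL text are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	client := athena.New(session.Must(session.NewSession()))

	out, err := client.CreateNamedQuery(&athena.CreateNamedQueryInput{
		// ClientRequestToken is omitted: the SDK auto-generates it, which makes
		// the create idempotent as described above.
		Database:    aws.String("default"),
		Name:        aws.String("daily_active_users"),
		QueryString: aws.String("SELECT count(*) FROM events WHERE day = current_date"),
		WorkGroup:   aws.String("primary"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("named query id:", aws.StringValue(out.NamedQueryId))
}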
-func (s *CreateNotebookInput) SetWorkGroup(v string) *CreateNotebookInput { - s.WorkGroup = &v - return s -} - -type CreateNotebookOutput struct { - _ struct{} `type:"structure"` - - // A unique identifier for the notebook. - NotebookId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateNotebookOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateNotebookOutput) GoString() string { - return s.String() -} - -// SetNotebookId sets the NotebookId field's value. -func (s *CreateNotebookOutput) SetNotebookId(v string) *CreateNotebookOutput { - s.NotebookId = &v - return s -} - -type CreatePreparedStatementInput struct { - _ struct{} `type:"structure"` - - // The description of the prepared statement. - Description *string `min:"1" type:"string"` - - // The query string for the prepared statement. - // - // QueryStatement is a required field - QueryStatement *string `min:"1" type:"string" required:"true"` - - // The name of the prepared statement. - // - // StatementName is a required field - StatementName *string `min:"1" type:"string" required:"true"` - - // The name of the workgroup to which the prepared statement belongs. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePreparedStatementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePreparedStatementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreatePreparedStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePreparedStatementInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.QueryStatement == nil { - invalidParams.Add(request.NewErrParamRequired("QueryStatement")) - } - if s.QueryStatement != nil && len(*s.QueryStatement) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryStatement", 1)) - } - if s.StatementName == nil { - invalidParams.Add(request.NewErrParamRequired("StatementName")) - } - if s.StatementName != nil && len(*s.StatementName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. 
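A sketch of registering a prepared statement with the input shape above; the statement name, workgroup, and parameterized SQL are illustrative, and the CreatePreparedStatement call on the client is assumed alongside the shapes shown here.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	client := athena.New(session.Must(session.NewSession()))

	input := &athena.CreatePreparedStatementInput{
		StatementName:  aws.String("events_by_day"),
		WorkGroup:      aws.String("primary"),
		QueryStatement: aws.String("SELECT * FROM events WHERE day = ?"),
		Description:    aws.String("Events for a single day"),
	}
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	if _, err := client.CreatePreparedStatement(input); err != nil {
		log.Fatal(err)
	}
}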
-func (s *CreatePreparedStatementInput) SetDescription(v string) *CreatePreparedStatementInput { - s.Description = &v - return s -} - -// SetQueryStatement sets the QueryStatement field's value. -func (s *CreatePreparedStatementInput) SetQueryStatement(v string) *CreatePreparedStatementInput { - s.QueryStatement = &v - return s -} - -// SetStatementName sets the StatementName field's value. -func (s *CreatePreparedStatementInput) SetStatementName(v string) *CreatePreparedStatementInput { - s.StatementName = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *CreatePreparedStatementInput) SetWorkGroup(v string) *CreatePreparedStatementInput { - s.WorkGroup = &v - return s -} - -type CreatePreparedStatementOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePreparedStatementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePreparedStatementOutput) GoString() string { - return s.String() -} - -type CreatePresignedNotebookUrlInput struct { - _ struct{} `type:"structure"` - - // The session ID. - // - // SessionId is a required field - SessionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePresignedNotebookUrlInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePresignedNotebookUrlInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreatePresignedNotebookUrlInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePresignedNotebookUrlInput"} - if s.SessionId == nil { - invalidParams.Add(request.NewErrParamRequired("SessionId")) - } - if s.SessionId != nil && len(*s.SessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSessionId sets the SessionId field's value. -func (s *CreatePresignedNotebookUrlInput) SetSessionId(v string) *CreatePresignedNotebookUrlInput { - s.SessionId = &v - return s -} - -type CreatePresignedNotebookUrlOutput struct { - _ struct{} `type:"structure"` - - // The authentication token for the notebook. - // - // AuthToken is a required field - AuthToken *string `type:"string" required:"true"` - - // The UTC epoch time when the authentication token expires. - // - // AuthTokenExpirationTime is a required field - AuthTokenExpirationTime *int64 `type:"long" required:"true"` - - // The URL of the notebook. 
The URL includes the authentication token and notebook - // file name and points directly to the opened notebook. - // - // NotebookUrl is a required field - NotebookUrl *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePresignedNotebookUrlOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreatePresignedNotebookUrlOutput) GoString() string { - return s.String() -} - -// SetAuthToken sets the AuthToken field's value. -func (s *CreatePresignedNotebookUrlOutput) SetAuthToken(v string) *CreatePresignedNotebookUrlOutput { - s.AuthToken = &v - return s -} - -// SetAuthTokenExpirationTime sets the AuthTokenExpirationTime field's value. -func (s *CreatePresignedNotebookUrlOutput) SetAuthTokenExpirationTime(v int64) *CreatePresignedNotebookUrlOutput { - s.AuthTokenExpirationTime = &v - return s -} - -// SetNotebookUrl sets the NotebookUrl field's value. -func (s *CreatePresignedNotebookUrlOutput) SetNotebookUrl(v string) *CreatePresignedNotebookUrlOutput { - s.NotebookUrl = &v - return s -} - -type CreateWorkGroupInput struct { - _ struct{} `type:"structure"` - - // Contains configuration information for creating an Athena SQL workgroup or - // Spark enabled Athena workgroup. Athena SQL workgroup configuration includes - // the location in Amazon S3 where query and calculation results are stored, - // the encryption configuration, if any, used for encrypting query results, - // whether the Amazon CloudWatch Metrics are enabled for the workgroup, the - // limit for the amount of bytes scanned (cutoff) per query, if it is specified, - // and whether workgroup's settings (specified with EnforceWorkGroupConfiguration) - // in the WorkGroupConfiguration override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - Configuration *WorkGroupConfiguration `type:"structure"` - - // The workgroup description. - Description *string `type:"string"` - - // The workgroup name. - // - // Name is a required field - Name *string `type:"string" required:"true"` - - // A list of comma separated tags to add to the workgroup that is created. - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateWorkGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateWorkGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateWorkGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateWorkGroupInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Configuration != nil { - if err := s.Configuration.Validate(); err != nil { - invalidParams.AddNested("Configuration", err.(request.ErrInvalidParams)) - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConfiguration sets the Configuration field's value. -func (s *CreateWorkGroupInput) SetConfiguration(v *WorkGroupConfiguration) *CreateWorkGroupInput { - s.Configuration = v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateWorkGroupInput) SetDescription(v string) *CreateWorkGroupInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateWorkGroupInput) SetName(v string) *CreateWorkGroupInput { - s.Name = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateWorkGroupInput) SetTags(v []*Tag) *CreateWorkGroupInput { - s.Tags = v - return s -} - -type CreateWorkGroupOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateWorkGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateWorkGroupOutput) GoString() string { - return s.String() -} - -// Specifies the customer managed KMS key that is used to encrypt the user's -// data stores in Athena. When an Amazon Web Services managed key is used, this -// value is null. This setting does not apply to Athena SQL workgroups. -type CustomerContentEncryptionConfiguration struct { - _ struct{} `type:"structure"` - - // The customer managed KMS key that is used to encrypt the user's data stores - // in Athena. - // - // KmsKey is a required field - KmsKey *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomerContentEncryptionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomerContentEncryptionConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
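A small sketch of the CreateWorkGroupInput type deleted above, under the assumption that only the required Name and an optional Description are set; Configuration and Tags, when present, are validated recursively as the Validate method above shows. Names are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	// Name is the only required field of CreateWorkGroupInput.
	wg := (&athena.CreateWorkGroupInput{}).
		SetName("reporting").
		SetDescription("workgroup for scheduled reporting queries")

	// Validate would also descend into Configuration and Tags if they were set.
	if err := wg.Validate(); err != nil {
		log.Fatalf("invalid CreateWorkGroupInput: %v", err)
	}
}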
-func (s *CustomerContentEncryptionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CustomerContentEncryptionConfiguration"} - if s.KmsKey == nil { - invalidParams.Add(request.NewErrParamRequired("KmsKey")) - } - if s.KmsKey != nil && len(*s.KmsKey) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KmsKey", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKmsKey sets the KmsKey field's value. -func (s *CustomerContentEncryptionConfiguration) SetKmsKey(v string) *CustomerContentEncryptionConfiguration { - s.KmsKey = &v - return s -} - -// Contains information about a data catalog in an Amazon Web Services account. -// -// In the Athena console, data catalogs are listed as "data sources" on the -// Data sources page under the Data source name column. -type DataCatalog struct { - _ struct{} `type:"structure"` - - // An optional description of the data catalog. - Description *string `min:"1" type:"string"` - - // The name of the data catalog. The catalog name must be unique for the Amazon - // Web Services account and can use a maximum of 127 alphanumeric, underscore, - // at sign, or hyphen characters. The remainder of the length constraint of - // 256 is reserved for use by Athena. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Specifies the Lambda function or functions to use for the data catalog. This - // is a mapping whose values depend on the catalog type. - // - // * For the HIVE data catalog type, use the following syntax. The metadata-function - // parameter is required. The sdk-version parameter is optional and defaults - // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number - // - // * For the LAMBDA data catalog type, use one of the following sets of required - // parameters, but not both. If you have one Lambda function that processes - // metadata and another for reading the actual data, use the following syntax. - // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn - // If you have a composite Lambda function that processes both metadata and - // data, use the following syntax to specify your Lambda function. function=lambda_arn - // - // * The GLUE type takes a catalog ID parameter and is required. The catalog_id - // is the account ID of the Amazon Web Services account to which the Glue - // catalog belongs. catalog-id=catalog_id The GLUE data catalog type also - // applies to the default AwsDataCatalog that already exists in your account, - // of which you can have only one and cannot modify. - Parameters map[string]*string `type:"map"` - - // The type of data catalog to create: LAMBDA for a federated catalog, HIVE - // for an external hive metastore, or GLUE for an Glue Data Catalog. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"DataCatalogType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DataCatalog) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s DataCatalog) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *DataCatalog) SetDescription(v string) *DataCatalog { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *DataCatalog) SetName(v string) *DataCatalog { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *DataCatalog) SetParameters(v map[string]*string) *DataCatalog { - s.Parameters = v - return s -} - -// SetType sets the Type field's value. -func (s *DataCatalog) SetType(v string) *DataCatalog { - s.Type = &v - return s -} - -// The summary information for the data catalog, which includes its name and -// type. -type DataCatalogSummary struct { - _ struct{} `type:"structure"` - - // The name of the data catalog. The catalog name is unique for the Amazon Web - // Services account and can use a maximum of 127 alphanumeric, underscore, at - // sign, or hyphen characters. The remainder of the length constraint of 256 - // is reserved for use by Athena. - CatalogName *string `min:"1" type:"string"` - - // The data catalog type. - Type *string `type:"string" enum:"DataCatalogType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DataCatalogSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DataCatalogSummary) GoString() string { - return s.String() -} - -// SetCatalogName sets the CatalogName field's value. -func (s *DataCatalogSummary) SetCatalogName(v string) *DataCatalogSummary { - s.CatalogName = &v - return s -} - -// SetType sets the Type field's value. -func (s *DataCatalogSummary) SetType(v string) *DataCatalogSummary { - s.Type = &v - return s -} - -// Contains metadata information for a database in a data catalog. -type Database struct { - _ struct{} `type:"structure"` - - // An optional description of the database. - Description *string `min:"1" type:"string"` - - // The name of the database. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A set of custom key/value pairs. - Parameters map[string]*string `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Database) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Database) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *Database) SetDescription(v string) *Database { - s.Description = &v - return s -} - -// SetName sets the Name field's value. 
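To make the Parameters syntax documented above concrete, a hedged sketch of a DataCatalog value describing a Glue catalog: GLUE takes a catalog-id, HIVE takes metadata-function (and optionally sdk-version), and LAMBDA takes either a metadata-function/record-function pair or a single composite function key. The account ID and names are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	// Describe a Glue Data Catalog; the Parameters keys depend on Type,
	// as documented on the struct above.
	catalog := (&athena.DataCatalog{}).
		SetName("sales_glue").
		SetType("GLUE").
		SetDescription("Glue Data Catalog in the analytics account").
		SetParameters(map[string]*string{
			"catalog-id": aws.String("123456789012"), // placeholder account ID
		})

	fmt.Println(catalog)
}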
-func (s *Database) SetName(v string) *Database { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *Database) SetParameters(v map[string]*string) *Database { - s.Parameters = v - return s -} - -// A piece of data (a field in the table). -type Datum struct { - _ struct{} `type:"structure"` - - // The value of the datum. - VarCharValue *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Datum) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Datum) GoString() string { - return s.String() -} - -// SetVarCharValue sets the VarCharValue field's value. -func (s *Datum) SetVarCharValue(v string) *Datum { - s.VarCharValue = &v - return s -} - -type DeleteCapacityReservationInput struct { - _ struct{} `type:"structure"` - - // The name of the capacity reservation to delete. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCapacityReservationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCapacityReservationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCapacityReservationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCapacityReservationInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteCapacityReservationInput) SetName(v string) *DeleteCapacityReservationInput { - s.Name = &v - return s -} - -type DeleteCapacityReservationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCapacityReservationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s DeleteCapacityReservationOutput) GoString() string { - return s.String() -} - -type DeleteDataCatalogInput struct { - _ struct{} `type:"structure"` - - // The name of the data catalog to delete. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteDataCatalogInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteDataCatalogInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDataCatalogInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDataCatalogInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *DeleteDataCatalogInput) SetName(v string) *DeleteDataCatalogInput { - s.Name = &v - return s -} - -type DeleteDataCatalogOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteDataCatalogOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteDataCatalogOutput) GoString() string { - return s.String() -} - -type DeleteNamedQueryInput struct { - _ struct{} `type:"structure"` - - // The unique ID of the query to delete. - NamedQueryId *string `min:"1" type:"string" idempotencyToken:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNamedQueryInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNamedQueryInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteNamedQueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteNamedQueryInput"} - if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *DeleteNamedQueryInput) SetNamedQueryId(v string) *DeleteNamedQueryInput { - s.NamedQueryId = &v - return s -} - -type DeleteNamedQueryOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNamedQueryOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNamedQueryOutput) GoString() string { - return s.String() -} - -type DeleteNotebookInput struct { - _ struct{} `type:"structure"` - - // The ID of the notebook to delete. - // - // NotebookId is a required field - NotebookId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNotebookInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNotebookInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteNotebookInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteNotebookInput"} - if s.NotebookId == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookId")) - } - if s.NotebookId != nil && len(*s.NotebookId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotebookId sets the NotebookId field's value. -func (s *DeleteNotebookInput) SetNotebookId(v string) *DeleteNotebookInput { - s.NotebookId = &v - return s -} - -type DeleteNotebookOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNotebookOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s DeleteNotebookOutput) GoString() string { - return s.String() -} - -type DeletePreparedStatementInput struct { - _ struct{} `type:"structure"` - - // The name of the prepared statement to delete. - // - // StatementName is a required field - StatementName *string `min:"1" type:"string" required:"true"` - - // The workgroup to which the statement to be deleted belongs. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePreparedStatementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePreparedStatementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePreparedStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePreparedStatementInput"} - if s.StatementName == nil { - invalidParams.Add(request.NewErrParamRequired("StatementName")) - } - if s.StatementName != nil && len(*s.StatementName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatementName sets the StatementName field's value. -func (s *DeletePreparedStatementInput) SetStatementName(v string) *DeletePreparedStatementInput { - s.StatementName = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *DeletePreparedStatementInput) SetWorkGroup(v string) *DeletePreparedStatementInput { - s.WorkGroup = &v - return s -} - -type DeletePreparedStatementOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePreparedStatementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeletePreparedStatementOutput) GoString() string { - return s.String() -} - -type DeleteWorkGroupInput struct { - _ struct{} `type:"structure"` - - // The option to delete the workgroup and its contents even if the workgroup - // contains any named queries, query executions, or notebooks. - RecursiveDeleteOption *bool `type:"boolean"` - - // The unique name of the workgroup to delete. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteWorkGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteWorkGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteWorkGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteWorkGroupInput"} - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRecursiveDeleteOption sets the RecursiveDeleteOption field's value. -func (s *DeleteWorkGroupInput) SetRecursiveDeleteOption(v bool) *DeleteWorkGroupInput { - s.RecursiveDeleteOption = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *DeleteWorkGroupInput) SetWorkGroup(v string) *DeleteWorkGroupInput { - s.WorkGroup = &v - return s -} - -type DeleteWorkGroupOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteWorkGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteWorkGroupOutput) GoString() string { - return s.String() -} - -// If query and calculation results are encrypted in Amazon S3, indicates the -// encryption option used (for example, SSE_KMS or CSE_KMS) and key information. -type EncryptionConfiguration struct { - _ struct{} `type:"structure"` - - // Indicates whether Amazon S3 server-side encryption with Amazon S3-managed - // keys (SSE_S3), server-side encryption with KMS-managed keys (SSE_KMS), or - // client-side encryption with KMS-managed keys (CSE_KMS) is used. - // - // If a query runs in a workgroup and the workgroup overrides client-side settings, - // then the workgroup's setting for encryption is used. It specifies whether - // query results must be encrypted, for all queries that run in this workgroup. - // - // EncryptionOption is a required field - EncryptionOption *string `type:"string" required:"true" enum:"EncryptionOption"` - - // For SSE_KMS and CSE_KMS, this is the KMS key ARN or ID. - KmsKey *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s EncryptionConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EncryptionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"} - if s.EncryptionOption == nil { - invalidParams.Add(request.NewErrParamRequired("EncryptionOption")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionOption sets the EncryptionOption field's value. -func (s *EncryptionConfiguration) SetEncryptionOption(v string) *EncryptionConfiguration { - s.EncryptionOption = &v - return s -} - -// SetKmsKey sets the KmsKey field's value. -func (s *EncryptionConfiguration) SetKmsKey(v string) *EncryptionConfiguration { - s.KmsKey = &v - return s -} - -// Contains data processing unit (DPU) configuration settings and parameter -// mappings for a notebook engine. -type EngineConfiguration struct { - _ struct{} `type:"structure"` - - // Contains additional notebook engine MAP parameter mappings - // in the form of key-value pairs. To specify an Athena notebook that the Jupyter - // server will download and serve, specify a value for the StartSessionRequest$NotebookVersion - // field, and then add a key named NotebookId to AdditionalConfigs that has - // the value of the Athena notebook ID. - AdditionalConfigs map[string]*string `type:"map"` - - // The number of DPUs to use for the coordinator. A coordinator is a special - // executor that orchestrates processing work and manages other executors in - // a notebook session. The default is 1. - CoordinatorDpuSize *int64 `min:"1" type:"integer"` - - // The default number of DPUs to use for executors. An executor is the smallest - // unit of compute that a notebook session can request from Athena. The default - // is 1. - DefaultExecutorDpuSize *int64 `min:"1" type:"integer"` - - // The maximum number of DPUs that can run concurrently. - // - // MaxConcurrentDpus is a required field - MaxConcurrentDpus *int64 `min:"2" type:"integer" required:"true"` - - // Specifies custom jar files and Spark properties for use cases like cluster - // encryption, table formats, and general Spark tuning. - SparkProperties map[string]*string `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EngineConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EngineConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
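A minimal sketch of the EncryptionConfiguration type above, assuming server-side encryption with a KMS key: EncryptionOption is required and must be one of the documented values (SSE_S3, SSE_KMS, CSE_KMS), while KmsKey only applies to the KMS variants. The key ARN is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	// SSE_KMS requires a key; SSE_S3 would omit KmsKey entirely.
	enc := (&athena.EncryptionConfiguration{}).
		SetEncryptionOption("SSE_KMS").
		SetKmsKey("arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab")

	// Validate only enforces that EncryptionOption is present, per the code above.
	if err := enc.Validate(); err != nil {
		log.Fatalf("invalid EncryptionConfiguration: %v", err)
	}
}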
-func (s *EngineConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EngineConfiguration"} - if s.CoordinatorDpuSize != nil && *s.CoordinatorDpuSize < 1 { - invalidParams.Add(request.NewErrParamMinValue("CoordinatorDpuSize", 1)) - } - if s.DefaultExecutorDpuSize != nil && *s.DefaultExecutorDpuSize < 1 { - invalidParams.Add(request.NewErrParamMinValue("DefaultExecutorDpuSize", 1)) - } - if s.MaxConcurrentDpus == nil { - invalidParams.Add(request.NewErrParamRequired("MaxConcurrentDpus")) - } - if s.MaxConcurrentDpus != nil && *s.MaxConcurrentDpus < 2 { - invalidParams.Add(request.NewErrParamMinValue("MaxConcurrentDpus", 2)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAdditionalConfigs sets the AdditionalConfigs field's value. -func (s *EngineConfiguration) SetAdditionalConfigs(v map[string]*string) *EngineConfiguration { - s.AdditionalConfigs = v - return s -} - -// SetCoordinatorDpuSize sets the CoordinatorDpuSize field's value. -func (s *EngineConfiguration) SetCoordinatorDpuSize(v int64) *EngineConfiguration { - s.CoordinatorDpuSize = &v - return s -} - -// SetDefaultExecutorDpuSize sets the DefaultExecutorDpuSize field's value. -func (s *EngineConfiguration) SetDefaultExecutorDpuSize(v int64) *EngineConfiguration { - s.DefaultExecutorDpuSize = &v - return s -} - -// SetMaxConcurrentDpus sets the MaxConcurrentDpus field's value. -func (s *EngineConfiguration) SetMaxConcurrentDpus(v int64) *EngineConfiguration { - s.MaxConcurrentDpus = &v - return s -} - -// SetSparkProperties sets the SparkProperties field's value. -func (s *EngineConfiguration) SetSparkProperties(v map[string]*string) *EngineConfiguration { - s.SparkProperties = v - return s -} - -// The Athena engine version for running queries, or the PySpark engine version -// for running sessions. -type EngineVersion struct { - _ struct{} `type:"structure"` - - // Read only. The engine version on which the query runs. If the user requests - // a valid engine version other than Auto, the effective engine version is the - // same as the engine version that the user requested. If the user requests - // Auto, the effective engine version is chosen by Athena. When a request to - // update the engine version is made by a CreateWorkGroup or UpdateWorkGroup - // operation, the EffectiveEngineVersion field is ignored. - EffectiveEngineVersion *string `min:"1" type:"string"` - - // The engine version requested by the user. Possible values are determined - // by the output of ListEngineVersions, including AUTO. The default is AUTO. - SelectedEngineVersion *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EngineVersion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EngineVersion) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
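A sketch of the EngineConfiguration DPU settings described above: MaxConcurrentDpus is required and must be at least 2, coordinator and executor sizes default to 1 DPU, and a stored notebook is referenced through the NotebookId key of AdditionalConfigs. The notebook ID is a placeholder, and the deliberately low DPU value is there only to show the validation behavior.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	cfg := (&athena.EngineConfiguration{}).
		SetMaxConcurrentDpus(1). // deliberately below the documented minimum of 2
		SetCoordinatorDpuSize(1).
		SetAdditionalConfigs(map[string]*string{
			"NotebookId": aws.String("notebook-id-placeholder"),
		})

	// Validate surfaces the MaxConcurrentDpus minimum-value violation
	// before any session is started.
	if err := cfg.Validate(); err != nil {
		fmt.Println("expected validation error:", err)
	}
}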
-func (s *EngineVersion) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EngineVersion"} - if s.EffectiveEngineVersion != nil && len(*s.EffectiveEngineVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EffectiveEngineVersion", 1)) - } - if s.SelectedEngineVersion != nil && len(*s.SelectedEngineVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SelectedEngineVersion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEffectiveEngineVersion sets the EffectiveEngineVersion field's value. -func (s *EngineVersion) SetEffectiveEngineVersion(v string) *EngineVersion { - s.EffectiveEngineVersion = &v - return s -} - -// SetSelectedEngineVersion sets the SelectedEngineVersion field's value. -func (s *EngineVersion) SetSelectedEngineVersion(v string) *EngineVersion { - s.SelectedEngineVersion = &v - return s -} - -// Contains summary information about an executor. -type ExecutorsSummary struct { - _ struct{} `type:"structure"` - - // The UUID of the executor. - // - // ExecutorId is a required field - ExecutorId *string `type:"string" required:"true"` - - // The smallest unit of compute that a session can request from Athena. Size - // is measured in data processing unit (DPU) values, a relative measure of processing - // power. - ExecutorSize *int64 `type:"long"` - - // The processing state of the executor. A description of each state follows. - // - // CREATING - The executor is being started, including acquiring resources. - // - // CREATED - The executor has been started. - // - // REGISTERED - The executor has been registered. - // - // TERMINATING - The executor is in the process of shutting down. - // - // TERMINATED - The executor is no longer running. - // - // FAILED - Due to a failure, the executor is no longer running. - ExecutorState *string `type:"string" enum:"ExecutorState"` - - // The type of executor used for the application (COORDINATOR, GATEWAY, or WORKER). - ExecutorType *string `type:"string" enum:"ExecutorType"` - - // The date and time that the executor started. - StartDateTime *int64 `type:"long"` - - // The date and time that the executor was terminated. - TerminationDateTime *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExecutorsSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExecutorsSummary) GoString() string { - return s.String() -} - -// SetExecutorId sets the ExecutorId field's value. -func (s *ExecutorsSummary) SetExecutorId(v string) *ExecutorsSummary { - s.ExecutorId = &v - return s -} - -// SetExecutorSize sets the ExecutorSize field's value. -func (s *ExecutorsSummary) SetExecutorSize(v int64) *ExecutorsSummary { - s.ExecutorSize = &v - return s -} - -// SetExecutorState sets the ExecutorState field's value. -func (s *ExecutorsSummary) SetExecutorState(v string) *ExecutorsSummary { - s.ExecutorState = &v - return s -} - -// SetExecutorType sets the ExecutorType field's value. 
-func (s *ExecutorsSummary) SetExecutorType(v string) *ExecutorsSummary { - s.ExecutorType = &v - return s -} - -// SetStartDateTime sets the StartDateTime field's value. -func (s *ExecutorsSummary) SetStartDateTime(v int64) *ExecutorsSummary { - s.StartDateTime = &v - return s -} - -// SetTerminationDateTime sets the TerminationDateTime field's value. -func (s *ExecutorsSummary) SetTerminationDateTime(v int64) *ExecutorsSummary { - s.TerminationDateTime = &v - return s -} - -type ExportNotebookInput struct { - _ struct{} `type:"structure"` - - // The ID of the notebook to export. - // - // NotebookId is a required field - NotebookId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportNotebookInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportNotebookInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ExportNotebookInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExportNotebookInput"} - if s.NotebookId == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookId")) - } - if s.NotebookId != nil && len(*s.NotebookId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotebookId sets the NotebookId field's value. -func (s *ExportNotebookInput) SetNotebookId(v string) *ExportNotebookInput { - s.NotebookId = &v - return s -} - -type ExportNotebookOutput struct { - _ struct{} `type:"structure"` - - // The notebook metadata, including notebook ID, notebook name, and workgroup - // name. - NotebookMetadata *NotebookMetadata `type:"structure"` - - // The content of the exported notebook. - Payload *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportNotebookOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExportNotebookOutput) GoString() string { - return s.String() -} - -// SetNotebookMetadata sets the NotebookMetadata field's value. -func (s *ExportNotebookOutput) SetNotebookMetadata(v *NotebookMetadata) *ExportNotebookOutput { - s.NotebookMetadata = v - return s -} - -// SetPayload sets the Payload field's value. -func (s *ExportNotebookOutput) SetPayload(v string) *ExportNotebookOutput { - s.Payload = &v - return s -} - -// A string for searching notebook names. -type FilterDefinition struct { - _ struct{} `type:"structure"` - - // The name of the notebook to search for. 
- Name *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s FilterDefinition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s FilterDefinition) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *FilterDefinition) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FilterDefinition"} - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *FilterDefinition) SetName(v string) *FilterDefinition { - s.Name = &v - return s -} - -type GetCalculationExecutionCodeInput struct { - _ struct{} `type:"structure"` - - // The calculation execution UUID. - // - // CalculationExecutionId is a required field - CalculationExecutionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionCodeInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionCodeInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCalculationExecutionCodeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCalculationExecutionCodeInput"} - if s.CalculationExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("CalculationExecutionId")) - } - if s.CalculationExecutionId != nil && len(*s.CalculationExecutionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CalculationExecutionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCalculationExecutionId sets the CalculationExecutionId field's value. -func (s *GetCalculationExecutionCodeInput) SetCalculationExecutionId(v string) *GetCalculationExecutionCodeInput { - s.CalculationExecutionId = &v - return s -} - -type GetCalculationExecutionCodeOutput struct { - _ struct{} `type:"structure"` - - // The unencrypted code that was executed for the calculation. - CodeBlock *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GetCalculationExecutionCodeOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionCodeOutput) GoString() string { - return s.String() -} - -// SetCodeBlock sets the CodeBlock field's value. -func (s *GetCalculationExecutionCodeOutput) SetCodeBlock(v string) *GetCalculationExecutionCodeOutput { - s.CodeBlock = &v - return s -} - -type GetCalculationExecutionInput struct { - _ struct{} `type:"structure"` - - // The calculation execution UUID. - // - // CalculationExecutionId is a required field - CalculationExecutionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCalculationExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCalculationExecutionInput"} - if s.CalculationExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("CalculationExecutionId")) - } - if s.CalculationExecutionId != nil && len(*s.CalculationExecutionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CalculationExecutionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCalculationExecutionId sets the CalculationExecutionId field's value. -func (s *GetCalculationExecutionInput) SetCalculationExecutionId(v string) *GetCalculationExecutionInput { - s.CalculationExecutionId = &v - return s -} - -type GetCalculationExecutionOutput struct { - _ struct{} `type:"structure"` - - // The calculation execution UUID. - CalculationExecutionId *string `min:"1" type:"string"` - - // The description of the calculation execution. - Description *string `min:"1" type:"string"` - - // Contains result information. This field is populated only if the calculation - // is completed. - Result *CalculationResult `type:"structure"` - - // The session ID that the calculation ran in. - SessionId *string `min:"1" type:"string"` - - // Contains information about the data processing unit (DPU) execution time - // and progress. This field is populated only when statistics are available. - Statistics *CalculationStatistics `type:"structure"` - - // Contains information about the status of the calculation. - Status *CalculationStatus `type:"structure"` - - // The Amazon S3 location in which calculation results are stored. - WorkingDirectory *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionOutput) GoString() string { - return s.String() -} - -// SetCalculationExecutionId sets the CalculationExecutionId field's value. -func (s *GetCalculationExecutionOutput) SetCalculationExecutionId(v string) *GetCalculationExecutionOutput { - s.CalculationExecutionId = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *GetCalculationExecutionOutput) SetDescription(v string) *GetCalculationExecutionOutput { - s.Description = &v - return s -} - -// SetResult sets the Result field's value. -func (s *GetCalculationExecutionOutput) SetResult(v *CalculationResult) *GetCalculationExecutionOutput { - s.Result = v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *GetCalculationExecutionOutput) SetSessionId(v string) *GetCalculationExecutionOutput { - s.SessionId = &v - return s -} - -// SetStatistics sets the Statistics field's value. -func (s *GetCalculationExecutionOutput) SetStatistics(v *CalculationStatistics) *GetCalculationExecutionOutput { - s.Statistics = v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetCalculationExecutionOutput) SetStatus(v *CalculationStatus) *GetCalculationExecutionOutput { - s.Status = v - return s -} - -// SetWorkingDirectory sets the WorkingDirectory field's value. -func (s *GetCalculationExecutionOutput) SetWorkingDirectory(v string) *GetCalculationExecutionOutput { - s.WorkingDirectory = &v - return s -} - -type GetCalculationExecutionStatusInput struct { - _ struct{} `type:"structure"` - - // The calculation execution UUID. - // - // CalculationExecutionId is a required field - CalculationExecutionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionStatusInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCalculationExecutionStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCalculationExecutionStatusInput"} - if s.CalculationExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("CalculationExecutionId")) - } - if s.CalculationExecutionId != nil && len(*s.CalculationExecutionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CalculationExecutionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCalculationExecutionId sets the CalculationExecutionId field's value. 
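Finally, a hedged sketch of the one-field lookup inputs above, using GetCalculationExecutionInput as the example; the status variant follows the same shape. The execution ID is a placeholder returned when a calculation is started, and the surrounding scaffolding is assumed for illustration.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	// CalculationExecutionId is required and must be non-empty.
	in := (&athena.GetCalculationExecutionInput{}).
		SetCalculationExecutionId("11aa22bb-example")

	if err := in.Validate(); err != nil {
		log.Fatalf("invalid GetCalculationExecutionInput: %v", err)
	}
	// The corresponding generated client call returns a
	// GetCalculationExecutionOutput whose Status, Statistics, and Result
	// fields are the structures shown above.
}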
-func (s *GetCalculationExecutionStatusInput) SetCalculationExecutionId(v string) *GetCalculationExecutionStatusInput { - s.CalculationExecutionId = &v - return s -} - -type GetCalculationExecutionStatusOutput struct { - _ struct{} `type:"structure"` - - // Contains information about the DPU execution time and progress. - Statistics *CalculationStatistics `type:"structure"` - - // Contains information about the calculation execution status. - Status *CalculationStatus `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCalculationExecutionStatusOutput) GoString() string { - return s.String() -} - -// SetStatistics sets the Statistics field's value. -func (s *GetCalculationExecutionStatusOutput) SetStatistics(v *CalculationStatistics) *GetCalculationExecutionStatusOutput { - s.Statistics = v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetCalculationExecutionStatusOutput) SetStatus(v *CalculationStatus) *GetCalculationExecutionStatusOutput { - s.Status = v - return s -} - -type GetCapacityAssignmentConfigurationInput struct { - _ struct{} `type:"structure"` - - // The name of the capacity reservation to retrieve the capacity assignment - // configuration for. - // - // CapacityReservationName is a required field - CapacityReservationName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCapacityAssignmentConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCapacityAssignmentConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCapacityAssignmentConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCapacityAssignmentConfigurationInput"} - if s.CapacityReservationName == nil { - invalidParams.Add(request.NewErrParamRequired("CapacityReservationName")) - } - if s.CapacityReservationName != nil && len(*s.CapacityReservationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CapacityReservationName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCapacityReservationName sets the CapacityReservationName field's value. 
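// Illustrative sketch, not part of the vendored file being removed: how the
// GetCalculationExecutionStatus types above were typically consumed with this
// v1 client. Assumes client := athena.New(sess) with imports
// github.com/aws/aws-sdk-go/aws, github.com/aws/aws-sdk-go/aws/session,
// github.com/aws/aws-sdk-go/service/athena and time. CalculationStatus.State
// and the terminal state names come from elsewhere in the same generated
// package; the poll interval is arbitrary.
func waitForCalculation(client *athena.Athena, calculationID string) (*athena.CalculationStatus, error) {
	for {
		out, err := client.GetCalculationExecutionStatus(&athena.GetCalculationExecutionStatusInput{
			CalculationExecutionId: aws.String(calculationID),
		})
		if err != nil {
			return nil, err
		}
		if out.Status != nil {
			switch aws.StringValue(out.Status.State) {
			case "COMPLETED", "FAILED", "CANCELED":
				return out.Status, nil
			}
		}
		time.Sleep(2 * time.Second)
	}
}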
-func (s *GetCapacityAssignmentConfigurationInput) SetCapacityReservationName(v string) *GetCapacityAssignmentConfigurationInput { - s.CapacityReservationName = &v - return s -} - -type GetCapacityAssignmentConfigurationOutput struct { - _ struct{} `type:"structure"` - - // The requested capacity assignment configuration for the specified capacity - // reservation. - // - // CapacityAssignmentConfiguration is a required field - CapacityAssignmentConfiguration *CapacityAssignmentConfiguration `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCapacityAssignmentConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCapacityAssignmentConfigurationOutput) GoString() string { - return s.String() -} - -// SetCapacityAssignmentConfiguration sets the CapacityAssignmentConfiguration field's value. -func (s *GetCapacityAssignmentConfigurationOutput) SetCapacityAssignmentConfiguration(v *CapacityAssignmentConfiguration) *GetCapacityAssignmentConfigurationOutput { - s.CapacityAssignmentConfiguration = v - return s -} - -type GetCapacityReservationInput struct { - _ struct{} `type:"structure"` - - // The name of the capacity reservation. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCapacityReservationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCapacityReservationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCapacityReservationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCapacityReservationInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetCapacityReservationInput) SetName(v string) *GetCapacityReservationInput { - s.Name = &v - return s -} - -type GetCapacityReservationOutput struct { - _ struct{} `type:"structure"` - - // The requested capacity reservation structure. - // - // CapacityReservation is a required field - CapacityReservation *CapacityReservation `type:"structure" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCapacityReservationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetCapacityReservationOutput) GoString() string { - return s.String() -} - -// SetCapacityReservation sets the CapacityReservation field's value. -func (s *GetCapacityReservationOutput) SetCapacityReservation(v *CapacityReservation) *GetCapacityReservationOutput { - s.CapacityReservation = v - return s -} - -type GetDataCatalogInput struct { - _ struct{} `type:"structure"` - - // The name of the data catalog to return. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The name of the workgroup. Required if making an IAM Identity Center request. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetDataCatalogInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetDataCatalogInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetDataCatalogInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDataCatalogInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *GetDataCatalogInput) SetName(v string) *GetDataCatalogInput { - s.Name = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetDataCatalogInput) SetWorkGroup(v string) *GetDataCatalogInput { - s.WorkGroup = &v - return s -} - -type GetDataCatalogOutput struct { - _ struct{} `type:"structure"` - - // The data catalog returned. - DataCatalog *DataCatalog `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetDataCatalogOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetDataCatalogOutput) GoString() string { - return s.String() -} - -// SetDataCatalog sets the DataCatalog field's value. 
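// Illustrative sketch, not part of the vendored file: the generated Set*
// helpers above allow inputs to be built fluently instead of via struct
// literals. "AwsDataCatalog" is only an example catalog name; client and
// imports are assumed as in the earlier sketch.
func describeCatalog(client *athena.Athena) (*athena.DataCatalog, error) {
	in := (&athena.GetDataCatalogInput{}).SetName("AwsDataCatalog")
	out, err := client.GetDataCatalog(in)
	if err != nil {
		return nil, err
	}
	return out.DataCatalog, nil
}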
-func (s *GetDataCatalogOutput) SetDataCatalog(v *DataCatalog) *GetDataCatalogOutput { - s.DataCatalog = v - return s -} - -type GetDatabaseInput struct { - _ struct{} `type:"structure"` - - // The name of the data catalog that contains the database to return. - // - // CatalogName is a required field - CatalogName *string `min:"1" type:"string" required:"true"` - - // The name of the database to return. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The name of the workgroup for which the metadata is being fetched. Required - // if requesting an IAM Identity Center enabled Glue Data Catalog. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetDatabaseInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetDatabaseInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDatabaseInput"} - if s.CatalogName == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogName")) - } - if s.CatalogName != nil && len(*s.CatalogName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogName sets the CatalogName field's value. -func (s *GetDatabaseInput) SetCatalogName(v string) *GetDatabaseInput { - s.CatalogName = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetDatabaseInput) SetDatabaseName(v string) *GetDatabaseInput { - s.DatabaseName = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetDatabaseInput) SetWorkGroup(v string) *GetDatabaseInput { - s.WorkGroup = &v - return s -} - -type GetDatabaseOutput struct { - _ struct{} `type:"structure"` - - // The database returned. - Database *Database `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetDatabaseOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetDatabaseOutput) GoString() string { - return s.String() -} - -// SetDatabase sets the Database field's value. 
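// Illustrative sketch, not part of the vendored file: the Validate methods
// above run client-side checks (required fields, minimum lengths) and return
// a request.ErrInvalidParams before any network call is made. Client and
// imports as in the earlier sketches.
func lookupDatabase(client *athena.Athena, catalog, database string) (*athena.Database, error) {
	in := &athena.GetDatabaseInput{
		CatalogName:  aws.String(catalog),
		DatabaseName: aws.String(database),
	}
	// Surfaces missing or too-short parameters locally, mirroring the tags on the struct fields.
	if err := in.Validate(); err != nil {
		return nil, err
	}
	out, err := client.GetDatabase(in)
	if err != nil {
		return nil, err
	}
	return out.Database, nil
}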
-func (s *GetDatabaseOutput) SetDatabase(v *Database) *GetDatabaseOutput { - s.Database = v - return s -} - -type GetNamedQueryInput struct { - _ struct{} `type:"structure"` - - // The unique ID of the query. Use ListNamedQueries to get query IDs. - // - // NamedQueryId is a required field - NamedQueryId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetNamedQueryInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetNamedQueryInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetNamedQueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetNamedQueryInput"} - if s.NamedQueryId == nil { - invalidParams.Add(request.NewErrParamRequired("NamedQueryId")) - } - if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *GetNamedQueryInput) SetNamedQueryId(v string) *GetNamedQueryInput { - s.NamedQueryId = &v - return s -} - -type GetNamedQueryOutput struct { - _ struct{} `type:"structure"` - - // Information about the query. - NamedQuery *NamedQuery `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetNamedQueryOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetNamedQueryOutput) GoString() string { - return s.String() -} - -// SetNamedQuery sets the NamedQuery field's value. -func (s *GetNamedQueryOutput) SetNamedQuery(v *NamedQuery) *GetNamedQueryOutput { - s.NamedQuery = v - return s -} - -type GetNotebookMetadataInput struct { - _ struct{} `type:"structure"` - - // The ID of the notebook whose metadata is to be retrieved. - // - // NotebookId is a required field - NotebookId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetNotebookMetadataInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
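// Illustrative sketch, not part of the vendored file: resolving a saved query
// by ID through the GetNamedQuery types above. The NamedQuery structure itself
// is defined elsewhere in this generated package; client and imports as in the
// earlier sketches.
func fetchNamedQuery(client *athena.Athena, namedQueryID string) (*athena.NamedQuery, error) {
	out, err := client.GetNamedQuery(&athena.GetNamedQueryInput{
		NamedQueryId: aws.String(namedQueryID),
	})
	if err != nil {
		return nil, err
	}
	return out.NamedQuery, nil
}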
-func (s GetNotebookMetadataInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetNotebookMetadataInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetNotebookMetadataInput"} - if s.NotebookId == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookId")) - } - if s.NotebookId != nil && len(*s.NotebookId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetNotebookId sets the NotebookId field's value. -func (s *GetNotebookMetadataInput) SetNotebookId(v string) *GetNotebookMetadataInput { - s.NotebookId = &v - return s -} - -type GetNotebookMetadataOutput struct { - _ struct{} `type:"structure"` - - // The metadata that is returned for the specified notebook ID. - NotebookMetadata *NotebookMetadata `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetNotebookMetadataOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetNotebookMetadataOutput) GoString() string { - return s.String() -} - -// SetNotebookMetadata sets the NotebookMetadata field's value. -func (s *GetNotebookMetadataOutput) SetNotebookMetadata(v *NotebookMetadata) *GetNotebookMetadataOutput { - s.NotebookMetadata = v - return s -} - -type GetPreparedStatementInput struct { - _ struct{} `type:"structure"` - - // The name of the prepared statement to retrieve. - // - // StatementName is a required field - StatementName *string `min:"1" type:"string" required:"true"` - - // The workgroup to which the statement to be retrieved belongs. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPreparedStatementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPreparedStatementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetPreparedStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPreparedStatementInput"} - if s.StatementName == nil { - invalidParams.Add(request.NewErrParamRequired("StatementName")) - } - if s.StatementName != nil && len(*s.StatementName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStatementName sets the StatementName field's value. -func (s *GetPreparedStatementInput) SetStatementName(v string) *GetPreparedStatementInput { - s.StatementName = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetPreparedStatementInput) SetWorkGroup(v string) *GetPreparedStatementInput { - s.WorkGroup = &v - return s -} - -type GetPreparedStatementOutput struct { - _ struct{} `type:"structure"` - - // The name of the prepared statement that was retrieved. - PreparedStatement *PreparedStatement `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPreparedStatementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPreparedStatementOutput) GoString() string { - return s.String() -} - -// SetPreparedStatement sets the PreparedStatement field's value. -func (s *GetPreparedStatementOutput) SetPreparedStatement(v *PreparedStatement) *GetPreparedStatementOutput { - s.PreparedStatement = v - return s -} - -type GetQueryExecutionInput struct { - _ struct{} `type:"structure"` - - // The unique ID of the query execution. - // - // QueryExecutionId is a required field - QueryExecutionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetQueryExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetQueryExecutionInput"} - if s.QueryExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) - } - if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueryExecutionId sets the QueryExecutionId field's value. 
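// Illustrative sketch, not part of the vendored file: GetPreparedStatementInput
// above is one of the inputs whose WorkGroup field is required rather than
// optional. Client and imports as in the earlier sketches.
func fetchPreparedStatement(client *athena.Athena, workGroup, name string) (*athena.PreparedStatement, error) {
	out, err := client.GetPreparedStatement(&athena.GetPreparedStatementInput{
		WorkGroup:     aws.String(workGroup),
		StatementName: aws.String(name),
	})
	if err != nil {
		return nil, err
	}
	return out.PreparedStatement, nil
}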
-func (s *GetQueryExecutionInput) SetQueryExecutionId(v string) *GetQueryExecutionInput { - s.QueryExecutionId = &v - return s -} - -type GetQueryExecutionOutput struct { - _ struct{} `type:"structure"` - - // Information about the query execution. - QueryExecution *QueryExecution `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryExecutionOutput) GoString() string { - return s.String() -} - -// SetQueryExecution sets the QueryExecution field's value. -func (s *GetQueryExecutionOutput) SetQueryExecution(v *QueryExecution) *GetQueryExecutionOutput { - s.QueryExecution = v - return s -} - -type GetQueryResultsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results (rows) to return in this request. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The unique ID of the query execution. - // - // QueryExecutionId is a required field - QueryExecutionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryResultsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryResultsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetQueryResultsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetQueryResultsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.QueryExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) - } - if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *GetQueryResultsInput) SetMaxResults(v int64) *GetQueryResultsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *GetQueryResultsInput) SetNextToken(v string) *GetQueryResultsInput { - s.NextToken = &v - return s -} - -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *GetQueryResultsInput) SetQueryExecutionId(v string) *GetQueryResultsInput { - s.QueryExecutionId = &v - return s -} - -type GetQueryResultsOutput struct { - _ struct{} `type:"structure"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The results of the query execution. - ResultSet *ResultSet `type:"structure"` - - // The number of rows inserted with a CREATE TABLE AS SELECT statement. - UpdateCount *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryResultsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryResultsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *GetQueryResultsOutput) SetNextToken(v string) *GetQueryResultsOutput { - s.NextToken = &v - return s -} - -// SetResultSet sets the ResultSet field's value. -func (s *GetQueryResultsOutput) SetResultSet(v *ResultSet) *GetQueryResultsOutput { - s.ResultSet = v - return s -} - -// SetUpdateCount sets the UpdateCount field's value. -func (s *GetQueryResultsOutput) SetUpdateCount(v int64) *GetQueryResultsOutput { - s.UpdateCount = &v - return s -} - -type GetQueryRuntimeStatisticsInput struct { - _ struct{} `type:"structure"` - - // The unique ID of the query execution. - // - // QueryExecutionId is a required field - QueryExecutionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryRuntimeStatisticsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryRuntimeStatisticsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
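// Illustrative sketch, not part of the vendored file: GetQueryResults above is
// paginated via NextToken, which is how an Athena driver such as go-athena
// pages result rows back to sql_exporter. ResultSet.Rows is defined elsewhere
// in this generated package; the page size of 1000 is only illustrative.
// Client and imports as in the earlier sketches.
func fetchAllRows(client *athena.Athena, queryExecutionID string) ([]*athena.Row, error) {
	var (
		rows  []*athena.Row
		token *string
	)
	for {
		out, err := client.GetQueryResults(&athena.GetQueryResultsInput{
			QueryExecutionId: aws.String(queryExecutionID),
			MaxResults:       aws.Int64(1000),
			NextToken:        token,
		})
		if err != nil {
			return nil, err
		}
		if out.ResultSet != nil {
			rows = append(rows, out.ResultSet.Rows...)
		}
		// A nil NextToken marks the final page.
		if out.NextToken == nil {
			return rows, nil
		}
		token = out.NextToken
	}
}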
-func (s *GetQueryRuntimeStatisticsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetQueryRuntimeStatisticsInput"} - if s.QueryExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) - } - if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *GetQueryRuntimeStatisticsInput) SetQueryExecutionId(v string) *GetQueryRuntimeStatisticsInput { - s.QueryExecutionId = &v - return s -} - -type GetQueryRuntimeStatisticsOutput struct { - _ struct{} `type:"structure"` - - // Runtime statistics about the query execution. - QueryRuntimeStatistics *QueryRuntimeStatistics `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryRuntimeStatisticsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryRuntimeStatisticsOutput) GoString() string { - return s.String() -} - -// SetQueryRuntimeStatistics sets the QueryRuntimeStatistics field's value. -func (s *GetQueryRuntimeStatisticsOutput) SetQueryRuntimeStatistics(v *QueryRuntimeStatistics) *GetQueryRuntimeStatisticsOutput { - s.QueryRuntimeStatistics = v - return s -} - -type GetSessionInput struct { - _ struct{} `type:"structure"` - - // The session ID. - // - // SessionId is a required field - SessionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSessionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSessionInput"} - if s.SessionId == nil { - invalidParams.Add(request.NewErrParamRequired("SessionId")) - } - if s.SessionId != nil && len(*s.SessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSessionId sets the SessionId field's value. -func (s *GetSessionInput) SetSessionId(v string) *GetSessionInput { - s.SessionId = &v - return s -} - -type GetSessionOutput struct { - _ struct{} `type:"structure"` - - // The session description. - Description *string `min:"1" type:"string"` - - // Contains engine configuration information like DPU usage. 
- EngineConfiguration *EngineConfiguration `type:"structure"` - - // The engine version used by the session (for example, PySpark engine version - // 3). You can get a list of engine versions by calling ListEngineVersions. - EngineVersion *string `min:"1" type:"string"` - - // The notebook version. - NotebookVersion *string `min:"1" type:"string"` - - // Contains the workgroup configuration information used by the session. - SessionConfiguration *SessionConfiguration `type:"structure"` - - // The session ID. - SessionId *string `min:"1" type:"string"` - - // Contains the DPU execution time. - Statistics *SessionStatistics `type:"structure"` - - // Contains information about the status of the session. - Status *SessionStatus `type:"structure"` - - // The workgroup to which the session belongs. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionOutput) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *GetSessionOutput) SetDescription(v string) *GetSessionOutput { - s.Description = &v - return s -} - -// SetEngineConfiguration sets the EngineConfiguration field's value. -func (s *GetSessionOutput) SetEngineConfiguration(v *EngineConfiguration) *GetSessionOutput { - s.EngineConfiguration = v - return s -} - -// SetEngineVersion sets the EngineVersion field's value. -func (s *GetSessionOutput) SetEngineVersion(v string) *GetSessionOutput { - s.EngineVersion = &v - return s -} - -// SetNotebookVersion sets the NotebookVersion field's value. -func (s *GetSessionOutput) SetNotebookVersion(v string) *GetSessionOutput { - s.NotebookVersion = &v - return s -} - -// SetSessionConfiguration sets the SessionConfiguration field's value. -func (s *GetSessionOutput) SetSessionConfiguration(v *SessionConfiguration) *GetSessionOutput { - s.SessionConfiguration = v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *GetSessionOutput) SetSessionId(v string) *GetSessionOutput { - s.SessionId = &v - return s -} - -// SetStatistics sets the Statistics field's value. -func (s *GetSessionOutput) SetStatistics(v *SessionStatistics) *GetSessionOutput { - s.Statistics = v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetSessionOutput) SetStatus(v *SessionStatus) *GetSessionOutput { - s.Status = v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetSessionOutput) SetWorkGroup(v string) *GetSessionOutput { - s.WorkGroup = &v - return s -} - -type GetSessionStatusInput struct { - _ struct{} `type:"structure"` - - // The session ID. - // - // SessionId is a required field - SessionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GetSessionStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionStatusInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetSessionStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetSessionStatusInput"} - if s.SessionId == nil { - invalidParams.Add(request.NewErrParamRequired("SessionId")) - } - if s.SessionId != nil && len(*s.SessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSessionId sets the SessionId field's value. -func (s *GetSessionStatusInput) SetSessionId(v string) *GetSessionStatusInput { - s.SessionId = &v - return s -} - -type GetSessionStatusOutput struct { - _ struct{} `type:"structure"` - - // The session ID. - SessionId *string `min:"1" type:"string"` - - // Contains information about the status of the session. - Status *SessionStatus `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetSessionStatusOutput) GoString() string { - return s.String() -} - -// SetSessionId sets the SessionId field's value. -func (s *GetSessionStatusOutput) SetSessionId(v string) *GetSessionStatusOutput { - s.SessionId = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *GetSessionStatusOutput) SetStatus(v *SessionStatus) *GetSessionStatusOutput { - s.Status = v - return s -} - -type GetTableMetadataInput struct { - _ struct{} `type:"structure"` - - // The name of the data catalog that contains the database and table metadata - // to return. - // - // CatalogName is a required field - CatalogName *string `min:"1" type:"string" required:"true"` - - // The name of the database that contains the table metadata to return. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // The name of the table for which metadata is returned. - // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` - - // The name of the workgroup for which the metadata is being fetched. Required - // if requesting an IAM Identity Center enabled Glue Data Catalog. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetTableMetadataInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
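// Illustrative sketch, not part of the vendored file: a session lookup pairing
// the GetSession / GetSessionStatus types above. SessionStatus is defined
// elsewhere in this generated package; client and imports as in the earlier
// sketches.
func sessionStatus(client *athena.Athena, sessionID string) (*athena.SessionStatus, error) {
	out, err := client.GetSessionStatus(&athena.GetSessionStatusInput{
		SessionId: aws.String(sessionID),
	})
	if err != nil {
		return nil, err
	}
	return out.Status, nil
}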
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetTableMetadataInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetTableMetadataInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTableMetadataInput"} - if s.CatalogName == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogName")) - } - if s.CatalogName != nil && len(*s.CatalogName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogName sets the CatalogName field's value. -func (s *GetTableMetadataInput) SetCatalogName(v string) *GetTableMetadataInput { - s.CatalogName = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTableMetadataInput) SetDatabaseName(v string) *GetTableMetadataInput { - s.DatabaseName = &v - return s -} - -// SetTableName sets the TableName field's value. -func (s *GetTableMetadataInput) SetTableName(v string) *GetTableMetadataInput { - s.TableName = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetTableMetadataInput) SetWorkGroup(v string) *GetTableMetadataInput { - s.WorkGroup = &v - return s -} - -type GetTableMetadataOutput struct { - _ struct{} `type:"structure"` - - // An object that contains table metadata. - TableMetadata *TableMetadata `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetTableMetadataOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetTableMetadataOutput) GoString() string { - return s.String() -} - -// SetTableMetadata sets the TableMetadata field's value. -func (s *GetTableMetadataOutput) SetTableMetadata(v *TableMetadata) *GetTableMetadataOutput { - s.TableMetadata = v - return s -} - -type GetWorkGroupInput struct { - _ struct{} `type:"structure"` - - // The name of the workgroup. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetWorkGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetWorkGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetWorkGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetWorkGroupInput"} - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetWorkGroupInput) SetWorkGroup(v string) *GetWorkGroupInput { - s.WorkGroup = &v - return s -} - -type GetWorkGroupOutput struct { - _ struct{} `type:"structure"` - - // Information about the workgroup. - WorkGroup *WorkGroup `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetWorkGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetWorkGroupOutput) GoString() string { - return s.String() -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetWorkGroupOutput) SetWorkGroup(v *WorkGroup) *GetWorkGroupOutput { - s.WorkGroup = v - return s -} - -// Specifies whether the workgroup is IAM Identity Center supported. -type IdentityCenterConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether the workgroup is IAM Identity Center supported. - EnableIdentityCenter *bool `type:"boolean"` - - // The IAM Identity Center instance ARN that the workgroup associates to. - IdentityCenterInstanceArn *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IdentityCenterConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IdentityCenterConfiguration) GoString() string { - return s.String() -} - -// SetEnableIdentityCenter sets the EnableIdentityCenter field's value. -func (s *IdentityCenterConfiguration) SetEnableIdentityCenter(v bool) *IdentityCenterConfiguration { - s.EnableIdentityCenter = &v - return s -} - -// SetIdentityCenterInstanceArn sets the IdentityCenterInstanceArn field's value. -func (s *IdentityCenterConfiguration) SetIdentityCenterInstanceArn(v string) *IdentityCenterConfiguration { - s.IdentityCenterInstanceArn = &v - return s -} - -type ImportNotebookInput struct { - _ struct{} `type:"structure"` - - // A unique case-sensitive string used to ensure the request to import the notebook - // is idempotent (executes only once). 
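// Illustrative sketch, not part of the vendored file: workgroup settings such
// as the result location and engine version are read through the GetWorkGroup
// types above; the WorkGroup structure itself is defined elsewhere in this
// generated package. Client and imports as in the earlier sketches.
func describeWorkGroup(client *athena.Athena, name string) (*athena.WorkGroup, error) {
	out, err := client.GetWorkGroup(&athena.GetWorkGroupInput{
		WorkGroup: aws.String(name),
	})
	if err != nil {
		return nil, err
	}
	return out.WorkGroup, nil
}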
- // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // you. If you are not using the Amazon Web Services SDK or the Amazon Web Services - // CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"1" type:"string"` - - // The name of the notebook to import. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A URI that specifies the Amazon S3 location of a notebook file in ipynb format. - NotebookS3LocationUri *string `type:"string"` - - // The notebook content to be imported. The payload must be in ipynb format. - Payload *string `min:"1" type:"string"` - - // The notebook content type. Currently, the only valid type is IPYNB. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"NotebookType"` - - // The name of the Spark enabled workgroup to import the notebook to. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportNotebookInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportNotebookInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ImportNotebookInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ImportNotebookInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Payload != nil && len(*s.Payload) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Payload", 1)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *ImportNotebookInput) SetClientRequestToken(v string) *ImportNotebookInput { - s.ClientRequestToken = &v - return s -} - -// SetName sets the Name field's value. -func (s *ImportNotebookInput) SetName(v string) *ImportNotebookInput { - s.Name = &v - return s -} - -// SetNotebookS3LocationUri sets the NotebookS3LocationUri field's value. -func (s *ImportNotebookInput) SetNotebookS3LocationUri(v string) *ImportNotebookInput { - s.NotebookS3LocationUri = &v - return s -} - -// SetPayload sets the Payload field's value. -func (s *ImportNotebookInput) SetPayload(v string) *ImportNotebookInput { - s.Payload = &v - return s -} - -// SetType sets the Type field's value. 
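// Illustrative sketch, not part of the vendored file: importing an ipynb
// notebook through the ImportNotebookInput above. Per the field docs, Type
// currently accepts only "IPYNB", Payload carries the notebook content, and
// ClientRequestToken may be omitted because the SDK auto-generates it. Client
// and imports as in the earlier sketches.
func importNotebook(client *athena.Athena, workGroup, name, ipynbJSON string) (string, error) {
	out, err := client.ImportNotebook(&athena.ImportNotebookInput{
		WorkGroup: aws.String(workGroup),
		Name:      aws.String(name),
		Payload:   aws.String(ipynbJSON),
		Type:      aws.String("IPYNB"),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.NotebookId), nil
}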
-func (s *ImportNotebookInput) SetType(v string) *ImportNotebookInput { - s.Type = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ImportNotebookInput) SetWorkGroup(v string) *ImportNotebookInput { - s.WorkGroup = &v - return s -} - -type ImportNotebookOutput struct { - _ struct{} `type:"structure"` - - // The ID assigned to the imported notebook. - NotebookId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportNotebookOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ImportNotebookOutput) GoString() string { - return s.String() -} - -// SetNotebookId sets the NotebookId field's value. -func (s *ImportNotebookOutput) SetNotebookId(v string) *ImportNotebookOutput { - s.NotebookId = &v - return s -} - -// Indicates a platform issue, which may be due to a transient condition or -// outage. -type InternalServerException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InternalServerException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InternalServerException) GoString() string { - return s.String() -} - -func newErrorInternalServerException(v protocol.ResponseMetadata) error { - return &InternalServerException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InternalServerException) Code() string { - return "InternalServerException" -} - -// Message returns the exception's message. -func (s *InternalServerException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalServerException) OrigErr() error { - return nil -} - -func (s *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InternalServerException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InternalServerException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
-type InvalidRequestException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - // The error code returned when the query execution failed to process, or when - // the processing request for the named query failed. - AthenaErrorCode *string `min:"1" type:"string"` - - Message_ *string `locationName:"Message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRequestException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRequestException) GoString() string { - return s.String() -} - -func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { - return &InvalidRequestException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidRequestException) Code() string { - return "InvalidRequestException" -} - -// Message returns the exception's message. -func (s *InvalidRequestException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidRequestException) OrigErr() error { - return nil -} - -func (s *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidRequestException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidRequestException) RequestID() string { - return s.RespMetadata.RequestID -} - -type ListApplicationDPUSizesInput struct { - _ struct{} `type:"structure"` - - // Specifies the maximum number of results to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListApplicationDPUSizesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListApplicationDPUSizesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
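// Illustrative sketch, not part of the vendored file: the exception types
// above satisfy awserr.Error, so callers of this v1 package usually branched
// on the concrete type to separate caller mistakes from service-side faults.
// Client and imports as in the earlier sketches.
func classifyAthenaError(err error) string {
	switch e := err.(type) {
	case *athena.InvalidRequestException:
		// Bad input, for example an unknown QueryExecutionId; AthenaErrorCode narrows the cause.
		return "invalid request: " + e.Message() + " (" + aws.StringValue(e.AthenaErrorCode) + ")"
	case *athena.InternalServerException:
		// Transient platform issue; normally retried with backoff.
		return "internal server error: " + e.Message()
	default:
		return "unclassified: " + err.Error()
	}
}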
-func (s *ListApplicationDPUSizesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListApplicationDPUSizesInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListApplicationDPUSizesInput) SetMaxResults(v int64) *ListApplicationDPUSizesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListApplicationDPUSizesInput) SetNextToken(v string) *ListApplicationDPUSizesInput { - s.NextToken = &v - return s -} - -type ListApplicationDPUSizesOutput struct { - _ struct{} `type:"structure"` - - // A list of the supported DPU sizes that the application runtime supports. - ApplicationDPUSizes []*ApplicationDPUSizes `type:"list"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListApplicationDPUSizesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListApplicationDPUSizesOutput) GoString() string { - return s.String() -} - -// SetApplicationDPUSizes sets the ApplicationDPUSizes field's value. -func (s *ListApplicationDPUSizesOutput) SetApplicationDPUSizes(v []*ApplicationDPUSizes) *ListApplicationDPUSizesOutput { - s.ApplicationDPUSizes = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListApplicationDPUSizesOutput) SetNextToken(v string) *ListApplicationDPUSizesOutput { - s.NextToken = &v - return s -} - -type ListCalculationExecutionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of calculation executions to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `type:"string"` - - // The session ID. - // - // SessionId is a required field - SessionId *string `min:"1" type:"string" required:"true"` - - // A filter for a specific calculation execution state. A description of each - // state follows. - // - // CREATING - The calculation is in the process of being created. - // - // CREATED - The calculation has been created and is ready to run. - // - // QUEUED - The calculation has been queued for processing. - // - // RUNNING - The calculation is running. - // - // CANCELING - A request to cancel the calculation has been received and the - // system is working to stop it. 
- // - // CANCELED - The calculation is no longer running as the result of a cancel - // request. - // - // COMPLETED - The calculation has completed without error. - // - // FAILED - The calculation failed and is no longer running. - StateFilter *string `type:"string" enum:"CalculationExecutionState"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListCalculationExecutionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListCalculationExecutionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListCalculationExecutionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListCalculationExecutionsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.SessionId == nil { - invalidParams.Add(request.NewErrParamRequired("SessionId")) - } - if s.SessionId != nil && len(*s.SessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListCalculationExecutionsInput) SetMaxResults(v int64) *ListCalculationExecutionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListCalculationExecutionsInput) SetNextToken(v string) *ListCalculationExecutionsInput { - s.NextToken = &v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *ListCalculationExecutionsInput) SetSessionId(v string) *ListCalculationExecutionsInput { - s.SessionId = &v - return s -} - -// SetStateFilter sets the StateFilter field's value. -func (s *ListCalculationExecutionsInput) SetStateFilter(v string) *ListCalculationExecutionsInput { - s.StateFilter = &v - return s -} - -type ListCalculationExecutionsOutput struct { - _ struct{} `type:"structure"` - - // A list of CalculationSummary objects. - Calculations []*CalculationSummary `type:"list"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListCalculationExecutionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
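ListCalculationExecutionsInput pairs the required SessionId with an optional StateFilter drawn from the CalculationExecutionState values documented above. An illustrative sketch, assuming an existing session ID, that lists only the calculations still running:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	client := athena.New(session.Must(session.NewSession()))

	// Filter the listing to RUNNING calculations in an (assumed) existing session.
	out, err := client.ListCalculationExecutions(&athena.ListCalculationExecutionsInput{
		SessionId:   aws.String("example-session-id"), // assumed session ID
		StateFilter: aws.String(athena.CalculationExecutionStateRunning),
	})
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	for _, c := range out.Calculations {
		fmt.Println(aws.StringValue(c.CalculationExecutionId), aws.StringValue(c.Description))
	}
}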
-func (s ListCalculationExecutionsOutput) GoString() string { - return s.String() -} - -// SetCalculations sets the Calculations field's value. -func (s *ListCalculationExecutionsOutput) SetCalculations(v []*CalculationSummary) *ListCalculationExecutionsOutput { - s.Calculations = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListCalculationExecutionsOutput) SetNextToken(v string) *ListCalculationExecutionsOutput { - s.NextToken = &v - return s -} - -type ListCapacityReservationsInput struct { - _ struct{} `type:"structure"` - - // Specifies the maximum number of results to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListCapacityReservationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListCapacityReservationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListCapacityReservationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListCapacityReservationsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListCapacityReservationsInput) SetMaxResults(v int64) *ListCapacityReservationsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListCapacityReservationsInput) SetNextToken(v string) *ListCapacityReservationsInput { - s.NextToken = &v - return s -} - -type ListCapacityReservationsOutput struct { - _ struct{} `type:"structure"` - - // The capacity reservations for the current account. - // - // CapacityReservations is a required field - CapacityReservations []*CapacityReservation `type:"list" required:"true"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListCapacityReservationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListCapacityReservationsOutput) GoString() string { - return s.String() -} - -// SetCapacityReservations sets the CapacityReservations field's value. -func (s *ListCapacityReservationsOutput) SetCapacityReservations(v []*CapacityReservation) *ListCapacityReservationsOutput { - s.CapacityReservations = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListCapacityReservationsOutput) SetNextToken(v string) *ListCapacityReservationsOutput { - s.NextToken = &v - return s -} - -type ListDataCatalogsInput struct { - _ struct{} `type:"structure"` - - // Specifies the maximum number of data catalogs to return. - MaxResults *int64 `min:"2" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The name of the workgroup. Required if making an IAM Identity Center request. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDataCatalogsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDataCatalogsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListDataCatalogsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDataCatalogsInput"} - if s.MaxResults != nil && *s.MaxResults < 2 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 2)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListDataCatalogsInput) SetMaxResults(v int64) *ListDataCatalogsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListDataCatalogsInput) SetNextToken(v string) *ListDataCatalogsInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListDataCatalogsInput) SetWorkGroup(v string) *ListDataCatalogsInput { - s.WorkGroup = &v - return s -} - -type ListDataCatalogsOutput struct { - _ struct{} `type:"structure"` - - // A summary list of data catalogs. - DataCatalogsSummary []*DataCatalogSummary `type:"list"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDataCatalogsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDataCatalogsOutput) GoString() string { - return s.String() -} - -// SetDataCatalogsSummary sets the DataCatalogsSummary field's value. -func (s *ListDataCatalogsOutput) SetDataCatalogsSummary(v []*DataCatalogSummary) *ListDataCatalogsOutput { - s.DataCatalogsSummary = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListDataCatalogsOutput) SetNextToken(v string) *ListDataCatalogsOutput { - s.NextToken = &v - return s -} - -type ListDatabasesInput struct { - _ struct{} `type:"structure"` - - // The name of the data catalog that contains the databases to return. - // - // CatalogName is a required field - CatalogName *string `min:"1" type:"string" required:"true"` - - // Specifies the maximum number of results to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The name of the workgroup for which the metadata is being fetched. Required - // if requesting an IAM Identity Center enabled Glue Data Catalog. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDatabasesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDatabasesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListDatabasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDatabasesInput"} - if s.CatalogName == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogName")) - } - if s.CatalogName != nil && len(*s.CatalogName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogName sets the CatalogName field's value. -func (s *ListDatabasesInput) SetCatalogName(v string) *ListDatabasesInput { - s.CatalogName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListDatabasesInput) SetMaxResults(v int64) *ListDatabasesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. 
-func (s *ListDatabasesInput) SetNextToken(v string) *ListDatabasesInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListDatabasesInput) SetWorkGroup(v string) *ListDatabasesInput { - s.WorkGroup = &v - return s -} - -type ListDatabasesOutput struct { - _ struct{} `type:"structure"` - - // A list of databases from a data catalog. - DatabaseList []*Database `type:"list"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDatabasesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDatabasesOutput) GoString() string { - return s.String() -} - -// SetDatabaseList sets the DatabaseList field's value. -func (s *ListDatabasesOutput) SetDatabaseList(v []*Database) *ListDatabasesOutput { - s.DatabaseList = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListDatabasesOutput) SetNextToken(v string) *ListDatabasesOutput { - s.NextToken = &v - return s -} - -type ListEngineVersionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of engine versions to return in this request. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEngineVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEngineVersionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListEngineVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListEngineVersionsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. 
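Every input type in this generated file follows the same pattern: fluent Set* helpers plus a Validate method that enforces the required and min tags before a request is sent. A small sketch (the catalog name and page size are illustrative values) using ListDatabasesInput:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	// Build the input with the generated fluent setters.
	input := (&athena.ListDatabasesInput{}).
		SetCatalogName("AwsDataCatalog"). // illustrative catalog name
		SetMaxResults(25)

	// Validate catches client-side violations (e.g. a missing CatalogName)
	// without a round trip to the service.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println("catalog:", aws.StringValue(input.CatalogName))
}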
-func (s *ListEngineVersionsInput) SetMaxResults(v int64) *ListEngineVersionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListEngineVersionsInput) SetNextToken(v string) *ListEngineVersionsInput { - s.NextToken = &v - return s -} - -type ListEngineVersionsOutput struct { - _ struct{} `type:"structure"` - - // A list of engine versions that are available to choose from. - EngineVersions []*EngineVersion `type:"list"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEngineVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEngineVersionsOutput) GoString() string { - return s.String() -} - -// SetEngineVersions sets the EngineVersions field's value. -func (s *ListEngineVersionsOutput) SetEngineVersions(v []*EngineVersion) *ListEngineVersionsOutput { - s.EngineVersions = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListEngineVersionsOutput) SetNextToken(v string) *ListEngineVersionsOutput { - s.NextToken = &v - return s -} - -type ListExecutorsInput struct { - _ struct{} `type:"structure"` - - // A filter for a specific executor state. A description of each state follows. - // - // CREATING - The executor is being started, including acquiring resources. - // - // CREATED - The executor has been started. - // - // REGISTERED - The executor has been registered. - // - // TERMINATING - The executor is in the process of shutting down. - // - // TERMINATED - The executor is no longer running. - // - // FAILED - Due to a failure, the executor is no longer running. - ExecutorStateFilter *string `type:"string" enum:"ExecutorState"` - - // The maximum number of executors to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `type:"string"` - - // The session ID. - // - // SessionId is a required field - SessionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListExecutorsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ListExecutorsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListExecutorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListExecutorsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.SessionId == nil { - invalidParams.Add(request.NewErrParamRequired("SessionId")) - } - if s.SessionId != nil && len(*s.SessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetExecutorStateFilter sets the ExecutorStateFilter field's value. -func (s *ListExecutorsInput) SetExecutorStateFilter(v string) *ListExecutorsInput { - s.ExecutorStateFilter = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListExecutorsInput) SetMaxResults(v int64) *ListExecutorsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListExecutorsInput) SetNextToken(v string) *ListExecutorsInput { - s.NextToken = &v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *ListExecutorsInput) SetSessionId(v string) *ListExecutorsInput { - s.SessionId = &v - return s -} - -type ListExecutorsOutput struct { - _ struct{} `type:"structure"` - - // Contains summary information about the executor. - ExecutorsSummary []*ExecutorsSummary `type:"list"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `type:"string"` - - // The session ID. - // - // SessionId is a required field - SessionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListExecutorsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListExecutorsOutput) GoString() string { - return s.String() -} - -// SetExecutorsSummary sets the ExecutorsSummary field's value. -func (s *ListExecutorsOutput) SetExecutorsSummary(v []*ExecutorsSummary) *ListExecutorsOutput { - s.ExecutorsSummary = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListExecutorsOutput) SetNextToken(v string) *ListExecutorsOutput { - s.NextToken = &v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *ListExecutorsOutput) SetSessionId(v string) *ListExecutorsOutput { - s.SessionId = &v - return s -} - -type ListNamedQueriesInput struct { - _ struct{} `type:"structure"` - - // The maximum number of queries to return in this request. - MaxResults *int64 `type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. 
To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The name of the workgroup from which the named queries are being returned. - // If a workgroup is not specified, the saved queries for the primary workgroup - // are returned. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNamedQueriesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNamedQueriesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListNamedQueriesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListNamedQueriesInput"} - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListNamedQueriesInput) SetMaxResults(v int64) *ListNamedQueriesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNamedQueriesInput) SetNextToken(v string) *ListNamedQueriesInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListNamedQueriesInput) SetWorkGroup(v string) *ListNamedQueriesInput { - s.WorkGroup = &v - return s -} - -type ListNamedQueriesOutput struct { - _ struct{} `type:"structure"` - - // The list of unique query IDs. - NamedQueryIds []*string `min:"1" type:"list"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNamedQueriesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNamedQueriesOutput) GoString() string { - return s.String() -} - -// SetNamedQueryIds sets the NamedQueryIds field's value. -func (s *ListNamedQueriesOutput) SetNamedQueryIds(v []*string) *ListNamedQueriesOutput { - s.NamedQueryIds = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNamedQueriesOutput) SetNextToken(v string) *ListNamedQueriesOutput { - s.NextToken = &v - return s -} - -type ListNotebookMetadataInput struct { - _ struct{} `type:"structure"` - - // Search filter string. 
- Filters *FilterDefinition `type:"structure"` - - // Specifies the maximum number of results to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. - NextToken *string `min:"1" type:"string"` - - // The name of the Spark enabled workgroup to retrieve notebook metadata for. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNotebookMetadataInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNotebookMetadataInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListNotebookMetadataInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListNotebookMetadataInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - if s.Filters != nil { - if err := s.Filters.Validate(); err != nil { - invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFilters sets the Filters field's value. -func (s *ListNotebookMetadataInput) SetFilters(v *FilterDefinition) *ListNotebookMetadataInput { - s.Filters = v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListNotebookMetadataInput) SetMaxResults(v int64) *ListNotebookMetadataInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNotebookMetadataInput) SetNextToken(v string) *ListNotebookMetadataInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListNotebookMetadataInput) SetWorkGroup(v string) *ListNotebookMetadataInput { - s.WorkGroup = &v - return s -} - -type ListNotebookMetadataOutput struct { - _ struct{} `type:"structure"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The list of notebook metadata for the specified workgroup. - NotebookMetadataList []*NotebookMetadata `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNotebookMetadataOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNotebookMetadataOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNotebookMetadataOutput) SetNextToken(v string) *ListNotebookMetadataOutput { - s.NextToken = &v - return s -} - -// SetNotebookMetadataList sets the NotebookMetadataList field's value. -func (s *ListNotebookMetadataOutput) SetNotebookMetadataList(v []*NotebookMetadata) *ListNotebookMetadataOutput { - s.NotebookMetadataList = v - return s -} - -type ListNotebookSessionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of notebook sessions to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The ID of the notebook to list sessions for. - // - // NotebookId is a required field - NotebookId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNotebookSessionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNotebookSessionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListNotebookSessionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListNotebookSessionsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.NotebookId == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookId")) - } - if s.NotebookId != nil && len(*s.NotebookId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListNotebookSessionsInput) SetMaxResults(v int64) *ListNotebookSessionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNotebookSessionsInput) SetNextToken(v string) *ListNotebookSessionsInput { - s.NextToken = &v - return s -} - -// SetNotebookId sets the NotebookId field's value. -func (s *ListNotebookSessionsInput) SetNotebookId(v string) *ListNotebookSessionsInput { - s.NotebookId = &v - return s -} - -type ListNotebookSessionsOutput struct { - _ struct{} `type:"structure"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. 
To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // A list of the sessions belonging to the notebook. - // - // NotebookSessionsList is a required field - NotebookSessionsList []*NotebookSessionSummary `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNotebookSessionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListNotebookSessionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNotebookSessionsOutput) SetNextToken(v string) *ListNotebookSessionsOutput { - s.NextToken = &v - return s -} - -// SetNotebookSessionsList sets the NotebookSessionsList field's value. -func (s *ListNotebookSessionsOutput) SetNotebookSessionsList(v []*NotebookSessionSummary) *ListNotebookSessionsOutput { - s.NotebookSessionsList = v - return s -} - -type ListPreparedStatementsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results to return in this request. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The workgroup to list the prepared statements for. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPreparedStatementsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPreparedStatementsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListPreparedStatementsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPreparedStatementsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *ListPreparedStatementsInput) SetMaxResults(v int64) *ListPreparedStatementsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListPreparedStatementsInput) SetNextToken(v string) *ListPreparedStatementsInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListPreparedStatementsInput) SetWorkGroup(v string) *ListPreparedStatementsInput { - s.WorkGroup = &v - return s -} - -type ListPreparedStatementsOutput struct { - _ struct{} `type:"structure"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The list of prepared statements for the workgroup. - PreparedStatements []*PreparedStatementSummary `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPreparedStatementsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListPreparedStatementsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListPreparedStatementsOutput) SetNextToken(v string) *ListPreparedStatementsOutput { - s.NextToken = &v - return s -} - -// SetPreparedStatements sets the PreparedStatements field's value. -func (s *ListPreparedStatementsOutput) SetPreparedStatements(v []*PreparedStatementSummary) *ListPreparedStatementsOutput { - s.PreparedStatements = v - return s -} - -type ListQueryExecutionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of query executions to return in this request. - MaxResults *int64 `type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The name of the workgroup from which queries are being returned. If a workgroup - // is not specified, a list of available query execution IDs for the queries - // in the primary workgroup is returned. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListQueryExecutionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s ListQueryExecutionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListQueryExecutionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListQueryExecutionsInput"} - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListQueryExecutionsInput) SetMaxResults(v int64) *ListQueryExecutionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListQueryExecutionsInput) SetNextToken(v string) *ListQueryExecutionsInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListQueryExecutionsInput) SetWorkGroup(v string) *ListQueryExecutionsInput { - s.WorkGroup = &v - return s -} - -type ListQueryExecutionsOutput struct { - _ struct{} `type:"structure"` - - // A token to be used by the next request if this request is truncated. - NextToken *string `min:"1" type:"string"` - - // The unique IDs of each query execution as an array of strings. - QueryExecutionIds []*string `min:"1" type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListQueryExecutionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListQueryExecutionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListQueryExecutionsOutput) SetNextToken(v string) *ListQueryExecutionsOutput { - s.NextToken = &v - return s -} - -// SetQueryExecutionIds sets the QueryExecutionIds field's value. -func (s *ListQueryExecutionsOutput) SetQueryExecutionIds(v []*string) *ListQueryExecutionsOutput { - s.QueryExecutionIds = v - return s -} - -type ListSessionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of sessions to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `type:"string"` - - // A filter for a specific session state. A description of each state follows. - // - // CREATING - The session is being started, including acquiring resources. - // - // CREATED - The session has been started. - // - // IDLE - The session is able to accept a calculation. - // - // BUSY - The session is processing another task and is unable to accept a calculation. - // - // TERMINATING - The session is in the process of shutting down. - // - // TERMINATED - The session and its resources are no longer running. - // - // DEGRADED - The session has no healthy coordinators. - // - // FAILED - Due to a failure, the session and its resources are no longer running. 
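ListQueryExecutionsOutput, like the other list responses in this file, is paginated via NextToken. A minimal sketch, assuming a configured region and the default "primary" workgroup, that walks every page with the generated ListQueryExecutionsPages helper:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("eu-west-1")})) // assumed region
	client := athena.New(sess)

	input := &athena.ListQueryExecutionsInput{
		WorkGroup:  aws.String("primary"), // assumed workgroup
		MaxResults: aws.Int64(50),
	}
	// The pager resubmits the request with each page's NextToken until the last page.
	err := client.ListQueryExecutionsPages(input, func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool {
		for _, id := range page.QueryExecutionIds {
			fmt.Println(aws.StringValue(id))
		}
		return true // keep going until NextToken is exhausted
	})
	if err != nil {
		fmt.Println("list failed:", err)
	}
}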
- StateFilter *string `type:"string" enum:"SessionState"` - - // The workgroup to which the session belongs. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSessionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSessionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListSessionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListSessionsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListSessionsInput) SetMaxResults(v int64) *ListSessionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListSessionsInput) SetNextToken(v string) *ListSessionsInput { - s.NextToken = &v - return s -} - -// SetStateFilter sets the StateFilter field's value. -func (s *ListSessionsInput) SetStateFilter(v string) *ListSessionsInput { - s.StateFilter = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListSessionsInput) SetWorkGroup(v string) *ListSessionsInput { - s.WorkGroup = &v - return s -} - -type ListSessionsOutput struct { - _ struct{} `type:"structure"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `type:"string"` - - // A list of sessions. - Sessions []*SessionSummary `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSessionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListSessionsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListSessionsOutput) SetNextToken(v string) *ListSessionsOutput { - s.NextToken = &v - return s -} - -// SetSessions sets the Sessions field's value. 
-func (s *ListSessionsOutput) SetSessions(v []*SessionSummary) *ListSessionsOutput { - s.Sessions = v - return s -} - -type ListTableMetadataInput struct { - _ struct{} `type:"structure"` - - // The name of the data catalog for which table metadata should be returned. - // - // CatalogName is a required field - CatalogName *string `min:"1" type:"string" required:"true"` - - // The name of the database for which table metadata should be returned. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A regex filter that pattern-matches table names. If no expression is supplied, - // metadata for all tables are listed. - Expression *string `type:"string"` - - // Specifies the maximum number of results to return. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The name of the workgroup for which the metadata is being fetched. Required - // if requesting an IAM Identity Center enabled Glue Data Catalog. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTableMetadataInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTableMetadataInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTableMetadataInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTableMetadataInput"} - if s.CatalogName == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogName")) - } - if s.CatalogName != nil && len(*s.CatalogName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalogName sets the CatalogName field's value. -func (s *ListTableMetadataInput) SetCatalogName(v string) *ListTableMetadataInput { - s.CatalogName = &v - return s -} - -// SetDatabaseName sets the DatabaseName field's value. -func (s *ListTableMetadataInput) SetDatabaseName(v string) *ListTableMetadataInput { - s.DatabaseName = &v - return s -} - -// SetExpression sets the Expression field's value. -func (s *ListTableMetadataInput) SetExpression(v string) *ListTableMetadataInput { - s.Expression = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *ListTableMetadataInput) SetMaxResults(v int64) *ListTableMetadataInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTableMetadataInput) SetNextToken(v string) *ListTableMetadataInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListTableMetadataInput) SetWorkGroup(v string) *ListTableMetadataInput { - s.WorkGroup = &v - return s -} - -type ListTableMetadataOutput struct { - _ struct{} `type:"structure"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // A list of table metadata. - TableMetadataList []*TableMetadata `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTableMetadataOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTableMetadataOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTableMetadataOutput) SetNextToken(v string) *ListTableMetadataOutput { - s.NextToken = &v - return s -} - -// SetTableMetadataList sets the TableMetadataList field's value. -func (s *ListTableMetadataOutput) SetTableMetadataList(v []*TableMetadata) *ListTableMetadataOutput { - s.TableMetadataList = v - return s -} - -type ListTagsForResourceInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results to be returned per request that lists the tags - // for the resource. - MaxResults *int64 `min:"75" type:"integer"` - - // The token for the next set of results, or null if there are no additional - // results for this request, where the request lists the tags for the resource - // with the specified ARN. - NextToken *string `min:"1" type:"string"` - - // Lists the tags for the resource with the specified ARN. - // - // ResourceARN is a required field - ResourceARN *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTagsForResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTagsForResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.MaxResults != nil && *s.MaxResults < 75 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 75)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.ResourceARN == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceARN")) - } - if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListTagsForResourceInput) SetMaxResults(v int64) *ListTagsForResourceInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { - s.NextToken = &v - return s -} - -// SetResourceARN sets the ResourceARN field's value. -func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { - s.ResourceARN = &v - return s -} - -type ListTagsForResourceOutput struct { - _ struct{} `type:"structure"` - - // A token to be used by the next request if this request is truncated. - NextToken *string `min:"1" type:"string"` - - // The list of tags associated with the specified resource. - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTagsForResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListTagsForResourceOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { - s.NextToken = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { - s.Tags = v - return s -} - -type ListWorkGroupsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of workgroups to return in this request. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListWorkGroupsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListWorkGroupsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListWorkGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListWorkGroupsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListWorkGroupsInput) SetMaxResults(v int64) *ListWorkGroupsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListWorkGroupsInput) SetNextToken(v string) *ListWorkGroupsInput { - s.NextToken = &v - return s -} - -type ListWorkGroupsOutput struct { - _ struct{} `type:"structure"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // A list of WorkGroupSummary objects that include the names, descriptions, - // creation times, and states for each workgroup. - WorkGroups []*WorkGroupSummary `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListWorkGroupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListWorkGroupsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *ListWorkGroupsOutput) SetNextToken(v string) *ListWorkGroupsOutput { - s.NextToken = &v - return s -} - -// SetWorkGroups sets the WorkGroups field's value. -func (s *ListWorkGroupsOutput) SetWorkGroups(v []*WorkGroupSummary) *ListWorkGroupsOutput { - s.WorkGroups = v - return s -} - -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. -type MetadataException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetadataException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MetadataException) GoString() string { - return s.String() -} - -func newErrorMetadataException(v protocol.ResponseMetadata) error { - return &MetadataException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *MetadataException) Code() string { - return "MetadataException" -} - -// Message returns the exception's message. -func (s *MetadataException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *MetadataException) OrigErr() error { - return nil -} - -func (s *MetadataException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *MetadataException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *MetadataException) RequestID() string { - return s.RespMetadata.RequestID -} - -// A query, where QueryString contains the SQL statements that make up the query. -type NamedQuery struct { - _ struct{} `type:"structure"` - - // The database to which the query belongs. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` - - // The query description. - Description *string `min:"1" type:"string"` - - // The query name. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The unique identifier of the query. - NamedQueryId *string `min:"1" type:"string"` - - // The SQL statements that make up the query. - // - // QueryString is a required field - QueryString *string `min:"1" type:"string" required:"true"` - - // The name of the workgroup that contains the named query. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NamedQuery) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NamedQuery) GoString() string { - return s.String() -} - -// SetDatabase sets the Database field's value. -func (s *NamedQuery) SetDatabase(v string) *NamedQuery { - s.Database = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *NamedQuery) SetDescription(v string) *NamedQuery { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *NamedQuery) SetName(v string) *NamedQuery { - s.Name = &v - return s -} - -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *NamedQuery) SetNamedQueryId(v string) *NamedQuery { - s.NamedQueryId = &v - return s -} - -// SetQueryString sets the QueryString field's value. -func (s *NamedQuery) SetQueryString(v string) *NamedQuery { - s.QueryString = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. 
-func (s *NamedQuery) SetWorkGroup(v string) *NamedQuery { - s.WorkGroup = &v - return s -} - -// Contains metadata for notebook, including the notebook name, ID, workgroup, -// and time created. -type NotebookMetadata struct { - _ struct{} `type:"structure"` - - // The time when the notebook was created. - CreationTime *time.Time `type:"timestamp"` - - // The time when the notebook was last modified. - LastModifiedTime *time.Time `type:"timestamp"` - - // The name of the notebook. - Name *string `min:"1" type:"string"` - - // The notebook ID. - NotebookId *string `min:"1" type:"string"` - - // The type of notebook. Currently, the only valid type is IPYNB. - Type *string `type:"string" enum:"NotebookType"` - - // The name of the Spark enabled workgroup to which the notebook belongs. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotebookMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotebookMetadata) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *NotebookMetadata) SetCreationTime(v time.Time) *NotebookMetadata { - s.CreationTime = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *NotebookMetadata) SetLastModifiedTime(v time.Time) *NotebookMetadata { - s.LastModifiedTime = &v - return s -} - -// SetName sets the Name field's value. -func (s *NotebookMetadata) SetName(v string) *NotebookMetadata { - s.Name = &v - return s -} - -// SetNotebookId sets the NotebookId field's value. -func (s *NotebookMetadata) SetNotebookId(v string) *NotebookMetadata { - s.NotebookId = &v - return s -} - -// SetType sets the Type field's value. -func (s *NotebookMetadata) SetType(v string) *NotebookMetadata { - s.Type = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *NotebookMetadata) SetWorkGroup(v string) *NotebookMetadata { - s.WorkGroup = &v - return s -} - -// Contains the notebook session ID and notebook session creation time. -type NotebookSessionSummary struct { - _ struct{} `type:"structure"` - - // The time when the notebook session was created. - CreationTime *time.Time `type:"timestamp"` - - // The notebook session ID. - SessionId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotebookSessionSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotebookSessionSummary) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. 
-func (s *NotebookSessionSummary) SetCreationTime(v time.Time) *NotebookSessionSummary { - s.CreationTime = &v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *NotebookSessionSummary) SetSessionId(v string) *NotebookSessionSummary { - s.SessionId = &v - return s -} - -// A prepared SQL statement for use with Athena. -type PreparedStatement struct { - _ struct{} `type:"structure"` - - // The description of the prepared statement. - Description *string `min:"1" type:"string"` - - // The last modified time of the prepared statement. - LastModifiedTime *time.Time `type:"timestamp"` - - // The query string for the prepared statement. - QueryStatement *string `min:"1" type:"string"` - - // The name of the prepared statement. - StatementName *string `min:"1" type:"string"` - - // The name of the workgroup to which the prepared statement belongs. - WorkGroupName *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PreparedStatement) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PreparedStatement) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *PreparedStatement) SetDescription(v string) *PreparedStatement { - s.Description = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *PreparedStatement) SetLastModifiedTime(v time.Time) *PreparedStatement { - s.LastModifiedTime = &v - return s -} - -// SetQueryStatement sets the QueryStatement field's value. -func (s *PreparedStatement) SetQueryStatement(v string) *PreparedStatement { - s.QueryStatement = &v - return s -} - -// SetStatementName sets the StatementName field's value. -func (s *PreparedStatement) SetStatementName(v string) *PreparedStatement { - s.StatementName = &v - return s -} - -// SetWorkGroupName sets the WorkGroupName field's value. -func (s *PreparedStatement) SetWorkGroupName(v string) *PreparedStatement { - s.WorkGroupName = &v - return s -} - -// The name and last modified time of the prepared statement. -type PreparedStatementSummary struct { - _ struct{} `type:"structure"` - - // The last modified time of the prepared statement. - LastModifiedTime *time.Time `type:"timestamp"` - - // The name of the prepared statement. - StatementName *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PreparedStatementSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PreparedStatementSummary) GoString() string { - return s.String() -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. 
-func (s *PreparedStatementSummary) SetLastModifiedTime(v time.Time) *PreparedStatementSummary { - s.LastModifiedTime = &v - return s -} - -// SetStatementName sets the StatementName field's value. -func (s *PreparedStatementSummary) SetStatementName(v string) *PreparedStatementSummary { - s.StatementName = &v - return s -} - -type PutCapacityAssignmentConfigurationInput struct { - _ struct{} `type:"structure"` - - // The list of assignments for the capacity assignment configuration. - // - // CapacityAssignments is a required field - CapacityAssignments []*CapacityAssignment `type:"list" required:"true"` - - // The name of the capacity reservation to put a capacity assignment configuration - // for. - // - // CapacityReservationName is a required field - CapacityReservationName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutCapacityAssignmentConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutCapacityAssignmentConfigurationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutCapacityAssignmentConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutCapacityAssignmentConfigurationInput"} - if s.CapacityAssignments == nil { - invalidParams.Add(request.NewErrParamRequired("CapacityAssignments")) - } - if s.CapacityReservationName == nil { - invalidParams.Add(request.NewErrParamRequired("CapacityReservationName")) - } - if s.CapacityReservationName != nil && len(*s.CapacityReservationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CapacityReservationName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCapacityAssignments sets the CapacityAssignments field's value. -func (s *PutCapacityAssignmentConfigurationInput) SetCapacityAssignments(v []*CapacityAssignment) *PutCapacityAssignmentConfigurationInput { - s.CapacityAssignments = v - return s -} - -// SetCapacityReservationName sets the CapacityReservationName field's value. -func (s *PutCapacityAssignmentConfigurationInput) SetCapacityReservationName(v string) *PutCapacityAssignmentConfigurationInput { - s.CapacityReservationName = &v - return s -} - -type PutCapacityAssignmentConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutCapacityAssignmentConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s PutCapacityAssignmentConfigurationOutput) GoString() string { - return s.String() -} - -// Information about a single instance of a query execution. -type QueryExecution struct { - _ struct{} `type:"structure"` - - // The engine version that executed the query. - EngineVersion *EngineVersion `type:"structure"` - - // A list of values for the parameters in a query. The values are applied sequentially - // to the parameters in the query in the order in which the parameters occur. - // The list of parameters is not returned in the response. - ExecutionParameters []*string `min:"1" type:"list"` - - // The SQL query statements which the query execution ran. - Query *string `min:"1" type:"string"` - - // The database in which the query execution occurred. - QueryExecutionContext *QueryExecutionContext `type:"structure"` - - // The unique identifier for each query execution. - QueryExecutionId *string `min:"1" type:"string"` - - // Specifies whether Amazon S3 access grants are enabled for query results. - QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration `type:"structure"` - - // The location in Amazon S3 where query and calculation results are stored - // and the encryption option, if any, used for query results. These are known - // as "client-side settings". If workgroup settings override client-side settings, - // then the query uses the location for the query results and the encryption - // configuration that are specified for the workgroup. - ResultConfiguration *ResultConfiguration `type:"structure"` - - // Specifies the query result reuse behavior that was used for the query. - ResultReuseConfiguration *ResultReuseConfiguration `type:"structure"` - - // The type of query statement that was run. DDL indicates DDL query statements. - // DML indicates DML (Data Manipulation Language) query statements, such as - // CREATE TABLE AS SELECT. UTILITY indicates query statements other than DDL - // and DML, such as SHOW CREATE TABLE, or DESCRIBE TABLE. - StatementType *string `type:"string" enum:"StatementType"` - - // Query execution statistics, such as the amount of data scanned, the amount - // of time that the query took to process, and the type of statement that was - // run. - Statistics *QueryExecutionStatistics `type:"structure"` - - // The completion date, current state, submission time, and state change reason - // (if applicable) for the query execution. - Status *QueryExecutionStatus `type:"structure"` - - // The kind of query statement that was run. - SubstatementType *string `type:"string"` - - // The name of the workgroup in which the query ran. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryExecution) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryExecution) GoString() string { - return s.String() -} - -// SetEngineVersion sets the EngineVersion field's value. 
-func (s *QueryExecution) SetEngineVersion(v *EngineVersion) *QueryExecution { - s.EngineVersion = v - return s -} - -// SetExecutionParameters sets the ExecutionParameters field's value. -func (s *QueryExecution) SetExecutionParameters(v []*string) *QueryExecution { - s.ExecutionParameters = v - return s -} - -// SetQuery sets the Query field's value. -func (s *QueryExecution) SetQuery(v string) *QueryExecution { - s.Query = &v - return s -} - -// SetQueryExecutionContext sets the QueryExecutionContext field's value. -func (s *QueryExecution) SetQueryExecutionContext(v *QueryExecutionContext) *QueryExecution { - s.QueryExecutionContext = v - return s -} - -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *QueryExecution) SetQueryExecutionId(v string) *QueryExecution { - s.QueryExecutionId = &v - return s -} - -// SetQueryResultsS3AccessGrantsConfiguration sets the QueryResultsS3AccessGrantsConfiguration field's value. -func (s *QueryExecution) SetQueryResultsS3AccessGrantsConfiguration(v *QueryResultsS3AccessGrantsConfiguration) *QueryExecution { - s.QueryResultsS3AccessGrantsConfiguration = v - return s -} - -// SetResultConfiguration sets the ResultConfiguration field's value. -func (s *QueryExecution) SetResultConfiguration(v *ResultConfiguration) *QueryExecution { - s.ResultConfiguration = v - return s -} - -// SetResultReuseConfiguration sets the ResultReuseConfiguration field's value. -func (s *QueryExecution) SetResultReuseConfiguration(v *ResultReuseConfiguration) *QueryExecution { - s.ResultReuseConfiguration = v - return s -} - -// SetStatementType sets the StatementType field's value. -func (s *QueryExecution) SetStatementType(v string) *QueryExecution { - s.StatementType = &v - return s -} - -// SetStatistics sets the Statistics field's value. -func (s *QueryExecution) SetStatistics(v *QueryExecutionStatistics) *QueryExecution { - s.Statistics = v - return s -} - -// SetStatus sets the Status field's value. -func (s *QueryExecution) SetStatus(v *QueryExecutionStatus) *QueryExecution { - s.Status = v - return s -} - -// SetSubstatementType sets the SubstatementType field's value. -func (s *QueryExecution) SetSubstatementType(v string) *QueryExecution { - s.SubstatementType = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *QueryExecution) SetWorkGroup(v string) *QueryExecution { - s.WorkGroup = &v - return s -} - -// The database and data catalog context in which the query execution occurs. -type QueryExecutionContext struct { - _ struct{} `type:"structure"` - - // The name of the data catalog used in the query execution. - Catalog *string `min:"1" type:"string"` - - // The name of the database used in the query execution. The database must exist - // in the catalog. - Database *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryExecutionContext) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s QueryExecutionContext) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *QueryExecutionContext) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueryExecutionContext"} - if s.Catalog != nil && len(*s.Catalog) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Catalog", 1)) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCatalog sets the Catalog field's value. -func (s *QueryExecutionContext) SetCatalog(v string) *QueryExecutionContext { - s.Catalog = &v - return s -} - -// SetDatabase sets the Database field's value. -func (s *QueryExecutionContext) SetDatabase(v string) *QueryExecutionContext { - s.Database = &v - return s -} - -// The amount of data scanned during the query execution and the amount of time -// that it took to execute, and the type of statement that was run. -type QueryExecutionStatistics struct { - _ struct{} `type:"structure"` - - // The location and file name of a data manifest file. The manifest file is - // saved to the Athena query results location in Amazon S3. The manifest file - // tracks files that the query wrote to Amazon S3. If the query fails, the manifest - // file also tracks files that the query intended to write. The manifest is - // useful for identifying orphaned files resulting from a failed query. For - // more information, see Working with Query Results, Output Files, and Query - // History (https://docs.aws.amazon.com/athena/latest/ug/querying.html) in the - // Amazon Athena User Guide. - DataManifestLocation *string `type:"string"` - - // The number of bytes in the data that was queried. - DataScannedInBytes *int64 `type:"long"` - - // The number of milliseconds that the query took to execute. - EngineExecutionTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that Athena took to plan the query processing - // flow. This includes the time spent retrieving table partitions from the data - // source. Note that because the query engine performs the query planning, query - // planning time is a subset of engine processing time. - QueryPlanningTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that the query was in your query queue waiting - // for resources. Note that if transient errors occur, Athena might automatically - // add the query back to the queue. - QueryQueueTimeInMillis *int64 `type:"long"` - - // Contains information about whether previous query results were reused for - // the query. - ResultReuseInformation *ResultReuseInformation `type:"structure"` - - // The number of milliseconds that Athena took to preprocess the query before - // submitting the query to the query engine. - ServicePreProcessingTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that Athena took to finalize and publish the query - // results after the query engine finished running the query. - ServiceProcessingTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that Athena took to run the query. - TotalExecutionTimeInMillis *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s QueryExecutionStatistics) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryExecutionStatistics) GoString() string { - return s.String() -} - -// SetDataManifestLocation sets the DataManifestLocation field's value. -func (s *QueryExecutionStatistics) SetDataManifestLocation(v string) *QueryExecutionStatistics { - s.DataManifestLocation = &v - return s -} - -// SetDataScannedInBytes sets the DataScannedInBytes field's value. -func (s *QueryExecutionStatistics) SetDataScannedInBytes(v int64) *QueryExecutionStatistics { - s.DataScannedInBytes = &v - return s -} - -// SetEngineExecutionTimeInMillis sets the EngineExecutionTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetEngineExecutionTimeInMillis(v int64) *QueryExecutionStatistics { - s.EngineExecutionTimeInMillis = &v - return s -} - -// SetQueryPlanningTimeInMillis sets the QueryPlanningTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetQueryPlanningTimeInMillis(v int64) *QueryExecutionStatistics { - s.QueryPlanningTimeInMillis = &v - return s -} - -// SetQueryQueueTimeInMillis sets the QueryQueueTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetQueryQueueTimeInMillis(v int64) *QueryExecutionStatistics { - s.QueryQueueTimeInMillis = &v - return s -} - -// SetResultReuseInformation sets the ResultReuseInformation field's value. -func (s *QueryExecutionStatistics) SetResultReuseInformation(v *ResultReuseInformation) *QueryExecutionStatistics { - s.ResultReuseInformation = v - return s -} - -// SetServicePreProcessingTimeInMillis sets the ServicePreProcessingTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetServicePreProcessingTimeInMillis(v int64) *QueryExecutionStatistics { - s.ServicePreProcessingTimeInMillis = &v - return s -} - -// SetServiceProcessingTimeInMillis sets the ServiceProcessingTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetServiceProcessingTimeInMillis(v int64) *QueryExecutionStatistics { - s.ServiceProcessingTimeInMillis = &v - return s -} - -// SetTotalExecutionTimeInMillis sets the TotalExecutionTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetTotalExecutionTimeInMillis(v int64) *QueryExecutionStatistics { - s.TotalExecutionTimeInMillis = &v - return s -} - -// The completion date, current state, submission time, and state change reason -// (if applicable) for the query execution. -type QueryExecutionStatus struct { - _ struct{} `type:"structure"` - - // Provides information about an Athena query error. - AthenaError *AthenaError `type:"structure"` - - // The date and time that the query completed. - CompletionDateTime *time.Time `type:"timestamp"` - - // The state of query execution. QUEUED indicates that the query has been submitted - // to the service, and Athena will execute the query as soon as resources are - // available. RUNNING indicates that the query is in execution phase. SUCCEEDED - // indicates that the query completed without errors. FAILED indicates that - // the query experienced an error and did not complete processing. CANCELLED - // indicates that a user input interrupted query execution. - // - // Athena automatically retries your queries in cases of certain transient errors. 
- // As a result, you may see the query state transition from RUNNING or FAILED - // to QUEUED. - State *string `type:"string" enum:"QueryExecutionState"` - - // Further detail about the status of the query. - StateChangeReason *string `type:"string"` - - // The date and time that the query was submitted. - SubmissionDateTime *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryExecutionStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryExecutionStatus) GoString() string { - return s.String() -} - -// SetAthenaError sets the AthenaError field's value. -func (s *QueryExecutionStatus) SetAthenaError(v *AthenaError) *QueryExecutionStatus { - s.AthenaError = v - return s -} - -// SetCompletionDateTime sets the CompletionDateTime field's value. -func (s *QueryExecutionStatus) SetCompletionDateTime(v time.Time) *QueryExecutionStatus { - s.CompletionDateTime = &v - return s -} - -// SetState sets the State field's value. -func (s *QueryExecutionStatus) SetState(v string) *QueryExecutionStatus { - s.State = &v - return s -} - -// SetStateChangeReason sets the StateChangeReason field's value. -func (s *QueryExecutionStatus) SetStateChangeReason(v string) *QueryExecutionStatus { - s.StateChangeReason = &v - return s -} - -// SetSubmissionDateTime sets the SubmissionDateTime field's value. -func (s *QueryExecutionStatus) SetSubmissionDateTime(v time.Time) *QueryExecutionStatus { - s.SubmissionDateTime = &v - return s -} - -// Specifies whether Amazon S3 access grants are enabled for query results. -type QueryResultsS3AccessGrantsConfiguration struct { - _ struct{} `type:"structure"` - - // The authentication type used for Amazon S3 access grants. Currently, only - // DIRECTORY_IDENTITY is supported. - // - // AuthenticationType is a required field - AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` - - // When enabled, appends the user ID as an Amazon S3 path prefix to the query - // result output location. - CreateUserLevelPrefix *bool `type:"boolean"` - - // Specifies whether Amazon S3 access grants are enabled for query results. - // - // EnableS3AccessGrants is a required field - EnableS3AccessGrants *bool `type:"boolean" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryResultsS3AccessGrantsConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryResultsS3AccessGrantsConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *QueryResultsS3AccessGrantsConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueryResultsS3AccessGrantsConfiguration"} - if s.AuthenticationType == nil { - invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) - } - if s.EnableS3AccessGrants == nil { - invalidParams.Add(request.NewErrParamRequired("EnableS3AccessGrants")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAuthenticationType sets the AuthenticationType field's value. -func (s *QueryResultsS3AccessGrantsConfiguration) SetAuthenticationType(v string) *QueryResultsS3AccessGrantsConfiguration { - s.AuthenticationType = &v - return s -} - -// SetCreateUserLevelPrefix sets the CreateUserLevelPrefix field's value. -func (s *QueryResultsS3AccessGrantsConfiguration) SetCreateUserLevelPrefix(v bool) *QueryResultsS3AccessGrantsConfiguration { - s.CreateUserLevelPrefix = &v - return s -} - -// SetEnableS3AccessGrants sets the EnableS3AccessGrants field's value. -func (s *QueryResultsS3AccessGrantsConfiguration) SetEnableS3AccessGrants(v bool) *QueryResultsS3AccessGrantsConfiguration { - s.EnableS3AccessGrants = &v - return s -} - -// The query execution timeline, statistics on input and output rows and bytes, -// and the different query stages that form the query execution plan. -type QueryRuntimeStatistics struct { - _ struct{} `type:"structure"` - - // Stage statistics such as input and output rows and bytes, execution time, - // and stage state. This information also includes substages and the query stage - // plan. - OutputStage *QueryStage `type:"structure"` - - // Statistics such as input rows and bytes read by the query, rows and bytes - // output by the query, and the number of rows written by the query. - Rows *QueryRuntimeStatisticsRows `type:"structure"` - - // Timeline statistics such as query queue time, planning time, execution time, - // service processing time, and total execution time. - Timeline *QueryRuntimeStatisticsTimeline `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryRuntimeStatistics) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryRuntimeStatistics) GoString() string { - return s.String() -} - -// SetOutputStage sets the OutputStage field's value. -func (s *QueryRuntimeStatistics) SetOutputStage(v *QueryStage) *QueryRuntimeStatistics { - s.OutputStage = v - return s -} - -// SetRows sets the Rows field's value. -func (s *QueryRuntimeStatistics) SetRows(v *QueryRuntimeStatisticsRows) *QueryRuntimeStatistics { - s.Rows = v - return s -} - -// SetTimeline sets the Timeline field's value. -func (s *QueryRuntimeStatistics) SetTimeline(v *QueryRuntimeStatisticsTimeline) *QueryRuntimeStatistics { - s.Timeline = v - return s -} - -// Statistics such as input rows and bytes read by the query, rows and bytes -// output by the query, and the number of rows written by the query. -type QueryRuntimeStatisticsRows struct { - _ struct{} `type:"structure"` - - // The number of bytes read to execute the query. 
- InputBytes *int64 `type:"long"` - - // The number of rows read to execute the query. - InputRows *int64 `type:"long"` - - // The number of bytes returned by the query. - OutputBytes *int64 `type:"long"` - - // The number of rows returned by the query. - OutputRows *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryRuntimeStatisticsRows) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryRuntimeStatisticsRows) GoString() string { - return s.String() -} - -// SetInputBytes sets the InputBytes field's value. -func (s *QueryRuntimeStatisticsRows) SetInputBytes(v int64) *QueryRuntimeStatisticsRows { - s.InputBytes = &v - return s -} - -// SetInputRows sets the InputRows field's value. -func (s *QueryRuntimeStatisticsRows) SetInputRows(v int64) *QueryRuntimeStatisticsRows { - s.InputRows = &v - return s -} - -// SetOutputBytes sets the OutputBytes field's value. -func (s *QueryRuntimeStatisticsRows) SetOutputBytes(v int64) *QueryRuntimeStatisticsRows { - s.OutputBytes = &v - return s -} - -// SetOutputRows sets the OutputRows field's value. -func (s *QueryRuntimeStatisticsRows) SetOutputRows(v int64) *QueryRuntimeStatisticsRows { - s.OutputRows = &v - return s -} - -// Timeline statistics such as query queue time, planning time, execution time, -// service processing time, and total execution time. -type QueryRuntimeStatisticsTimeline struct { - _ struct{} `type:"structure"` - - // The number of milliseconds that the query took to execute. - EngineExecutionTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that Athena took to plan the query processing - // flow. This includes the time spent retrieving table partitions from the data - // source. Note that because the query engine performs the query planning, query - // planning time is a subset of engine processing time. - QueryPlanningTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that the query was in your query queue waiting - // for resources. Note that if transient errors occur, Athena might automatically - // add the query back to the queue. - QueryQueueTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that Athena spends on preprocessing before it - // submits the query to the engine. - ServicePreProcessingTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that Athena took to finalize and publish the query - // results after the query engine finished running the query. - ServiceProcessingTimeInMillis *int64 `type:"long"` - - // The number of milliseconds that Athena took to run the query. - TotalExecutionTimeInMillis *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryRuntimeStatisticsTimeline) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryRuntimeStatisticsTimeline) GoString() string { - return s.String() -} - -// SetEngineExecutionTimeInMillis sets the EngineExecutionTimeInMillis field's value. -func (s *QueryRuntimeStatisticsTimeline) SetEngineExecutionTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { - s.EngineExecutionTimeInMillis = &v - return s -} - -// SetQueryPlanningTimeInMillis sets the QueryPlanningTimeInMillis field's value. -func (s *QueryRuntimeStatisticsTimeline) SetQueryPlanningTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { - s.QueryPlanningTimeInMillis = &v - return s -} - -// SetQueryQueueTimeInMillis sets the QueryQueueTimeInMillis field's value. -func (s *QueryRuntimeStatisticsTimeline) SetQueryQueueTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { - s.QueryQueueTimeInMillis = &v - return s -} - -// SetServicePreProcessingTimeInMillis sets the ServicePreProcessingTimeInMillis field's value. -func (s *QueryRuntimeStatisticsTimeline) SetServicePreProcessingTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { - s.ServicePreProcessingTimeInMillis = &v - return s -} - -// SetServiceProcessingTimeInMillis sets the ServiceProcessingTimeInMillis field's value. -func (s *QueryRuntimeStatisticsTimeline) SetServiceProcessingTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { - s.ServiceProcessingTimeInMillis = &v - return s -} - -// SetTotalExecutionTimeInMillis sets the TotalExecutionTimeInMillis field's value. -func (s *QueryRuntimeStatisticsTimeline) SetTotalExecutionTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { - s.TotalExecutionTimeInMillis = &v - return s -} - -// Stage statistics such as input and output rows and bytes, execution time -// and stage state. This information also includes substages and the query stage -// plan. -type QueryStage struct { - _ struct{} `type:"structure"` - - // Time taken to execute this stage. - ExecutionTime *int64 `type:"long"` - - // The number of bytes input into the stage for execution. - InputBytes *int64 `type:"long"` - - // The number of rows input into the stage for execution. - InputRows *int64 `type:"long"` - - // The number of bytes output from the stage after execution. - OutputBytes *int64 `type:"long"` - - // The number of rows output from the stage after execution. - OutputRows *int64 `type:"long"` - - // Stage plan information such as name, identifier, sub plans, and source stages. - QueryStagePlan *QueryStagePlanNode `type:"structure"` - - // The identifier for a stage. - StageId *int64 `type:"long"` - - // State of the stage after query execution. - State *string `type:"string"` - - // List of sub query stages that form this stage execution plan. - SubStages []*QueryStage `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryStage) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s QueryStage) GoString() string { - return s.String() -} - -// SetExecutionTime sets the ExecutionTime field's value. -func (s *QueryStage) SetExecutionTime(v int64) *QueryStage { - s.ExecutionTime = &v - return s -} - -// SetInputBytes sets the InputBytes field's value. -func (s *QueryStage) SetInputBytes(v int64) *QueryStage { - s.InputBytes = &v - return s -} - -// SetInputRows sets the InputRows field's value. -func (s *QueryStage) SetInputRows(v int64) *QueryStage { - s.InputRows = &v - return s -} - -// SetOutputBytes sets the OutputBytes field's value. -func (s *QueryStage) SetOutputBytes(v int64) *QueryStage { - s.OutputBytes = &v - return s -} - -// SetOutputRows sets the OutputRows field's value. -func (s *QueryStage) SetOutputRows(v int64) *QueryStage { - s.OutputRows = &v - return s -} - -// SetQueryStagePlan sets the QueryStagePlan field's value. -func (s *QueryStage) SetQueryStagePlan(v *QueryStagePlanNode) *QueryStage { - s.QueryStagePlan = v - return s -} - -// SetStageId sets the StageId field's value. -func (s *QueryStage) SetStageId(v int64) *QueryStage { - s.StageId = &v - return s -} - -// SetState sets the State field's value. -func (s *QueryStage) SetState(v string) *QueryStage { - s.State = &v - return s -} - -// SetSubStages sets the SubStages field's value. -func (s *QueryStage) SetSubStages(v []*QueryStage) *QueryStage { - s.SubStages = v - return s -} - -// Stage plan information such as name, identifier, sub plans, and remote sources. -type QueryStagePlanNode struct { - _ struct{} `type:"structure"` - - // Stage plan information such as name, identifier, sub plans, and remote sources - // of child plan nodes/ - Children []*QueryStagePlanNode `type:"list"` - - // Information about the operation this query stage plan node is performing. - Identifier *string `type:"string"` - - // Name of the query stage plan that describes the operation this stage is performing - // as part of query execution. - Name *string `type:"string"` - - // Source plan node IDs. - RemoteSources []*string `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryStagePlanNode) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s QueryStagePlanNode) GoString() string { - return s.String() -} - -// SetChildren sets the Children field's value. -func (s *QueryStagePlanNode) SetChildren(v []*QueryStagePlanNode) *QueryStagePlanNode { - s.Children = v - return s -} - -// SetIdentifier sets the Identifier field's value. -func (s *QueryStagePlanNode) SetIdentifier(v string) *QueryStagePlanNode { - s.Identifier = &v - return s -} - -// SetName sets the Name field's value. -func (s *QueryStagePlanNode) SetName(v string) *QueryStagePlanNode { - s.Name = &v - return s -} - -// SetRemoteSources sets the RemoteSources field's value. -func (s *QueryStagePlanNode) SetRemoteSources(v []*string) *QueryStagePlanNode { - s.RemoteSources = v - return s -} - -// A resource, such as a workgroup, was not found. 
-type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` - - // The name of the Amazon resource. - ResourceName *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResourceNotFoundException) GoString() string { - return s.String() -} - -func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { - return &ResourceNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ResourceNotFoundException) Code() string { - return "ResourceNotFoundException" -} - -// Message returns the exception's message. -func (s *ResourceNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ResourceNotFoundException) OrigErr() error { - return nil -} - -func (s *ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ResourceNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ResourceNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The location in Amazon S3 where query and calculation results are stored -// and the encryption option, if any, used for query and calculation results. -// These are known as "client-side settings". If workgroup settings override -// client-side settings, then the query uses the workgroup settings. -type ResultConfiguration struct { - _ struct{} `type:"structure"` - - // Indicates that an Amazon S3 canned ACL should be set to control ownership - // of stored query results. Currently the only supported canned ACL is BUCKET_OWNER_FULL_CONTROL. - // This is a client-side setting. If workgroup settings override client-side - // settings, then the query uses the ACL configuration that is specified for - // the workgroup, and also uses the location for storing query results specified - // in the workgroup. For more information, see WorkGroupConfiguration$EnforceWorkGroupConfiguration - // and Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - AclConfiguration *AclConfiguration `type:"structure"` - - // If query and calculation results are encrypted in Amazon S3, indicates the - // encryption option used (for example, SSE_KMS or CSE_KMS) and key information. - // This is a client-side setting. If workgroup settings override client-side - // settings, then the query uses the encryption configuration that is specified - // for the workgroup, and also uses the location for storing query results specified - // in the workgroup. 
See WorkGroupConfiguration$EnforceWorkGroupConfiguration - // and Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - - // The Amazon Web Services account ID that you expect to be the owner of the - // Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, - // Athena uses the value for ExpectedBucketOwner when it makes Amazon S3 calls - // to your specified output location. If the ExpectedBucketOwner Amazon Web - // Services account ID does not match the actual owner of the Amazon S3 bucket, - // the call fails with a permissions error. - // - // This is a client-side setting. If workgroup settings override client-side - // settings, then the query uses the ExpectedBucketOwner setting that is specified - // for the workgroup, and also uses the location for storing query results specified - // in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration - // and Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - ExpectedBucketOwner *string `min:"12" type:"string"` - - // The location in Amazon S3 where your query and calculation results are stored, - // such as s3://path/to/query/bucket/. To run the query, you must specify the - // query results location using one of the ways: either for individual queries - // using either this setting (client-side), or in the workgroup, using WorkGroupConfiguration. - // If none of them is set, Athena issues an error that no output location is - // provided. For more information, see Working with query results, recent queries, - // and output files (https://docs.aws.amazon.com/athena/latest/ug/querying.html). - // If workgroup settings override client-side settings, then the query uses - // the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - OutputLocation *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ResultConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResultConfiguration"} - if s.ExpectedBucketOwner != nil && len(*s.ExpectedBucketOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("ExpectedBucketOwner", 12)) - } - if s.AclConfiguration != nil { - if err := s.AclConfiguration.Validate(); err != nil { - invalidParams.AddNested("AclConfiguration", err.(request.ErrInvalidParams)) - } - } - if s.EncryptionConfiguration != nil { - if err := s.EncryptionConfiguration.Validate(); err != nil { - invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAclConfiguration sets the AclConfiguration field's value. -func (s *ResultConfiguration) SetAclConfiguration(v *AclConfiguration) *ResultConfiguration { - s.AclConfiguration = v - return s -} - -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *ResultConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *ResultConfiguration { - s.EncryptionConfiguration = v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *ResultConfiguration) SetExpectedBucketOwner(v string) *ResultConfiguration { - s.ExpectedBucketOwner = &v - return s -} - -// SetOutputLocation sets the OutputLocation field's value. -func (s *ResultConfiguration) SetOutputLocation(v string) *ResultConfiguration { - s.OutputLocation = &v - return s -} - -// The information about the updates in the query results, such as output location -// and encryption configuration for the query results. -type ResultConfigurationUpdates struct { - _ struct{} `type:"structure"` - - // The ACL configuration for the query results. - AclConfiguration *AclConfiguration `type:"structure"` - - // The encryption configuration for query and calculation results. - EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - - // The Amazon Web Services account ID that you expect to be the owner of the - // Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, - // Athena uses the value for ExpectedBucketOwner when it makes Amazon S3 calls - // to your specified output location. If the ExpectedBucketOwner Amazon Web - // Services account ID does not match the actual owner of the Amazon S3 bucket, - // the call fails with a permissions error. - // - // If workgroup settings override client-side settings, then the query uses - // the ExpectedBucketOwner setting that is specified for the workgroup, and - // also uses the location for storing query results specified in the workgroup. - // See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings - // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - ExpectedBucketOwner *string `min:"12" type:"string"` - - // The location in Amazon S3 where your query and calculation results are stored, - // such as s3://path/to/query/bucket/. For more information, see Working with - // query results, recent queries, and output files (https://docs.aws.amazon.com/athena/latest/ug/querying.html). - // If workgroup settings override client-side settings, then the query uses - // the location for the query results and the encryption configuration that - // are specified for the workgroup. 
The "workgroup settings override" is specified - // in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. - // See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - OutputLocation *string `type:"string"` - - // If set to true, indicates that the previously-specified ACL configuration - // for queries in this workgroup should be ignored and set to null. If set to - // false or not set, and a value is present in the AclConfiguration of ResultConfigurationUpdates, - // the AclConfiguration in the workgroup's ResultConfiguration is updated with - // the new value. For more information, see Workgroup Settings Override Client-Side - // Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - RemoveAclConfiguration *bool `type:"boolean"` - - // If set to "true", indicates that the previously-specified encryption configuration - // (also known as the client-side setting) for queries in this workgroup should - // be ignored and set to null. If set to "false" or not set, and a value is - // present in the EncryptionConfiguration in ResultConfigurationUpdates (the - // client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration - // will be updated with the new value. For more information, see Workgroup Settings - // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - RemoveEncryptionConfiguration *bool `type:"boolean"` - - // If set to "true", removes the Amazon Web Services account ID previously specified - // for ResultConfiguration$ExpectedBucketOwner. If set to "false" or not set, - // and a value is present in the ExpectedBucketOwner in ResultConfigurationUpdates - // (the client-side setting), the ExpectedBucketOwner in the workgroup's ResultConfiguration - // is updated with the new value. For more information, see Workgroup Settings - // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - RemoveExpectedBucketOwner *bool `type:"boolean"` - - // If set to "true", indicates that the previously-specified query results location - // (also known as a client-side setting) for queries in this workgroup should - // be ignored and set to null. If set to "false" or not set, and a value is - // present in the OutputLocation in ResultConfigurationUpdates (the client-side - // setting), the OutputLocation in the workgroup's ResultConfiguration will - // be updated with the new value. For more information, see Workgroup Settings - // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - RemoveOutputLocation *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultConfigurationUpdates) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultConfigurationUpdates) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ResultConfigurationUpdates) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResultConfigurationUpdates"} - if s.ExpectedBucketOwner != nil && len(*s.ExpectedBucketOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("ExpectedBucketOwner", 12)) - } - if s.AclConfiguration != nil { - if err := s.AclConfiguration.Validate(); err != nil { - invalidParams.AddNested("AclConfiguration", err.(request.ErrInvalidParams)) - } - } - if s.EncryptionConfiguration != nil { - if err := s.EncryptionConfiguration.Validate(); err != nil { - invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAclConfiguration sets the AclConfiguration field's value. -func (s *ResultConfigurationUpdates) SetAclConfiguration(v *AclConfiguration) *ResultConfigurationUpdates { - s.AclConfiguration = v - return s -} - -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *ResultConfigurationUpdates) SetEncryptionConfiguration(v *EncryptionConfiguration) *ResultConfigurationUpdates { - s.EncryptionConfiguration = v - return s -} - -// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. -func (s *ResultConfigurationUpdates) SetExpectedBucketOwner(v string) *ResultConfigurationUpdates { - s.ExpectedBucketOwner = &v - return s -} - -// SetOutputLocation sets the OutputLocation field's value. -func (s *ResultConfigurationUpdates) SetOutputLocation(v string) *ResultConfigurationUpdates { - s.OutputLocation = &v - return s -} - -// SetRemoveAclConfiguration sets the RemoveAclConfiguration field's value. -func (s *ResultConfigurationUpdates) SetRemoveAclConfiguration(v bool) *ResultConfigurationUpdates { - s.RemoveAclConfiguration = &v - return s -} - -// SetRemoveEncryptionConfiguration sets the RemoveEncryptionConfiguration field's value. -func (s *ResultConfigurationUpdates) SetRemoveEncryptionConfiguration(v bool) *ResultConfigurationUpdates { - s.RemoveEncryptionConfiguration = &v - return s -} - -// SetRemoveExpectedBucketOwner sets the RemoveExpectedBucketOwner field's value. -func (s *ResultConfigurationUpdates) SetRemoveExpectedBucketOwner(v bool) *ResultConfigurationUpdates { - s.RemoveExpectedBucketOwner = &v - return s -} - -// SetRemoveOutputLocation sets the RemoveOutputLocation field's value. -func (s *ResultConfigurationUpdates) SetRemoveOutputLocation(v bool) *ResultConfigurationUpdates { - s.RemoveOutputLocation = &v - return s -} - -// Specifies whether previous query results are reused, and if so, their maximum -// age. -type ResultReuseByAgeConfiguration struct { - _ struct{} `type:"structure"` - - // True if previous query results can be reused when the query is run; otherwise, - // false. The default is false. - // - // Enabled is a required field - Enabled *bool `type:"boolean" required:"true"` - - // Specifies, in minutes, the maximum age of a previous query result that Athena - // should consider for reuse. The default is 60. - MaxAgeInMinutes *int64 `type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultReuseByAgeConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultReuseByAgeConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ResultReuseByAgeConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResultReuseByAgeConfiguration"} - if s.Enabled == nil { - invalidParams.Add(request.NewErrParamRequired("Enabled")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEnabled sets the Enabled field's value. -func (s *ResultReuseByAgeConfiguration) SetEnabled(v bool) *ResultReuseByAgeConfiguration { - s.Enabled = &v - return s -} - -// SetMaxAgeInMinutes sets the MaxAgeInMinutes field's value. -func (s *ResultReuseByAgeConfiguration) SetMaxAgeInMinutes(v int64) *ResultReuseByAgeConfiguration { - s.MaxAgeInMinutes = &v - return s -} - -// Specifies the query result reuse behavior for the query. -type ResultReuseConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether previous query results are reused, and if so, their maximum - // age. - ResultReuseByAgeConfiguration *ResultReuseByAgeConfiguration `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultReuseConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultReuseConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ResultReuseConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResultReuseConfiguration"} - if s.ResultReuseByAgeConfiguration != nil { - if err := s.ResultReuseByAgeConfiguration.Validate(); err != nil { - invalidParams.AddNested("ResultReuseByAgeConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResultReuseByAgeConfiguration sets the ResultReuseByAgeConfiguration field's value. -func (s *ResultReuseConfiguration) SetResultReuseByAgeConfiguration(v *ResultReuseByAgeConfiguration) *ResultReuseConfiguration { - s.ResultReuseByAgeConfiguration = v - return s -} - -// Contains information about whether the result of a previous query was reused. -type ResultReuseInformation struct { - _ struct{} `type:"structure"` - - // True if a previous query result was reused; false if the result was generated - // from a new run of the query. - // - // ReusedPreviousResult is a required field - ReusedPreviousResult *bool `type:"boolean" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
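For reference, the result configuration and result-reuse types being deleted above chain their setters fluently. The following is a minimal sketch only; the github.com/aws/aws-sdk-go/service/athena import path is assumed from the v1 SDK layout, and the bucket path is a placeholder, while every type, setter, and Validate call comes from the removed code itself.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/athena" // assumed v1 import path, not part of this diff
)

func main() {
	// Client-side result settings: an output location plus reuse of results
	// that are at most 30 minutes old (see ResultReuseByAgeConfiguration above).
	rc := (&athena.ResultConfiguration{}).
		SetOutputLocation("s3://example-bucket/athena-results/") // placeholder location

	reuse := (&athena.ResultReuseConfiguration{}).
		SetResultReuseByAgeConfiguration((&athena.ResultReuseByAgeConfiguration{}).
			SetEnabled(true).
			SetMaxAgeInMinutes(30))

	// Validate applies the min-length / required-field checks defined above.
	if err := rc.Validate(); err != nil {
		fmt.Println(err)
		return
	}
	if err := reuse.Validate(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(rc, reuse) // String() replaces fields marked "sensitive"
}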
-func (s ResultReuseInformation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultReuseInformation) GoString() string { - return s.String() -} - -// SetReusedPreviousResult sets the ReusedPreviousResult field's value. -func (s *ResultReuseInformation) SetReusedPreviousResult(v bool) *ResultReuseInformation { - s.ReusedPreviousResult = &v - return s -} - -// The metadata and rows that make up a query result set. The metadata describes -// the column structure and data types. To return a ResultSet object, use GetQueryResults. -type ResultSet struct { - _ struct{} `type:"structure"` - - // The metadata that describes the column structure and data types of a table - // of query results. - ResultSetMetadata *ResultSetMetadata `type:"structure"` - - // The rows in the table. - Rows []*Row `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultSet) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultSet) GoString() string { - return s.String() -} - -// SetResultSetMetadata sets the ResultSetMetadata field's value. -func (s *ResultSet) SetResultSetMetadata(v *ResultSetMetadata) *ResultSet { - s.ResultSetMetadata = v - return s -} - -// SetRows sets the Rows field's value. -func (s *ResultSet) SetRows(v []*Row) *ResultSet { - s.Rows = v - return s -} - -// The metadata that describes the column structure and data types of a table -// of query results. To return a ResultSetMetadata object, use GetQueryResults. -type ResultSetMetadata struct { - _ struct{} `type:"structure"` - - // Information about the columns returned in a query result metadata. - ColumnInfo []*ColumnInfo `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultSetMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ResultSetMetadata) GoString() string { - return s.String() -} - -// SetColumnInfo sets the ColumnInfo field's value. -func (s *ResultSetMetadata) SetColumnInfo(v []*ColumnInfo) *ResultSetMetadata { - s.ColumnInfo = v - return s -} - -// The rows that make up a query result table. -type Row struct { - _ struct{} `type:"structure"` - - // The data that populates a row in a query result table. - Data []*Datum `type:"list"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Row) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Row) GoString() string { - return s.String() -} - -// SetData sets the Data field's value. -func (s *Row) SetData(v []*Datum) *Row { - s.Data = v - return s -} - -// The specified session already exists. -type SessionAlreadyExistsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionAlreadyExistsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionAlreadyExistsException) GoString() string { - return s.String() -} - -func newErrorSessionAlreadyExistsException(v protocol.ResponseMetadata) error { - return &SessionAlreadyExistsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *SessionAlreadyExistsException) Code() string { - return "SessionAlreadyExistsException" -} - -// Message returns the exception's message. -func (s *SessionAlreadyExistsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *SessionAlreadyExistsException) OrigErr() error { - return nil -} - -func (s *SessionAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *SessionAlreadyExistsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *SessionAlreadyExistsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Contains session configuration information. -type SessionConfiguration struct { - _ struct{} `type:"structure"` - - // If query and calculation results are encrypted in Amazon S3, indicates the - // encryption option used (for example, SSE_KMS or CSE_KMS) and key information. - EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - - // The ARN of the execution role used to access user resources for Spark sessions - // and Identity Center enabled workgroups. This property applies only to Spark - // enabled workgroups and Identity Center enabled workgroups. - ExecutionRole *string `min:"20" type:"string"` - - // The idle timeout in seconds for the session. - IdleTimeoutSeconds *int64 `type:"long"` - - // The Amazon S3 location that stores information for the notebook. 
- WorkingDirectory *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionConfiguration) GoString() string { - return s.String() -} - -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *SessionConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *SessionConfiguration { - s.EncryptionConfiguration = v - return s -} - -// SetExecutionRole sets the ExecutionRole field's value. -func (s *SessionConfiguration) SetExecutionRole(v string) *SessionConfiguration { - s.ExecutionRole = &v - return s -} - -// SetIdleTimeoutSeconds sets the IdleTimeoutSeconds field's value. -func (s *SessionConfiguration) SetIdleTimeoutSeconds(v int64) *SessionConfiguration { - s.IdleTimeoutSeconds = &v - return s -} - -// SetWorkingDirectory sets the WorkingDirectory field's value. -func (s *SessionConfiguration) SetWorkingDirectory(v string) *SessionConfiguration { - s.WorkingDirectory = &v - return s -} - -// Contains statistics for a session. -type SessionStatistics struct { - _ struct{} `type:"structure"` - - // The data processing unit execution time for a session in milliseconds. - DpuExecutionInMillis *int64 `type:"long"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionStatistics) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionStatistics) GoString() string { - return s.String() -} - -// SetDpuExecutionInMillis sets the DpuExecutionInMillis field's value. -func (s *SessionStatistics) SetDpuExecutionInMillis(v int64) *SessionStatistics { - s.DpuExecutionInMillis = &v - return s -} - -// Contains information about the status of a session. -type SessionStatus struct { - _ struct{} `type:"structure"` - - // The date and time that the session ended. - EndDateTime *time.Time `type:"timestamp"` - - // The date and time starting at which the session became idle. Can be empty - // if the session is not currently idle. - IdleSinceDateTime *time.Time `type:"timestamp"` - - // The most recent date and time that the session was modified. - LastModifiedDateTime *time.Time `type:"timestamp"` - - // The date and time that the session started. - StartDateTime *time.Time `type:"timestamp"` - - // The state of the session. A description of each state follows. - // - // CREATING - The session is being started, including acquiring resources. - // - // CREATED - The session has been started. - // - // IDLE - The session is able to accept a calculation. 
- // - // BUSY - The session is processing another task and is unable to accept a calculation. - // - // TERMINATING - The session is in the process of shutting down. - // - // TERMINATED - The session and its resources are no longer running. - // - // DEGRADED - The session has no healthy coordinators. - // - // FAILED - Due to a failure, the session and its resources are no longer running. - State *string `type:"string" enum:"SessionState"` - - // The reason for the session state change (for example, canceled because the - // session was terminated). - StateChangeReason *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionStatus) GoString() string { - return s.String() -} - -// SetEndDateTime sets the EndDateTime field's value. -func (s *SessionStatus) SetEndDateTime(v time.Time) *SessionStatus { - s.EndDateTime = &v - return s -} - -// SetIdleSinceDateTime sets the IdleSinceDateTime field's value. -func (s *SessionStatus) SetIdleSinceDateTime(v time.Time) *SessionStatus { - s.IdleSinceDateTime = &v - return s -} - -// SetLastModifiedDateTime sets the LastModifiedDateTime field's value. -func (s *SessionStatus) SetLastModifiedDateTime(v time.Time) *SessionStatus { - s.LastModifiedDateTime = &v - return s -} - -// SetStartDateTime sets the StartDateTime field's value. -func (s *SessionStatus) SetStartDateTime(v time.Time) *SessionStatus { - s.StartDateTime = &v - return s -} - -// SetState sets the State field's value. -func (s *SessionStatus) SetState(v string) *SessionStatus { - s.State = &v - return s -} - -// SetStateChangeReason sets the StateChangeReason field's value. -func (s *SessionStatus) SetStateChangeReason(v string) *SessionStatus { - s.StateChangeReason = &v - return s -} - -// Contains summary information about a session. -type SessionSummary struct { - _ struct{} `type:"structure"` - - // The session description. - Description *string `min:"1" type:"string"` - - // The engine version used by the session (for example, PySpark engine version - // 3). - EngineVersion *EngineVersion `type:"structure"` - - // The notebook version. - NotebookVersion *string `min:"1" type:"string"` - - // The session ID. - SessionId *string `min:"1" type:"string"` - - // Contains information about the session status. - Status *SessionStatus `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SessionSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s SessionSummary) GoString() string { - return s.String() -} - -// SetDescription sets the Description field's value. -func (s *SessionSummary) SetDescription(v string) *SessionSummary { - s.Description = &v - return s -} - -// SetEngineVersion sets the EngineVersion field's value. -func (s *SessionSummary) SetEngineVersion(v *EngineVersion) *SessionSummary { - s.EngineVersion = v - return s -} - -// SetNotebookVersion sets the NotebookVersion field's value. -func (s *SessionSummary) SetNotebookVersion(v string) *SessionSummary { - s.NotebookVersion = &v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *SessionSummary) SetSessionId(v string) *SessionSummary { - s.SessionId = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *SessionSummary) SetStatus(v *SessionStatus) *SessionSummary { - s.Status = v - return s -} - -type StartCalculationExecutionInput struct { - _ struct{} `type:"structure"` - - // Contains configuration information for the calculation. - // - // Deprecated: Kepler Post GA Tasks : https://sim.amazon.com/issues/ATHENA-39828 - CalculationConfiguration *CalculationConfiguration `deprecated:"true" type:"structure"` - - // A unique case-sensitive string used to ensure the request to create the calculation - // is idempotent (executes only once). If another StartCalculationExecutionRequest - // is received, the same response is returned and another calculation is not - // created. If a parameter has changed, an error is returned. - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // users. If you are not using the Amazon Web Services SDK or the Amazon Web - // Services CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"32" type:"string"` - - // A string that contains the code of the calculation. Use this parameter instead - // of CalculationConfiguration$CodeBlock, which is deprecated. - CodeBlock *string `type:"string"` - - // A description of the calculation. - Description *string `min:"1" type:"string"` - - // The session ID. - // - // SessionId is a required field - SessionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartCalculationExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartCalculationExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StartCalculationExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartCalculationExecutionInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.SessionId == nil { - invalidParams.Add(request.NewErrParamRequired("SessionId")) - } - if s.SessionId != nil && len(*s.SessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCalculationConfiguration sets the CalculationConfiguration field's value. -func (s *StartCalculationExecutionInput) SetCalculationConfiguration(v *CalculationConfiguration) *StartCalculationExecutionInput { - s.CalculationConfiguration = v - return s -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *StartCalculationExecutionInput) SetClientRequestToken(v string) *StartCalculationExecutionInput { - s.ClientRequestToken = &v - return s -} - -// SetCodeBlock sets the CodeBlock field's value. -func (s *StartCalculationExecutionInput) SetCodeBlock(v string) *StartCalculationExecutionInput { - s.CodeBlock = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *StartCalculationExecutionInput) SetDescription(v string) *StartCalculationExecutionInput { - s.Description = &v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *StartCalculationExecutionInput) SetSessionId(v string) *StartCalculationExecutionInput { - s.SessionId = &v - return s -} - -type StartCalculationExecutionOutput struct { - _ struct{} `type:"structure"` - - // The calculation execution UUID. - CalculationExecutionId *string `min:"1" type:"string"` - - // CREATING - The calculation is in the process of being created. - // - // CREATED - The calculation has been created and is ready to run. - // - // QUEUED - The calculation has been queued for processing. - // - // RUNNING - The calculation is running. - // - // CANCELING - A request to cancel the calculation has been received and the - // system is working to stop it. - // - // CANCELED - The calculation is no longer running as the result of a cancel - // request. - // - // COMPLETED - The calculation has completed without error. - // - // FAILED - The calculation failed and is no longer running. - State *string `type:"string" enum:"CalculationExecutionState"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartCalculationExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartCalculationExecutionOutput) GoString() string { - return s.String() -} - -// SetCalculationExecutionId sets the CalculationExecutionId field's value. 
-func (s *StartCalculationExecutionOutput) SetCalculationExecutionId(v string) *StartCalculationExecutionOutput { - s.CalculationExecutionId = &v - return s -} - -// SetState sets the State field's value. -func (s *StartCalculationExecutionOutput) SetState(v string) *StartCalculationExecutionOutput { - s.State = &v - return s -} - -type StartQueryExecutionInput struct { - _ struct{} `type:"structure"` - - // A unique case-sensitive string used to ensure the request to create the query - // is idempotent (executes only once). If another StartQueryExecution request - // is received, the same response is returned and another query is not created. - // An error is returned if a parameter, such as QueryString, has changed. A - // call to StartQueryExecution that uses a previous client request token returns - // the same QueryExecutionId even if the requester doesn't have permission on - // the tables specified in QueryString. - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // users. If you are not using the Amazon Web Services SDK or the Amazon Web - // Services CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` - - // A list of values for the parameters in a query. The values are applied sequentially - // to the parameters in the query in the order in which the parameters occur. - ExecutionParameters []*string `min:"1" type:"list"` - - // The database within which the query executes. - QueryExecutionContext *QueryExecutionContext `type:"structure"` - - // The SQL query statements to be executed. - // - // QueryString is a required field - QueryString *string `min:"1" type:"string" required:"true"` - - // Specifies information about where and how to save the results of the query - // execution. If the query runs in a workgroup, then workgroup's settings may - // override query settings. This affects the query results location. The workgroup - // settings override is specified in EnforceWorkGroupConfiguration (true/false) - // in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - ResultConfiguration *ResultConfiguration `type:"structure"` - - // Specifies the query result reuse behavior for the query. - ResultReuseConfiguration *ResultReuseConfiguration `type:"structure"` - - // The name of the workgroup in which the query is being started. - WorkGroup *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartQueryExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartQueryExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StartQueryExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartQueryExecutionInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) - } - if s.ExecutionParameters != nil && len(s.ExecutionParameters) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ExecutionParameters", 1)) - } - if s.QueryString == nil { - invalidParams.Add(request.NewErrParamRequired("QueryString")) - } - if s.QueryString != nil && len(*s.QueryString) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) - } - if s.QueryExecutionContext != nil { - if err := s.QueryExecutionContext.Validate(); err != nil { - invalidParams.AddNested("QueryExecutionContext", err.(request.ErrInvalidParams)) - } - } - if s.ResultConfiguration != nil { - if err := s.ResultConfiguration.Validate(); err != nil { - invalidParams.AddNested("ResultConfiguration", err.(request.ErrInvalidParams)) - } - } - if s.ResultReuseConfiguration != nil { - if err := s.ResultReuseConfiguration.Validate(); err != nil { - invalidParams.AddNested("ResultReuseConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *StartQueryExecutionInput) SetClientRequestToken(v string) *StartQueryExecutionInput { - s.ClientRequestToken = &v - return s -} - -// SetExecutionParameters sets the ExecutionParameters field's value. -func (s *StartQueryExecutionInput) SetExecutionParameters(v []*string) *StartQueryExecutionInput { - s.ExecutionParameters = v - return s -} - -// SetQueryExecutionContext sets the QueryExecutionContext field's value. -func (s *StartQueryExecutionInput) SetQueryExecutionContext(v *QueryExecutionContext) *StartQueryExecutionInput { - s.QueryExecutionContext = v - return s -} - -// SetQueryString sets the QueryString field's value. -func (s *StartQueryExecutionInput) SetQueryString(v string) *StartQueryExecutionInput { - s.QueryString = &v - return s -} - -// SetResultConfiguration sets the ResultConfiguration field's value. -func (s *StartQueryExecutionInput) SetResultConfiguration(v *ResultConfiguration) *StartQueryExecutionInput { - s.ResultConfiguration = v - return s -} - -// SetResultReuseConfiguration sets the ResultReuseConfiguration field's value. -func (s *StartQueryExecutionInput) SetResultReuseConfiguration(v *ResultReuseConfiguration) *StartQueryExecutionInput { - s.ResultReuseConfiguration = v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *StartQueryExecutionInput) SetWorkGroup(v string) *StartQueryExecutionInput { - s.WorkGroup = &v - return s -} - -type StartQueryExecutionOutput struct { - _ struct{} `type:"structure"` - - // The unique ID of the query that ran as a result of this request. - QueryExecutionId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartQueryExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartQueryExecutionOutput) GoString() string { - return s.String() -} - -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *StartQueryExecutionOutput) SetQueryExecutionId(v string) *StartQueryExecutionOutput { - s.QueryExecutionId = &v - return s -} - -type StartSessionInput struct { - _ struct{} `type:"structure"` - - // A unique case-sensitive string used to ensure the request to create the session - // is idempotent (executes only once). If another StartSessionRequest is received, - // the same response is returned and another session is not created. If a parameter - // has changed, an error is returned. - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // users. If you are not using the Amazon Web Services SDK or the Amazon Web - // Services CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"32" type:"string"` - - // The session description. - Description *string `min:"1" type:"string"` - - // Contains engine data processing unit (DPU) configuration settings and parameter - // mappings. - // - // EngineConfiguration is a required field - EngineConfiguration *EngineConfiguration `type:"structure" required:"true"` - - // The notebook version. This value is supplied automatically for notebook sessions - // in the Athena console and is not required for programmatic session access. - // The only valid notebook version is Athena notebook version 1. If you specify - // a value for NotebookVersion, you must also specify a value for NotebookId. - // See EngineConfiguration$AdditionalConfigs. - NotebookVersion *string `min:"1" type:"string"` - - // The idle timeout in minutes for the session. - SessionIdleTimeoutInMinutes *int64 `min:"1" type:"integer"` - - // The workgroup to which the session belongs. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartSessionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartSessionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
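Putting the StartQueryExecutionInput setters and Validate above together, a hypothetical v1-style call might have looked like the sketch below. The athena.New and session constructors and both import paths are assumptions from the standard aws-sdk-go v1 layout, not part of this diff; the query, workgroup name, and bucket are placeholders; the input/output types, setters, and Validate are exactly those shown in the deleted code.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session" // assumed v1 import path
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	// Assumed v1 client construction; only the code below it relies on the
	// deleted types above.
	svc := athena.New(session.Must(session.NewSession()))

	input := (&athena.StartQueryExecutionInput{}).
		SetQueryString("SELECT 1").  // placeholder query
		SetWorkGroup("primary").     // placeholder workgroup
		SetResultConfiguration((&athena.ResultConfiguration{}).
			SetOutputLocation("s3://example-bucket/athena-results/"))

	// Required/min-length checks (QueryString, ClientRequestToken, nested
	// ResultConfiguration) as implemented in StartQueryExecutionInput.Validate above.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	out, err := svc.StartQueryExecution(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // StartQueryExecutionOutput.String(), includes QueryExecutionId
}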
-func (s *StartSessionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartSessionInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.EngineConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("EngineConfiguration")) - } - if s.NotebookVersion != nil && len(*s.NotebookVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NotebookVersion", 1)) - } - if s.SessionIdleTimeoutInMinutes != nil && *s.SessionIdleTimeoutInMinutes < 1 { - invalidParams.Add(request.NewErrParamMinValue("SessionIdleTimeoutInMinutes", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - if s.EngineConfiguration != nil { - if err := s.EngineConfiguration.Validate(); err != nil { - invalidParams.AddNested("EngineConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *StartSessionInput) SetClientRequestToken(v string) *StartSessionInput { - s.ClientRequestToken = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *StartSessionInput) SetDescription(v string) *StartSessionInput { - s.Description = &v - return s -} - -// SetEngineConfiguration sets the EngineConfiguration field's value. -func (s *StartSessionInput) SetEngineConfiguration(v *EngineConfiguration) *StartSessionInput { - s.EngineConfiguration = v - return s -} - -// SetNotebookVersion sets the NotebookVersion field's value. -func (s *StartSessionInput) SetNotebookVersion(v string) *StartSessionInput { - s.NotebookVersion = &v - return s -} - -// SetSessionIdleTimeoutInMinutes sets the SessionIdleTimeoutInMinutes field's value. -func (s *StartSessionInput) SetSessionIdleTimeoutInMinutes(v int64) *StartSessionInput { - s.SessionIdleTimeoutInMinutes = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *StartSessionInput) SetWorkGroup(v string) *StartSessionInput { - s.WorkGroup = &v - return s -} - -type StartSessionOutput struct { - _ struct{} `type:"structure"` - - // The session ID. - SessionId *string `min:"1" type:"string"` - - // The state of the session. A description of each state follows. - // - // CREATING - The session is being started, including acquiring resources. - // - // CREATED - The session has been started. - // - // IDLE - The session is able to accept a calculation. - // - // BUSY - The session is processing another task and is unable to accept a calculation. - // - // TERMINATING - The session is in the process of shutting down. - // - // TERMINATED - The session and its resources are no longer running. - // - // DEGRADED - The session has no healthy coordinators. - // - // FAILED - Due to a failure, the session and its resources are no longer running. - State *string `type:"string" enum:"SessionState"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartSessionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StartSessionOutput) GoString() string { - return s.String() -} - -// SetSessionId sets the SessionId field's value. -func (s *StartSessionOutput) SetSessionId(v string) *StartSessionOutput { - s.SessionId = &v - return s -} - -// SetState sets the State field's value. -func (s *StartSessionOutput) SetState(v string) *StartSessionOutput { - s.State = &v - return s -} - -type StopCalculationExecutionInput struct { - _ struct{} `type:"structure"` - - // The calculation execution UUID. - // - // CalculationExecutionId is a required field - CalculationExecutionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopCalculationExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopCalculationExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopCalculationExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopCalculationExecutionInput"} - if s.CalculationExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("CalculationExecutionId")) - } - if s.CalculationExecutionId != nil && len(*s.CalculationExecutionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CalculationExecutionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCalculationExecutionId sets the CalculationExecutionId field's value. -func (s *StopCalculationExecutionInput) SetCalculationExecutionId(v string) *StopCalculationExecutionInput { - s.CalculationExecutionId = &v - return s -} - -type StopCalculationExecutionOutput struct { - _ struct{} `type:"structure"` - - // CREATING - The calculation is in the process of being created. - // - // CREATED - The calculation has been created and is ready to run. - // - // QUEUED - The calculation has been queued for processing. - // - // RUNNING - The calculation is running. - // - // CANCELING - A request to cancel the calculation has been received and the - // system is working to stop it. - // - // CANCELED - The calculation is no longer running as the result of a cancel - // request. - // - // COMPLETED - The calculation has completed without error. - // - // FAILED - The calculation failed and is no longer running. - State *string `type:"string" enum:"CalculationExecutionState"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopCalculationExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopCalculationExecutionOutput) GoString() string { - return s.String() -} - -// SetState sets the State field's value. -func (s *StopCalculationExecutionOutput) SetState(v string) *StopCalculationExecutionOutput { - s.State = &v - return s -} - -type StopQueryExecutionInput struct { - _ struct{} `type:"structure"` - - // The unique ID of the query execution to stop. - QueryExecutionId *string `min:"1" type:"string" idempotencyToken:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopQueryExecutionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopQueryExecutionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StopQueryExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StopQueryExecutionInput"} - if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *StopQueryExecutionInput) SetQueryExecutionId(v string) *StopQueryExecutionInput { - s.QueryExecutionId = &v - return s -} - -type StopQueryExecutionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopQueryExecutionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopQueryExecutionOutput) GoString() string { - return s.String() -} - -// Contains metadata for a table. -type TableMetadata struct { - _ struct{} `type:"structure"` - - // A list of the columns in the table. - Columns []*Column `type:"list"` - - // The time that the table was created. - CreateTime *time.Time `type:"timestamp"` - - // The last time the table was accessed. - LastAccessTime *time.Time `type:"timestamp"` - - // The name of the table. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A set of custom key/value pairs for table properties. - Parameters map[string]*string `type:"map"` - - // A list of the partition keys in the table. - PartitionKeys []*Column `type:"list"` - - // The type of table. In Athena, only EXTERNAL_TABLE is supported. - TableType *string `type:"string"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TableMetadata) GoString() string { - return s.String() -} - -// SetColumns sets the Columns field's value. -func (s *TableMetadata) SetColumns(v []*Column) *TableMetadata { - s.Columns = v - return s -} - -// SetCreateTime sets the CreateTime field's value. -func (s *TableMetadata) SetCreateTime(v time.Time) *TableMetadata { - s.CreateTime = &v - return s -} - -// SetLastAccessTime sets the LastAccessTime field's value. -func (s *TableMetadata) SetLastAccessTime(v time.Time) *TableMetadata { - s.LastAccessTime = &v - return s -} - -// SetName sets the Name field's value. -func (s *TableMetadata) SetName(v string) *TableMetadata { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *TableMetadata) SetParameters(v map[string]*string) *TableMetadata { - s.Parameters = v - return s -} - -// SetPartitionKeys sets the PartitionKeys field's value. -func (s *TableMetadata) SetPartitionKeys(v []*Column) *TableMetadata { - s.PartitionKeys = v - return s -} - -// SetTableType sets the TableType field's value. -func (s *TableMetadata) SetTableType(v string) *TableMetadata { - s.TableType = &v - return s -} - -// A label that you assign to a resource. Athena resources include workgroups, -// data catalogs, and capacity reservations. Each tag consists of a key and -// an optional value, both of which you define. For example, you can use tags -// to categorize Athena resources by purpose, owner, or environment. Use a consistent -// set of tag keys to make it easier to search and filter the resources in your -// account. For best practices, see Tagging Best Practices (https://docs.aws.amazon.com/whitepapers/latest/tagging-best-practices/tagging-best-practices.html). -// Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can -// be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers -// representable in UTF-8, and the following characters: + - = . _ : / @. Tag -// keys and values are case-sensitive. Tag keys must be unique per resource. -// If you specify more than one tag, separate them by commas. -type Tag struct { - _ struct{} `type:"structure"` - - // A tag key. The tag key length is from 1 to 128 Unicode characters in UTF-8. - // You can use letters and numbers representable in UTF-8, and the following - // characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique - // per resource. - Key *string `min:"1" type:"string"` - - // A tag value. The tag value length is from 0 to 256 Unicode characters in - // UTF-8. You can use letters and numbers representable in UTF-8, and the following - // characters: + - = . _ : / @. Tag values are case-sensitive. - Value *string `type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v - return s -} - -type TagResourceInput struct { - _ struct{} `type:"structure"` - - // Specifies the ARN of the Athena resource to which tags are to be added. - // - // ResourceARN is a required field - ResourceARN *string `min:"1" type:"string" required:"true"` - - // A collection of one or more tags, separated by commas, to be added to an - // Athena resource. - // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.ResourceARN == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceARN")) - } - if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceARN sets the ResourceARN field's value. -func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { - s.ResourceARN = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { - s.Tags = v - return s -} - -type TagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s TagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) GoString() string { - return s.String() -} - -type TerminateSessionInput struct { - _ struct{} `type:"structure"` - - // The session ID. - // - // SessionId is a required field - SessionId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TerminateSessionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TerminateSessionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TerminateSessionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TerminateSessionInput"} - if s.SessionId == nil { - invalidParams.Add(request.NewErrParamRequired("SessionId")) - } - if s.SessionId != nil && len(*s.SessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetSessionId sets the SessionId field's value. -func (s *TerminateSessionInput) SetSessionId(v string) *TerminateSessionInput { - s.SessionId = &v - return s -} - -type TerminateSessionOutput struct { - _ struct{} `type:"structure"` - - // The state of the session. A description of each state follows. - // - // CREATING - The session is being started, including acquiring resources. - // - // CREATED - The session has been started. - // - // IDLE - The session is able to accept a calculation. - // - // BUSY - The session is processing another task and is unable to accept a calculation. - // - // TERMINATING - The session is in the process of shutting down. - // - // TERMINATED - The session and its resources are no longer running. - // - // DEGRADED - The session has no healthy coordinators. - // - // FAILED - Due to a failure, the session and its resources are no longer running. - State *string `type:"string" enum:"SessionState"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TerminateSessionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TerminateSessionOutput) GoString() string { - return s.String() -} - -// SetState sets the State field's value. 
-func (s *TerminateSessionOutput) SetState(v string) *TerminateSessionOutput { - s.State = &v - return s -} - -// Indicates that the request was throttled. -type TooManyRequestsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"Message" type:"string"` - - // The reason for the query throttling, for example, when it exceeds the concurrent - // query limit. - Reason *string `type:"string" enum:"ThrottleReason"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TooManyRequestsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TooManyRequestsException) GoString() string { - return s.String() -} - -func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { - return &TooManyRequestsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *TooManyRequestsException) Code() string { - return "TooManyRequestsException" -} - -// Message returns the exception's message. -func (s *TooManyRequestsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TooManyRequestsException) OrigErr() error { - return nil -} - -func (s *TooManyRequestsException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *TooManyRequestsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *TooManyRequestsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Information about a named query ID that could not be processed. -type UnprocessedNamedQueryId struct { - _ struct{} `type:"structure"` - - // The error code returned when the processing request for the named query failed, - // if applicable. - ErrorCode *string `min:"1" type:"string"` - - // The error message returned when the processing request for the named query - // failed, if applicable. - ErrorMessage *string `type:"string"` - - // The unique identifier of the named query. - NamedQueryId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnprocessedNamedQueryId) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnprocessedNamedQueryId) GoString() string { - return s.String() -} - -// SetErrorCode sets the ErrorCode field's value. 
-func (s *UnprocessedNamedQueryId) SetErrorCode(v string) *UnprocessedNamedQueryId { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *UnprocessedNamedQueryId) SetErrorMessage(v string) *UnprocessedNamedQueryId { - s.ErrorMessage = &v - return s -} - -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *UnprocessedNamedQueryId) SetNamedQueryId(v string) *UnprocessedNamedQueryId { - s.NamedQueryId = &v - return s -} - -// The name of a prepared statement that could not be returned. -type UnprocessedPreparedStatementName struct { - _ struct{} `type:"structure"` - - // The error code returned when the request for the prepared statement failed. - ErrorCode *string `min:"1" type:"string"` - - // The error message containing the reason why the prepared statement could - // not be returned. The following error messages are possible: - // - // * INVALID_INPUT - The name of the prepared statement that was provided - // is not valid (for example, the name is too long). - // - // * STATEMENT_NOT_FOUND - A prepared statement with the name provided could - // not be found. - // - // * UNAUTHORIZED - The requester does not have permission to access the - // workgroup that contains the prepared statement. - ErrorMessage *string `type:"string"` - - // The name of a prepared statement that could not be returned due to an error. - StatementName *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnprocessedPreparedStatementName) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnprocessedPreparedStatementName) GoString() string { - return s.String() -} - -// SetErrorCode sets the ErrorCode field's value. -func (s *UnprocessedPreparedStatementName) SetErrorCode(v string) *UnprocessedPreparedStatementName { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *UnprocessedPreparedStatementName) SetErrorMessage(v string) *UnprocessedPreparedStatementName { - s.ErrorMessage = &v - return s -} - -// SetStatementName sets the StatementName field's value. -func (s *UnprocessedPreparedStatementName) SetStatementName(v string) *UnprocessedPreparedStatementName { - s.StatementName = &v - return s -} - -// Describes a query execution that failed to process. -type UnprocessedQueryExecutionId struct { - _ struct{} `type:"structure"` - - // The error code returned when the query execution failed to process, if applicable. - ErrorCode *string `min:"1" type:"string"` - - // The error message returned when the query execution failed to process, if - // applicable. - ErrorMessage *string `type:"string"` - - // The unique identifier of the query execution. - QueryExecutionId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s UnprocessedQueryExecutionId) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnprocessedQueryExecutionId) GoString() string { - return s.String() -} - -// SetErrorCode sets the ErrorCode field's value. -func (s *UnprocessedQueryExecutionId) SetErrorCode(v string) *UnprocessedQueryExecutionId { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *UnprocessedQueryExecutionId) SetErrorMessage(v string) *UnprocessedQueryExecutionId { - s.ErrorMessage = &v - return s -} - -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *UnprocessedQueryExecutionId) SetQueryExecutionId(v string) *UnprocessedQueryExecutionId { - s.QueryExecutionId = &v - return s -} - -type UntagResourceInput struct { - _ struct{} `type:"structure"` - - // Specifies the ARN of the resource from which tags are to be removed. - // - // ResourceARN is a required field - ResourceARN *string `min:"1" type:"string" required:"true"` - - // A comma-separated list of one or more tag keys whose tags are to be removed - // from the specified resource. - // - // TagKeys is a required field - TagKeys []*string `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UntagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} - if s.ResourceARN == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceARN")) - } - if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceARN sets the ResourceARN field's value. -func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { - s.ResourceARN = &v - return s -} - -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v - return s -} - -type UntagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) GoString() string { - return s.String() -} - -type UpdateCapacityReservationInput struct { - _ struct{} `type:"structure"` - - // The name of the capacity reservation. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The new number of requested data processing units. - // - // TargetDpus is a required field - TargetDpus *int64 `min:"24" type:"integer" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCapacityReservationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCapacityReservationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateCapacityReservationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateCapacityReservationInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.TargetDpus == nil { - invalidParams.Add(request.NewErrParamRequired("TargetDpus")) - } - if s.TargetDpus != nil && *s.TargetDpus < 24 { - invalidParams.Add(request.NewErrParamMinValue("TargetDpus", 24)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetName sets the Name field's value. -func (s *UpdateCapacityReservationInput) SetName(v string) *UpdateCapacityReservationInput { - s.Name = &v - return s -} - -// SetTargetDpus sets the TargetDpus field's value. -func (s *UpdateCapacityReservationInput) SetTargetDpus(v int64) *UpdateCapacityReservationInput { - s.TargetDpus = &v - return s -} - -type UpdateCapacityReservationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCapacityReservationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCapacityReservationOutput) GoString() string { - return s.String() -} - -type UpdateDataCatalogInput struct { - _ struct{} `type:"structure"` - - // New or modified text that describes the data catalog. - Description *string `min:"1" type:"string"` - - // The name of the data catalog to update. 
The catalog name must be unique for - // the Amazon Web Services account and can use a maximum of 127 alphanumeric, - // underscore, at sign, or hyphen characters. The remainder of the length constraint - // of 256 is reserved for use by Athena. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Specifies the Lambda function or functions to use for updating the data catalog. - // This is a mapping whose values depend on the catalog type. - // - // * For the HIVE data catalog type, use the following syntax. The metadata-function - // parameter is required. The sdk-version parameter is optional and defaults - // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number - // - // * For the LAMBDA data catalog type, use one of the following sets of required - // parameters, but not both. If you have one Lambda function that processes - // metadata and another for reading the actual data, use the following syntax. - // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn - // If you have a composite Lambda function that processes both metadata and - // data, use the following syntax to specify your Lambda function. function=lambda_arn - Parameters map[string]*string `type:"map"` - - // Specifies the type of data catalog to update. Specify LAMBDA for a federated - // catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"DataCatalogType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateDataCatalogInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateDataCatalogInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateDataCatalogInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDataCatalogInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *UpdateDataCatalogInput) SetDescription(v string) *UpdateDataCatalogInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateDataCatalogInput) SetName(v string) *UpdateDataCatalogInput { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *UpdateDataCatalogInput) SetParameters(v map[string]*string) *UpdateDataCatalogInput { - s.Parameters = v - return s -} - -// SetType sets the Type field's value. 
-func (s *UpdateDataCatalogInput) SetType(v string) *UpdateDataCatalogInput { - s.Type = &v - return s -} - -type UpdateDataCatalogOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateDataCatalogOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateDataCatalogOutput) GoString() string { - return s.String() -} - -type UpdateNamedQueryInput struct { - _ struct{} `type:"structure"` - - // The query description. - Description *string `type:"string"` - - // The name of the query. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The unique identifier (UUID) of the query. - // - // NamedQueryId is a required field - NamedQueryId *string `min:"1" type:"string" required:"true"` - - // The contents of the query with all query statements. - // - // QueryString is a required field - QueryString *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNamedQueryInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNamedQueryInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateNamedQueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateNamedQueryInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.NamedQueryId == nil { - invalidParams.Add(request.NewErrParamRequired("NamedQueryId")) - } - if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) - } - if s.QueryString == nil { - invalidParams.Add(request.NewErrParamRequired("QueryString")) - } - if s.QueryString != nil && len(*s.QueryString) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *UpdateNamedQueryInput) SetDescription(v string) *UpdateNamedQueryInput { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateNamedQueryInput) SetName(v string) *UpdateNamedQueryInput { - s.Name = &v - return s -} - -// SetNamedQueryId sets the NamedQueryId field's value. 
-func (s *UpdateNamedQueryInput) SetNamedQueryId(v string) *UpdateNamedQueryInput { - s.NamedQueryId = &v - return s -} - -// SetQueryString sets the QueryString field's value. -func (s *UpdateNamedQueryInput) SetQueryString(v string) *UpdateNamedQueryInput { - s.QueryString = &v - return s -} - -type UpdateNamedQueryOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNamedQueryOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNamedQueryOutput) GoString() string { - return s.String() -} - -type UpdateNotebookInput struct { - _ struct{} `type:"structure"` - - // A unique case-sensitive string used to ensure the request to create the notebook - // is idempotent (executes only once). - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // you. If you are not using the Amazon Web Services SDK or the Amazon Web Services - // CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"1" type:"string"` - - // The ID of the notebook to update. - // - // NotebookId is a required field - NotebookId *string `min:"1" type:"string" required:"true"` - - // The updated content for the notebook. - // - // Payload is a required field - Payload *string `min:"1" type:"string" required:"true"` - - // The active notebook session ID. Required if the notebook has an active session. - SessionId *string `min:"1" type:"string"` - - // The notebook content type. Currently, the only valid type is IPYNB. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"NotebookType"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNotebookInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNotebookInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateNotebookInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateNotebookInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) - } - if s.NotebookId == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookId")) - } - if s.NotebookId != nil && len(*s.NotebookId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) - } - if s.Payload == nil { - invalidParams.Add(request.NewErrParamRequired("Payload")) - } - if s.Payload != nil && len(*s.Payload) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Payload", 1)) - } - if s.SessionId != nil && len(*s.SessionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *UpdateNotebookInput) SetClientRequestToken(v string) *UpdateNotebookInput { - s.ClientRequestToken = &v - return s -} - -// SetNotebookId sets the NotebookId field's value. -func (s *UpdateNotebookInput) SetNotebookId(v string) *UpdateNotebookInput { - s.NotebookId = &v - return s -} - -// SetPayload sets the Payload field's value. -func (s *UpdateNotebookInput) SetPayload(v string) *UpdateNotebookInput { - s.Payload = &v - return s -} - -// SetSessionId sets the SessionId field's value. -func (s *UpdateNotebookInput) SetSessionId(v string) *UpdateNotebookInput { - s.SessionId = &v - return s -} - -// SetType sets the Type field's value. -func (s *UpdateNotebookInput) SetType(v string) *UpdateNotebookInput { - s.Type = &v - return s -} - -type UpdateNotebookMetadataInput struct { - _ struct{} `type:"structure"` - - // A unique case-sensitive string used to ensure the request to create the notebook - // is idempotent (executes only once). - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // you. If you are not using the Amazon Web Services SDK or the Amazon Web Services - // CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"1" type:"string"` - - // The name to update the notebook to. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The ID of the notebook to update the metadata for. - // - // NotebookId is a required field - NotebookId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNotebookMetadataInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNotebookMetadataInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateNotebookMetadataInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateNotebookMetadataInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.NotebookId == nil { - invalidParams.Add(request.NewErrParamRequired("NotebookId")) - } - if s.NotebookId != nil && len(*s.NotebookId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *UpdateNotebookMetadataInput) SetClientRequestToken(v string) *UpdateNotebookMetadataInput { - s.ClientRequestToken = &v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateNotebookMetadataInput) SetName(v string) *UpdateNotebookMetadataInput { - s.Name = &v - return s -} - -// SetNotebookId sets the NotebookId field's value. -func (s *UpdateNotebookMetadataInput) SetNotebookId(v string) *UpdateNotebookMetadataInput { - s.NotebookId = &v - return s -} - -type UpdateNotebookMetadataOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNotebookMetadataOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNotebookMetadataOutput) GoString() string { - return s.String() -} - -type UpdateNotebookOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNotebookOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateNotebookOutput) GoString() string { - return s.String() -} - -type UpdatePreparedStatementInput struct { - _ struct{} `type:"structure"` - - // The description of the prepared statement. - Description *string `min:"1" type:"string"` - - // The query string for the prepared statement. - // - // QueryStatement is a required field - QueryStatement *string `min:"1" type:"string" required:"true"` - - // The name of the prepared statement. - // - // StatementName is a required field - StatementName *string `min:"1" type:"string" required:"true"` - - // The workgroup for the prepared statement. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePreparedStatementInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePreparedStatementInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdatePreparedStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdatePreparedStatementInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.QueryStatement == nil { - invalidParams.Add(request.NewErrParamRequired("QueryStatement")) - } - if s.QueryStatement != nil && len(*s.QueryStatement) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryStatement", 1)) - } - if s.StatementName == nil { - invalidParams.Add(request.NewErrParamRequired("StatementName")) - } - if s.StatementName != nil && len(*s.StatementName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *UpdatePreparedStatementInput) SetDescription(v string) *UpdatePreparedStatementInput { - s.Description = &v - return s -} - -// SetQueryStatement sets the QueryStatement field's value. -func (s *UpdatePreparedStatementInput) SetQueryStatement(v string) *UpdatePreparedStatementInput { - s.QueryStatement = &v - return s -} - -// SetStatementName sets the StatementName field's value. -func (s *UpdatePreparedStatementInput) SetStatementName(v string) *UpdatePreparedStatementInput { - s.StatementName = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *UpdatePreparedStatementInput) SetWorkGroup(v string) *UpdatePreparedStatementInput { - s.WorkGroup = &v - return s -} - -type UpdatePreparedStatementOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePreparedStatementOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePreparedStatementOutput) GoString() string { - return s.String() -} - -type UpdateWorkGroupInput struct { - _ struct{} `type:"structure"` - - // Contains configuration updates for an Athena SQL workgroup. - ConfigurationUpdates *WorkGroupConfigurationUpdates `type:"structure"` - - // The workgroup description. - Description *string `type:"string"` - - // The workgroup state that will be updated for the given workgroup. 
- State *string `type:"string" enum:"WorkGroupState"` - - // The specified workgroup that will be updated. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateWorkGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateWorkGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateWorkGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateWorkGroupInput"} - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - if s.ConfigurationUpdates != nil { - if err := s.ConfigurationUpdates.Validate(); err != nil { - invalidParams.AddNested("ConfigurationUpdates", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConfigurationUpdates sets the ConfigurationUpdates field's value. -func (s *UpdateWorkGroupInput) SetConfigurationUpdates(v *WorkGroupConfigurationUpdates) *UpdateWorkGroupInput { - s.ConfigurationUpdates = v - return s -} - -// SetDescription sets the Description field's value. -func (s *UpdateWorkGroupInput) SetDescription(v string) *UpdateWorkGroupInput { - s.Description = &v - return s -} - -// SetState sets the State field's value. -func (s *UpdateWorkGroupInput) SetState(v string) *UpdateWorkGroupInput { - s.State = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *UpdateWorkGroupInput) SetWorkGroup(v string) *UpdateWorkGroupInput { - s.WorkGroup = &v - return s -} - -type UpdateWorkGroupOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateWorkGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateWorkGroupOutput) GoString() string { - return s.String() -} - -// A workgroup, which contains a name, description, creation time, state, and -// other configuration, listed under WorkGroup$Configuration. Each workgroup -// enables you to isolate queries for you or your group of users from other -// queries in the same account, to configure the query results location and -// the encryption configuration (known as workgroup settings), to enable sending -// query metrics to Amazon CloudWatch, and to establish per-query data usage -// control limits for all queries in a workgroup. The workgroup settings override -// is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. 
-// See WorkGroupConfiguration$EnforceWorkGroupConfiguration. -type WorkGroup struct { - _ struct{} `type:"structure"` - - // The configuration of the workgroup, which includes the location in Amazon - // S3 where query and calculation results are stored, the encryption configuration, - // if any, used for query and calculation results; whether the Amazon CloudWatch - // Metrics are enabled for the workgroup; whether workgroup settings override - // client-side settings; and the data usage limits for the amount of data scanned - // per query or per workgroup. The workgroup settings override is specified - // in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. - // See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - Configuration *WorkGroupConfiguration `type:"structure"` - - // The date and time the workgroup was created. - CreationTime *time.Time `type:"timestamp"` - - // The workgroup description. - Description *string `type:"string"` - - // The ARN of the IAM Identity Center enabled application associated with the - // workgroup. - IdentityCenterApplicationArn *string `type:"string"` - - // The workgroup name. - // - // Name is a required field - Name *string `type:"string" required:"true"` - - // The state of the workgroup: ENABLED or DISABLED. - State *string `type:"string" enum:"WorkGroupState"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WorkGroup) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WorkGroup) GoString() string { - return s.String() -} - -// SetConfiguration sets the Configuration field's value. -func (s *WorkGroup) SetConfiguration(v *WorkGroupConfiguration) *WorkGroup { - s.Configuration = v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *WorkGroup) SetCreationTime(v time.Time) *WorkGroup { - s.CreationTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *WorkGroup) SetDescription(v string) *WorkGroup { - s.Description = &v - return s -} - -// SetIdentityCenterApplicationArn sets the IdentityCenterApplicationArn field's value. -func (s *WorkGroup) SetIdentityCenterApplicationArn(v string) *WorkGroup { - s.IdentityCenterApplicationArn = &v - return s -} - -// SetName sets the Name field's value. -func (s *WorkGroup) SetName(v string) *WorkGroup { - s.Name = &v - return s -} - -// SetState sets the State field's value. -func (s *WorkGroup) SetState(v string) *WorkGroup { - s.State = &v - return s -} - -// The configuration of the workgroup, which includes the location in Amazon -// S3 where query and calculation results are stored, the encryption option, -// if any, used for query and calculation results, whether the Amazon CloudWatch -// Metrics are enabled for the workgroup and whether workgroup settings override -// query settings, and the data usage limits for the amount of data scanned -// per query or per workgroup. The workgroup settings override is specified -// in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. 
-// See WorkGroupConfiguration$EnforceWorkGroupConfiguration. -type WorkGroupConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies a user defined JSON string that is passed to the notebook engine. - AdditionalConfiguration *string `min:"1" type:"string"` - - // The upper data usage limit (cutoff) for the amount of bytes a single query - // in a workgroup is allowed to scan. - BytesScannedCutoffPerQuery *int64 `min:"1e+07" type:"long"` - - // Specifies the KMS key that is used to encrypt the user's data stores in Athena. - // This setting does not apply to Athena SQL workgroups. - CustomerContentEncryptionConfiguration *CustomerContentEncryptionConfiguration `type:"structure"` - - // Enforces a minimal level of encryption for the workgroup for query and calculation - // results that are written to Amazon S3. When enabled, workgroup users can - // set encryption only to the minimum level set by the administrator or higher - // when they submit queries. - // - // The EnforceWorkGroupConfiguration setting takes precedence over the EnableMinimumEncryptionConfiguration - // flag. This means that if EnforceWorkGroupConfiguration is true, the EnableMinimumEncryptionConfiguration - // flag is ignored, and the workgroup configuration for encryption is used. - EnableMinimumEncryptionConfiguration *bool `type:"boolean"` - - // If set to "true", the settings for the workgroup override client-side settings. - // If set to "false", client-side settings are used. For more information, see - // Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - EnforceWorkGroupConfiguration *bool `type:"boolean"` - - // The engine version that all queries running on the workgroup use. Queries - // on the AmazonAthenaPreviewFunctionality workgroup run on the preview engine - // regardless of this setting. - EngineVersion *EngineVersion `type:"structure"` - - // The ARN of the execution role used to access user resources for Spark sessions - // and IAM Identity Center enabled workgroups. This property applies only to - // Spark enabled workgroups and IAM Identity Center enabled workgroups. The - // property is required for IAM Identity Center enabled workgroups. - ExecutionRole *string `min:"20" type:"string"` - - // Specifies whether the workgroup is IAM Identity Center supported. - IdentityCenterConfiguration *IdentityCenterConfiguration `type:"structure"` - - // Indicates that the Amazon CloudWatch metrics are enabled for the workgroup. - PublishCloudWatchMetricsEnabled *bool `type:"boolean"` - - // Specifies whether Amazon S3 access grants are enabled for query results. - QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration `type:"structure"` - - // If set to true, allows members assigned to a workgroup to reference Amazon - // S3 Requester Pays buckets in queries. If set to false, workgroup members - // cannot query data from Requester Pays buckets, and queries that retrieve - // data from Requester Pays buckets cause an error. The default is false. For - // more information about Requester Pays buckets, see Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) - // in the Amazon Simple Storage Service Developer Guide. 
- RequesterPaysEnabled *bool `type:"boolean"` - - // The configuration for the workgroup, which includes the location in Amazon - // S3 where query and calculation results are stored and the encryption option, - // if any, used for query and calculation results. To run the query, you must - // specify the query results location using one of the ways: either in the workgroup - // using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. - // If none of them is set, Athena issues an error that no output location is - // provided. For more information, see Working with query results, recent queries, - // and output files (https://docs.aws.amazon.com/athena/latest/ug/querying.html). - ResultConfiguration *ResultConfiguration `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WorkGroupConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WorkGroupConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WorkGroupConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WorkGroupConfiguration"} - if s.AdditionalConfiguration != nil && len(*s.AdditionalConfiguration) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AdditionalConfiguration", 1)) - } - if s.BytesScannedCutoffPerQuery != nil && *s.BytesScannedCutoffPerQuery < 1e+07 { - invalidParams.Add(request.NewErrParamMinValue("BytesScannedCutoffPerQuery", 1e+07)) - } - if s.ExecutionRole != nil && len(*s.ExecutionRole) < 20 { - invalidParams.Add(request.NewErrParamMinLen("ExecutionRole", 20)) - } - if s.CustomerContentEncryptionConfiguration != nil { - if err := s.CustomerContentEncryptionConfiguration.Validate(); err != nil { - invalidParams.AddNested("CustomerContentEncryptionConfiguration", err.(request.ErrInvalidParams)) - } - } - if s.EngineVersion != nil { - if err := s.EngineVersion.Validate(); err != nil { - invalidParams.AddNested("EngineVersion", err.(request.ErrInvalidParams)) - } - } - if s.QueryResultsS3AccessGrantsConfiguration != nil { - if err := s.QueryResultsS3AccessGrantsConfiguration.Validate(); err != nil { - invalidParams.AddNested("QueryResultsS3AccessGrantsConfiguration", err.(request.ErrInvalidParams)) - } - } - if s.ResultConfiguration != nil { - if err := s.ResultConfiguration.Validate(); err != nil { - invalidParams.AddNested("ResultConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAdditionalConfiguration sets the AdditionalConfiguration field's value. -func (s *WorkGroupConfiguration) SetAdditionalConfiguration(v string) *WorkGroupConfiguration { - s.AdditionalConfiguration = &v - return s -} - -// SetBytesScannedCutoffPerQuery sets the BytesScannedCutoffPerQuery field's value. 
-func (s *WorkGroupConfiguration) SetBytesScannedCutoffPerQuery(v int64) *WorkGroupConfiguration { - s.BytesScannedCutoffPerQuery = &v - return s -} - -// SetCustomerContentEncryptionConfiguration sets the CustomerContentEncryptionConfiguration field's value. -func (s *WorkGroupConfiguration) SetCustomerContentEncryptionConfiguration(v *CustomerContentEncryptionConfiguration) *WorkGroupConfiguration { - s.CustomerContentEncryptionConfiguration = v - return s -} - -// SetEnableMinimumEncryptionConfiguration sets the EnableMinimumEncryptionConfiguration field's value. -func (s *WorkGroupConfiguration) SetEnableMinimumEncryptionConfiguration(v bool) *WorkGroupConfiguration { - s.EnableMinimumEncryptionConfiguration = &v - return s -} - -// SetEnforceWorkGroupConfiguration sets the EnforceWorkGroupConfiguration field's value. -func (s *WorkGroupConfiguration) SetEnforceWorkGroupConfiguration(v bool) *WorkGroupConfiguration { - s.EnforceWorkGroupConfiguration = &v - return s -} - -// SetEngineVersion sets the EngineVersion field's value. -func (s *WorkGroupConfiguration) SetEngineVersion(v *EngineVersion) *WorkGroupConfiguration { - s.EngineVersion = v - return s -} - -// SetExecutionRole sets the ExecutionRole field's value. -func (s *WorkGroupConfiguration) SetExecutionRole(v string) *WorkGroupConfiguration { - s.ExecutionRole = &v - return s -} - -// SetIdentityCenterConfiguration sets the IdentityCenterConfiguration field's value. -func (s *WorkGroupConfiguration) SetIdentityCenterConfiguration(v *IdentityCenterConfiguration) *WorkGroupConfiguration { - s.IdentityCenterConfiguration = v - return s -} - -// SetPublishCloudWatchMetricsEnabled sets the PublishCloudWatchMetricsEnabled field's value. -func (s *WorkGroupConfiguration) SetPublishCloudWatchMetricsEnabled(v bool) *WorkGroupConfiguration { - s.PublishCloudWatchMetricsEnabled = &v - return s -} - -// SetQueryResultsS3AccessGrantsConfiguration sets the QueryResultsS3AccessGrantsConfiguration field's value. -func (s *WorkGroupConfiguration) SetQueryResultsS3AccessGrantsConfiguration(v *QueryResultsS3AccessGrantsConfiguration) *WorkGroupConfiguration { - s.QueryResultsS3AccessGrantsConfiguration = v - return s -} - -// SetRequesterPaysEnabled sets the RequesterPaysEnabled field's value. -func (s *WorkGroupConfiguration) SetRequesterPaysEnabled(v bool) *WorkGroupConfiguration { - s.RequesterPaysEnabled = &v - return s -} - -// SetResultConfiguration sets the ResultConfiguration field's value. -func (s *WorkGroupConfiguration) SetResultConfiguration(v *ResultConfiguration) *WorkGroupConfiguration { - s.ResultConfiguration = v - return s -} - -// The configuration information that will be updated for this workgroup, which -// includes the location in Amazon S3 where query and calculation results are -// stored, the encryption option, if any, used for query results, whether the -// Amazon CloudWatch Metrics are enabled for the workgroup, whether the workgroup -// settings override the client-side settings, and the data usage limit for -// the amount of bytes scanned per query, if it is specified. -type WorkGroupConfigurationUpdates struct { - _ struct{} `type:"structure"` - - // Contains a user defined string in JSON format for a Spark-enabled workgroup. - AdditionalConfiguration *string `min:"1" type:"string"` - - // The upper limit (cutoff) for the amount of bytes a single query in a workgroup - // is allowed to scan. 
- BytesScannedCutoffPerQuery *int64 `min:"1e+07" type:"long"` - - // Specifies the customer managed KMS key that is used to encrypt the user's - // data stores in Athena. When an Amazon Web Services managed key is used, this - // value is null. This setting does not apply to Athena SQL workgroups. - CustomerContentEncryptionConfiguration *CustomerContentEncryptionConfiguration `type:"structure"` - - // Enforces a minimal level of encryption for the workgroup for query and calculation - // results that are written to Amazon S3. When enabled, workgroup users can - // set encryption only to the minimum level set by the administrator or higher - // when they submit queries. This setting does not apply to Spark-enabled workgroups. - // - // The EnforceWorkGroupConfiguration setting takes precedence over the EnableMinimumEncryptionConfiguration - // flag. This means that if EnforceWorkGroupConfiguration is true, the EnableMinimumEncryptionConfiguration - // flag is ignored, and the workgroup configuration for encryption is used. - EnableMinimumEncryptionConfiguration *bool `type:"boolean"` - - // If set to "true", the settings for the workgroup override client-side settings. - // If set to "false" client-side settings are used. For more information, see - // Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - EnforceWorkGroupConfiguration *bool `type:"boolean"` - - // The engine version requested when a workgroup is updated. After the update, - // all queries on the workgroup run on the requested engine version. If no value - // was previously set, the default is Auto. Queries on the AmazonAthenaPreviewFunctionality - // workgroup run on the preview engine regardless of this setting. - EngineVersion *EngineVersion `type:"structure"` - - // The ARN of the execution role used to access user resources for Spark sessions - // and Identity Center enabled workgroups. This property applies only to Spark - // enabled workgroups and Identity Center enabled workgroups. - ExecutionRole *string `min:"20" type:"string"` - - // Indicates whether this workgroup enables publishing metrics to Amazon CloudWatch. - PublishCloudWatchMetricsEnabled *bool `type:"boolean"` - - // Specifies whether Amazon S3 access grants are enabled for query results. - QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration `type:"structure"` - - // Indicates that the data usage control limit per query is removed. WorkGroupConfiguration$BytesScannedCutoffPerQuery - RemoveBytesScannedCutoffPerQuery *bool `type:"boolean"` - - // Removes content encryption configuration from an Apache Spark-enabled Athena - // workgroup. - RemoveCustomerContentEncryptionConfiguration *bool `type:"boolean"` - - // If set to true, allows members assigned to a workgroup to specify Amazon - // S3 Requester Pays buckets in queries. If set to false, workgroup members - // cannot query data from Requester Pays buckets, and queries that retrieve - // data from Requester Pays buckets cause an error. The default is false. For - // more information about Requester Pays buckets, see Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) - // in the Amazon Simple Storage Service Developer Guide. - RequesterPaysEnabled *bool `type:"boolean"` - - // The result configuration information about the queries in this workgroup - // that will be updated. 
Includes the updated results location and an updated - // option for encrypting query results. - ResultConfigurationUpdates *ResultConfigurationUpdates `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WorkGroupConfigurationUpdates) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WorkGroupConfigurationUpdates) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *WorkGroupConfigurationUpdates) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WorkGroupConfigurationUpdates"} - if s.AdditionalConfiguration != nil && len(*s.AdditionalConfiguration) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AdditionalConfiguration", 1)) - } - if s.BytesScannedCutoffPerQuery != nil && *s.BytesScannedCutoffPerQuery < 1e+07 { - invalidParams.Add(request.NewErrParamMinValue("BytesScannedCutoffPerQuery", 1e+07)) - } - if s.ExecutionRole != nil && len(*s.ExecutionRole) < 20 { - invalidParams.Add(request.NewErrParamMinLen("ExecutionRole", 20)) - } - if s.CustomerContentEncryptionConfiguration != nil { - if err := s.CustomerContentEncryptionConfiguration.Validate(); err != nil { - invalidParams.AddNested("CustomerContentEncryptionConfiguration", err.(request.ErrInvalidParams)) - } - } - if s.EngineVersion != nil { - if err := s.EngineVersion.Validate(); err != nil { - invalidParams.AddNested("EngineVersion", err.(request.ErrInvalidParams)) - } - } - if s.QueryResultsS3AccessGrantsConfiguration != nil { - if err := s.QueryResultsS3AccessGrantsConfiguration.Validate(); err != nil { - invalidParams.AddNested("QueryResultsS3AccessGrantsConfiguration", err.(request.ErrInvalidParams)) - } - } - if s.ResultConfigurationUpdates != nil { - if err := s.ResultConfigurationUpdates.Validate(); err != nil { - invalidParams.AddNested("ResultConfigurationUpdates", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAdditionalConfiguration sets the AdditionalConfiguration field's value. -func (s *WorkGroupConfigurationUpdates) SetAdditionalConfiguration(v string) *WorkGroupConfigurationUpdates { - s.AdditionalConfiguration = &v - return s -} - -// SetBytesScannedCutoffPerQuery sets the BytesScannedCutoffPerQuery field's value. -func (s *WorkGroupConfigurationUpdates) SetBytesScannedCutoffPerQuery(v int64) *WorkGroupConfigurationUpdates { - s.BytesScannedCutoffPerQuery = &v - return s -} - -// SetCustomerContentEncryptionConfiguration sets the CustomerContentEncryptionConfiguration field's value. -func (s *WorkGroupConfigurationUpdates) SetCustomerContentEncryptionConfiguration(v *CustomerContentEncryptionConfiguration) *WorkGroupConfigurationUpdates { - s.CustomerContentEncryptionConfiguration = v - return s -} - -// SetEnableMinimumEncryptionConfiguration sets the EnableMinimumEncryptionConfiguration field's value. 
-func (s *WorkGroupConfigurationUpdates) SetEnableMinimumEncryptionConfiguration(v bool) *WorkGroupConfigurationUpdates { - s.EnableMinimumEncryptionConfiguration = &v - return s -} - -// SetEnforceWorkGroupConfiguration sets the EnforceWorkGroupConfiguration field's value. -func (s *WorkGroupConfigurationUpdates) SetEnforceWorkGroupConfiguration(v bool) *WorkGroupConfigurationUpdates { - s.EnforceWorkGroupConfiguration = &v - return s -} - -// SetEngineVersion sets the EngineVersion field's value. -func (s *WorkGroupConfigurationUpdates) SetEngineVersion(v *EngineVersion) *WorkGroupConfigurationUpdates { - s.EngineVersion = v - return s -} - -// SetExecutionRole sets the ExecutionRole field's value. -func (s *WorkGroupConfigurationUpdates) SetExecutionRole(v string) *WorkGroupConfigurationUpdates { - s.ExecutionRole = &v - return s -} - -// SetPublishCloudWatchMetricsEnabled sets the PublishCloudWatchMetricsEnabled field's value. -func (s *WorkGroupConfigurationUpdates) SetPublishCloudWatchMetricsEnabled(v bool) *WorkGroupConfigurationUpdates { - s.PublishCloudWatchMetricsEnabled = &v - return s -} - -// SetQueryResultsS3AccessGrantsConfiguration sets the QueryResultsS3AccessGrantsConfiguration field's value. -func (s *WorkGroupConfigurationUpdates) SetQueryResultsS3AccessGrantsConfiguration(v *QueryResultsS3AccessGrantsConfiguration) *WorkGroupConfigurationUpdates { - s.QueryResultsS3AccessGrantsConfiguration = v - return s -} - -// SetRemoveBytesScannedCutoffPerQuery sets the RemoveBytesScannedCutoffPerQuery field's value. -func (s *WorkGroupConfigurationUpdates) SetRemoveBytesScannedCutoffPerQuery(v bool) *WorkGroupConfigurationUpdates { - s.RemoveBytesScannedCutoffPerQuery = &v - return s -} - -// SetRemoveCustomerContentEncryptionConfiguration sets the RemoveCustomerContentEncryptionConfiguration field's value. -func (s *WorkGroupConfigurationUpdates) SetRemoveCustomerContentEncryptionConfiguration(v bool) *WorkGroupConfigurationUpdates { - s.RemoveCustomerContentEncryptionConfiguration = &v - return s -} - -// SetRequesterPaysEnabled sets the RequesterPaysEnabled field's value. -func (s *WorkGroupConfigurationUpdates) SetRequesterPaysEnabled(v bool) *WorkGroupConfigurationUpdates { - s.RequesterPaysEnabled = &v - return s -} - -// SetResultConfigurationUpdates sets the ResultConfigurationUpdates field's value. -func (s *WorkGroupConfigurationUpdates) SetResultConfigurationUpdates(v *ResultConfigurationUpdates) *WorkGroupConfigurationUpdates { - s.ResultConfigurationUpdates = v - return s -} - -// The summary information for the workgroup, which includes its name, state, -// description, and the date and time it was created. -type WorkGroupSummary struct { - _ struct{} `type:"structure"` - - // The workgroup creation date and time. - CreationTime *time.Time `type:"timestamp"` - - // The workgroup description. - Description *string `type:"string"` - - // The engine version setting for all queries on the workgroup. Queries on the - // AmazonAthenaPreviewFunctionality workgroup run on the preview engine regardless - // of this setting. - EngineVersion *EngineVersion `type:"structure"` - - // The ARN of the IAM Identity Center enabled application associated with the - // workgroup. - IdentityCenterApplicationArn *string `type:"string"` - - // The name of the workgroup. - Name *string `type:"string"` - - // The state of the workgroup. - State *string `type:"string" enum:"WorkGroupState"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WorkGroupSummary) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s WorkGroupSummary) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *WorkGroupSummary) SetCreationTime(v time.Time) *WorkGroupSummary { - s.CreationTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *WorkGroupSummary) SetDescription(v string) *WorkGroupSummary { - s.Description = &v - return s -} - -// SetEngineVersion sets the EngineVersion field's value. -func (s *WorkGroupSummary) SetEngineVersion(v *EngineVersion) *WorkGroupSummary { - s.EngineVersion = v - return s -} - -// SetIdentityCenterApplicationArn sets the IdentityCenterApplicationArn field's value. -func (s *WorkGroupSummary) SetIdentityCenterApplicationArn(v string) *WorkGroupSummary { - s.IdentityCenterApplicationArn = &v - return s -} - -// SetName sets the Name field's value. -func (s *WorkGroupSummary) SetName(v string) *WorkGroupSummary { - s.Name = &v - return s -} - -// SetState sets the State field's value. -func (s *WorkGroupSummary) SetState(v string) *WorkGroupSummary { - s.State = &v - return s -} - -const ( - // AuthenticationTypeDirectoryIdentity is a AuthenticationType enum value - AuthenticationTypeDirectoryIdentity = "DIRECTORY_IDENTITY" -) - -// AuthenticationType_Values returns all elements of the AuthenticationType enum -func AuthenticationType_Values() []string { - return []string{ - AuthenticationTypeDirectoryIdentity, - } -} - -const ( - // CalculationExecutionStateCreating is a CalculationExecutionState enum value - CalculationExecutionStateCreating = "CREATING" - - // CalculationExecutionStateCreated is a CalculationExecutionState enum value - CalculationExecutionStateCreated = "CREATED" - - // CalculationExecutionStateQueued is a CalculationExecutionState enum value - CalculationExecutionStateQueued = "QUEUED" - - // CalculationExecutionStateRunning is a CalculationExecutionState enum value - CalculationExecutionStateRunning = "RUNNING" - - // CalculationExecutionStateCanceling is a CalculationExecutionState enum value - CalculationExecutionStateCanceling = "CANCELING" - - // CalculationExecutionStateCanceled is a CalculationExecutionState enum value - CalculationExecutionStateCanceled = "CANCELED" - - // CalculationExecutionStateCompleted is a CalculationExecutionState enum value - CalculationExecutionStateCompleted = "COMPLETED" - - // CalculationExecutionStateFailed is a CalculationExecutionState enum value - CalculationExecutionStateFailed = "FAILED" -) - -// CalculationExecutionState_Values returns all elements of the CalculationExecutionState enum -func CalculationExecutionState_Values() []string { - return []string{ - CalculationExecutionStateCreating, - CalculationExecutionStateCreated, - CalculationExecutionStateQueued, - CalculationExecutionStateRunning, - CalculationExecutionStateCanceling, - CalculationExecutionStateCanceled, - CalculationExecutionStateCompleted, - CalculationExecutionStateFailed, - } -} - -const ( - // 
CapacityAllocationStatusPending is a CapacityAllocationStatus enum value - CapacityAllocationStatusPending = "PENDING" - - // CapacityAllocationStatusSucceeded is a CapacityAllocationStatus enum value - CapacityAllocationStatusSucceeded = "SUCCEEDED" - - // CapacityAllocationStatusFailed is a CapacityAllocationStatus enum value - CapacityAllocationStatusFailed = "FAILED" -) - -// CapacityAllocationStatus_Values returns all elements of the CapacityAllocationStatus enum -func CapacityAllocationStatus_Values() []string { - return []string{ - CapacityAllocationStatusPending, - CapacityAllocationStatusSucceeded, - CapacityAllocationStatusFailed, - } -} - -const ( - // CapacityReservationStatusPending is a CapacityReservationStatus enum value - CapacityReservationStatusPending = "PENDING" - - // CapacityReservationStatusActive is a CapacityReservationStatus enum value - CapacityReservationStatusActive = "ACTIVE" - - // CapacityReservationStatusCancelling is a CapacityReservationStatus enum value - CapacityReservationStatusCancelling = "CANCELLING" - - // CapacityReservationStatusCancelled is a CapacityReservationStatus enum value - CapacityReservationStatusCancelled = "CANCELLED" - - // CapacityReservationStatusFailed is a CapacityReservationStatus enum value - CapacityReservationStatusFailed = "FAILED" - - // CapacityReservationStatusUpdatePending is a CapacityReservationStatus enum value - CapacityReservationStatusUpdatePending = "UPDATE_PENDING" -) - -// CapacityReservationStatus_Values returns all elements of the CapacityReservationStatus enum -func CapacityReservationStatus_Values() []string { - return []string{ - CapacityReservationStatusPending, - CapacityReservationStatusActive, - CapacityReservationStatusCancelling, - CapacityReservationStatusCancelled, - CapacityReservationStatusFailed, - CapacityReservationStatusUpdatePending, - } -} - -const ( - // ColumnNullableNotNull is a ColumnNullable enum value - ColumnNullableNotNull = "NOT_NULL" - - // ColumnNullableNullable is a ColumnNullable enum value - ColumnNullableNullable = "NULLABLE" - - // ColumnNullableUnknown is a ColumnNullable enum value - ColumnNullableUnknown = "UNKNOWN" -) - -// ColumnNullable_Values returns all elements of the ColumnNullable enum -func ColumnNullable_Values() []string { - return []string{ - ColumnNullableNotNull, - ColumnNullableNullable, - ColumnNullableUnknown, - } -} - -const ( - // DataCatalogTypeLambda is a DataCatalogType enum value - DataCatalogTypeLambda = "LAMBDA" - - // DataCatalogTypeGlue is a DataCatalogType enum value - DataCatalogTypeGlue = "GLUE" - - // DataCatalogTypeHive is a DataCatalogType enum value - DataCatalogTypeHive = "HIVE" -) - -// DataCatalogType_Values returns all elements of the DataCatalogType enum -func DataCatalogType_Values() []string { - return []string{ - DataCatalogTypeLambda, - DataCatalogTypeGlue, - DataCatalogTypeHive, - } -} - -const ( - // EncryptionOptionSseS3 is a EncryptionOption enum value - EncryptionOptionSseS3 = "SSE_S3" - - // EncryptionOptionSseKms is a EncryptionOption enum value - EncryptionOptionSseKms = "SSE_KMS" - - // EncryptionOptionCseKms is a EncryptionOption enum value - EncryptionOptionCseKms = "CSE_KMS" -) - -// EncryptionOption_Values returns all elements of the EncryptionOption enum -func EncryptionOption_Values() []string { - return []string{ - EncryptionOptionSseS3, - EncryptionOptionSseKms, - EncryptionOptionCseKms, - } -} - -const ( - // ExecutorStateCreating is a ExecutorState enum value - ExecutorStateCreating = "CREATING" - - // 
ExecutorStateCreated is a ExecutorState enum value - ExecutorStateCreated = "CREATED" - - // ExecutorStateRegistered is a ExecutorState enum value - ExecutorStateRegistered = "REGISTERED" - - // ExecutorStateTerminating is a ExecutorState enum value - ExecutorStateTerminating = "TERMINATING" - - // ExecutorStateTerminated is a ExecutorState enum value - ExecutorStateTerminated = "TERMINATED" - - // ExecutorStateFailed is a ExecutorState enum value - ExecutorStateFailed = "FAILED" -) - -// ExecutorState_Values returns all elements of the ExecutorState enum -func ExecutorState_Values() []string { - return []string{ - ExecutorStateCreating, - ExecutorStateCreated, - ExecutorStateRegistered, - ExecutorStateTerminating, - ExecutorStateTerminated, - ExecutorStateFailed, - } -} - -const ( - // ExecutorTypeCoordinator is a ExecutorType enum value - ExecutorTypeCoordinator = "COORDINATOR" - - // ExecutorTypeGateway is a ExecutorType enum value - ExecutorTypeGateway = "GATEWAY" - - // ExecutorTypeWorker is a ExecutorType enum value - ExecutorTypeWorker = "WORKER" -) - -// ExecutorType_Values returns all elements of the ExecutorType enum -func ExecutorType_Values() []string { - return []string{ - ExecutorTypeCoordinator, - ExecutorTypeGateway, - ExecutorTypeWorker, - } -} - -const ( - // NotebookTypeIpynb is a NotebookType enum value - NotebookTypeIpynb = "IPYNB" -) - -// NotebookType_Values returns all elements of the NotebookType enum -func NotebookType_Values() []string { - return []string{ - NotebookTypeIpynb, - } -} - -const ( - // QueryExecutionStateQueued is a QueryExecutionState enum value - QueryExecutionStateQueued = "QUEUED" - - // QueryExecutionStateRunning is a QueryExecutionState enum value - QueryExecutionStateRunning = "RUNNING" - - // QueryExecutionStateSucceeded is a QueryExecutionState enum value - QueryExecutionStateSucceeded = "SUCCEEDED" - - // QueryExecutionStateFailed is a QueryExecutionState enum value - QueryExecutionStateFailed = "FAILED" - - // QueryExecutionStateCancelled is a QueryExecutionState enum value - QueryExecutionStateCancelled = "CANCELLED" -) - -// QueryExecutionState_Values returns all elements of the QueryExecutionState enum -func QueryExecutionState_Values() []string { - return []string{ - QueryExecutionStateQueued, - QueryExecutionStateRunning, - QueryExecutionStateSucceeded, - QueryExecutionStateFailed, - QueryExecutionStateCancelled, - } -} - -const ( - // S3AclOptionBucketOwnerFullControl is a S3AclOption enum value - S3AclOptionBucketOwnerFullControl = "BUCKET_OWNER_FULL_CONTROL" -) - -// S3AclOption_Values returns all elements of the S3AclOption enum -func S3AclOption_Values() []string { - return []string{ - S3AclOptionBucketOwnerFullControl, - } -} - -const ( - // SessionStateCreating is a SessionState enum value - SessionStateCreating = "CREATING" - - // SessionStateCreated is a SessionState enum value - SessionStateCreated = "CREATED" - - // SessionStateIdle is a SessionState enum value - SessionStateIdle = "IDLE" - - // SessionStateBusy is a SessionState enum value - SessionStateBusy = "BUSY" - - // SessionStateTerminating is a SessionState enum value - SessionStateTerminating = "TERMINATING" - - // SessionStateTerminated is a SessionState enum value - SessionStateTerminated = "TERMINATED" - - // SessionStateDegraded is a SessionState enum value - SessionStateDegraded = "DEGRADED" - - // SessionStateFailed is a SessionState enum value - SessionStateFailed = "FAILED" -) - -// SessionState_Values returns all elements of the SessionState enum -func 
SessionState_Values() []string { - return []string{ - SessionStateCreating, - SessionStateCreated, - SessionStateIdle, - SessionStateBusy, - SessionStateTerminating, - SessionStateTerminated, - SessionStateDegraded, - SessionStateFailed, - } -} - -const ( - // StatementTypeDdl is a StatementType enum value - StatementTypeDdl = "DDL" - - // StatementTypeDml is a StatementType enum value - StatementTypeDml = "DML" - - // StatementTypeUtility is a StatementType enum value - StatementTypeUtility = "UTILITY" -) - -// StatementType_Values returns all elements of the StatementType enum -func StatementType_Values() []string { - return []string{ - StatementTypeDdl, - StatementTypeDml, - StatementTypeUtility, - } -} - -// The reason for the query throttling, for example, when it exceeds the concurrent -// query limit. -const ( - // ThrottleReasonConcurrentQueryLimitExceeded is a ThrottleReason enum value - ThrottleReasonConcurrentQueryLimitExceeded = "CONCURRENT_QUERY_LIMIT_EXCEEDED" -) - -// ThrottleReason_Values returns all elements of the ThrottleReason enum -func ThrottleReason_Values() []string { - return []string{ - ThrottleReasonConcurrentQueryLimitExceeded, - } -} - -const ( - // WorkGroupStateEnabled is a WorkGroupState enum value - WorkGroupStateEnabled = "ENABLED" - - // WorkGroupStateDisabled is a WorkGroupState enum value - WorkGroupStateDisabled = "DISABLED" -) - -// WorkGroupState_Values returns all elements of the WorkGroupState enum -func WorkGroupState_Values() []string { - return []string{ - WorkGroupStateEnabled, - WorkGroupStateDisabled, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/athenaiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/athena/athenaiface/interface.go deleted file mode 100644 index 69eae3c0..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/athenaiface/interface.go +++ /dev/null @@ -1,381 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package athenaiface provides an interface to enable mocking the Amazon Athena service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package athenaiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/athena" -) - -// AthenaAPI provides an interface to enable mocking the -// athena.Athena service client's API operation, -// paginators, and waiters. This make unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // Amazon Athena. -// func myFunc(svc athenaiface.AthenaAPI) bool { -// // Make svc.BatchGetNamedQuery request -// } -// -// func main() { -// sess := session.New() -// svc := athena.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. 
-// type mockAthenaClient struct { -// athenaiface.AthenaAPI -// } -// func (m *mockAthenaClient) BatchGetNamedQuery(input *athena.BatchGetNamedQueryInput) (*athena.BatchGetNamedQueryOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockAthenaClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. Its suggested to use the pattern above for testing, or using -// tooling to generate mocks to satisfy the interfaces. -type AthenaAPI interface { - BatchGetNamedQuery(*athena.BatchGetNamedQueryInput) (*athena.BatchGetNamedQueryOutput, error) - BatchGetNamedQueryWithContext(aws.Context, *athena.BatchGetNamedQueryInput, ...request.Option) (*athena.BatchGetNamedQueryOutput, error) - BatchGetNamedQueryRequest(*athena.BatchGetNamedQueryInput) (*request.Request, *athena.BatchGetNamedQueryOutput) - - BatchGetPreparedStatement(*athena.BatchGetPreparedStatementInput) (*athena.BatchGetPreparedStatementOutput, error) - BatchGetPreparedStatementWithContext(aws.Context, *athena.BatchGetPreparedStatementInput, ...request.Option) (*athena.BatchGetPreparedStatementOutput, error) - BatchGetPreparedStatementRequest(*athena.BatchGetPreparedStatementInput) (*request.Request, *athena.BatchGetPreparedStatementOutput) - - BatchGetQueryExecution(*athena.BatchGetQueryExecutionInput) (*athena.BatchGetQueryExecutionOutput, error) - BatchGetQueryExecutionWithContext(aws.Context, *athena.BatchGetQueryExecutionInput, ...request.Option) (*athena.BatchGetQueryExecutionOutput, error) - BatchGetQueryExecutionRequest(*athena.BatchGetQueryExecutionInput) (*request.Request, *athena.BatchGetQueryExecutionOutput) - - CancelCapacityReservation(*athena.CancelCapacityReservationInput) (*athena.CancelCapacityReservationOutput, error) - CancelCapacityReservationWithContext(aws.Context, *athena.CancelCapacityReservationInput, ...request.Option) (*athena.CancelCapacityReservationOutput, error) - CancelCapacityReservationRequest(*athena.CancelCapacityReservationInput) (*request.Request, *athena.CancelCapacityReservationOutput) - - CreateCapacityReservation(*athena.CreateCapacityReservationInput) (*athena.CreateCapacityReservationOutput, error) - CreateCapacityReservationWithContext(aws.Context, *athena.CreateCapacityReservationInput, ...request.Option) (*athena.CreateCapacityReservationOutput, error) - CreateCapacityReservationRequest(*athena.CreateCapacityReservationInput) (*request.Request, *athena.CreateCapacityReservationOutput) - - CreateDataCatalog(*athena.CreateDataCatalogInput) (*athena.CreateDataCatalogOutput, error) - CreateDataCatalogWithContext(aws.Context, *athena.CreateDataCatalogInput, ...request.Option) (*athena.CreateDataCatalogOutput, error) - CreateDataCatalogRequest(*athena.CreateDataCatalogInput) (*request.Request, *athena.CreateDataCatalogOutput) - - CreateNamedQuery(*athena.CreateNamedQueryInput) (*athena.CreateNamedQueryOutput, error) - CreateNamedQueryWithContext(aws.Context, *athena.CreateNamedQueryInput, ...request.Option) (*athena.CreateNamedQueryOutput, error) - CreateNamedQueryRequest(*athena.CreateNamedQueryInput) (*request.Request, *athena.CreateNamedQueryOutput) - - CreateNotebook(*athena.CreateNotebookInput) (*athena.CreateNotebookOutput, error) - CreateNotebookWithContext(aws.Context, *athena.CreateNotebookInput, ...request.Option) 
(*athena.CreateNotebookOutput, error) - CreateNotebookRequest(*athena.CreateNotebookInput) (*request.Request, *athena.CreateNotebookOutput) - - CreatePreparedStatement(*athena.CreatePreparedStatementInput) (*athena.CreatePreparedStatementOutput, error) - CreatePreparedStatementWithContext(aws.Context, *athena.CreatePreparedStatementInput, ...request.Option) (*athena.CreatePreparedStatementOutput, error) - CreatePreparedStatementRequest(*athena.CreatePreparedStatementInput) (*request.Request, *athena.CreatePreparedStatementOutput) - - CreatePresignedNotebookUrl(*athena.CreatePresignedNotebookUrlInput) (*athena.CreatePresignedNotebookUrlOutput, error) - CreatePresignedNotebookUrlWithContext(aws.Context, *athena.CreatePresignedNotebookUrlInput, ...request.Option) (*athena.CreatePresignedNotebookUrlOutput, error) - CreatePresignedNotebookUrlRequest(*athena.CreatePresignedNotebookUrlInput) (*request.Request, *athena.CreatePresignedNotebookUrlOutput) - - CreateWorkGroup(*athena.CreateWorkGroupInput) (*athena.CreateWorkGroupOutput, error) - CreateWorkGroupWithContext(aws.Context, *athena.CreateWorkGroupInput, ...request.Option) (*athena.CreateWorkGroupOutput, error) - CreateWorkGroupRequest(*athena.CreateWorkGroupInput) (*request.Request, *athena.CreateWorkGroupOutput) - - DeleteCapacityReservation(*athena.DeleteCapacityReservationInput) (*athena.DeleteCapacityReservationOutput, error) - DeleteCapacityReservationWithContext(aws.Context, *athena.DeleteCapacityReservationInput, ...request.Option) (*athena.DeleteCapacityReservationOutput, error) - DeleteCapacityReservationRequest(*athena.DeleteCapacityReservationInput) (*request.Request, *athena.DeleteCapacityReservationOutput) - - DeleteDataCatalog(*athena.DeleteDataCatalogInput) (*athena.DeleteDataCatalogOutput, error) - DeleteDataCatalogWithContext(aws.Context, *athena.DeleteDataCatalogInput, ...request.Option) (*athena.DeleteDataCatalogOutput, error) - DeleteDataCatalogRequest(*athena.DeleteDataCatalogInput) (*request.Request, *athena.DeleteDataCatalogOutput) - - DeleteNamedQuery(*athena.DeleteNamedQueryInput) (*athena.DeleteNamedQueryOutput, error) - DeleteNamedQueryWithContext(aws.Context, *athena.DeleteNamedQueryInput, ...request.Option) (*athena.DeleteNamedQueryOutput, error) - DeleteNamedQueryRequest(*athena.DeleteNamedQueryInput) (*request.Request, *athena.DeleteNamedQueryOutput) - - DeleteNotebook(*athena.DeleteNotebookInput) (*athena.DeleteNotebookOutput, error) - DeleteNotebookWithContext(aws.Context, *athena.DeleteNotebookInput, ...request.Option) (*athena.DeleteNotebookOutput, error) - DeleteNotebookRequest(*athena.DeleteNotebookInput) (*request.Request, *athena.DeleteNotebookOutput) - - DeletePreparedStatement(*athena.DeletePreparedStatementInput) (*athena.DeletePreparedStatementOutput, error) - DeletePreparedStatementWithContext(aws.Context, *athena.DeletePreparedStatementInput, ...request.Option) (*athena.DeletePreparedStatementOutput, error) - DeletePreparedStatementRequest(*athena.DeletePreparedStatementInput) (*request.Request, *athena.DeletePreparedStatementOutput) - - DeleteWorkGroup(*athena.DeleteWorkGroupInput) (*athena.DeleteWorkGroupOutput, error) - DeleteWorkGroupWithContext(aws.Context, *athena.DeleteWorkGroupInput, ...request.Option) (*athena.DeleteWorkGroupOutput, error) - DeleteWorkGroupRequest(*athena.DeleteWorkGroupInput) (*request.Request, *athena.DeleteWorkGroupOutput) - - ExportNotebook(*athena.ExportNotebookInput) (*athena.ExportNotebookOutput, error) - ExportNotebookWithContext(aws.Context, 
*athena.ExportNotebookInput, ...request.Option) (*athena.ExportNotebookOutput, error) - ExportNotebookRequest(*athena.ExportNotebookInput) (*request.Request, *athena.ExportNotebookOutput) - - GetCalculationExecution(*athena.GetCalculationExecutionInput) (*athena.GetCalculationExecutionOutput, error) - GetCalculationExecutionWithContext(aws.Context, *athena.GetCalculationExecutionInput, ...request.Option) (*athena.GetCalculationExecutionOutput, error) - GetCalculationExecutionRequest(*athena.GetCalculationExecutionInput) (*request.Request, *athena.GetCalculationExecutionOutput) - - GetCalculationExecutionCode(*athena.GetCalculationExecutionCodeInput) (*athena.GetCalculationExecutionCodeOutput, error) - GetCalculationExecutionCodeWithContext(aws.Context, *athena.GetCalculationExecutionCodeInput, ...request.Option) (*athena.GetCalculationExecutionCodeOutput, error) - GetCalculationExecutionCodeRequest(*athena.GetCalculationExecutionCodeInput) (*request.Request, *athena.GetCalculationExecutionCodeOutput) - - GetCalculationExecutionStatus(*athena.GetCalculationExecutionStatusInput) (*athena.GetCalculationExecutionStatusOutput, error) - GetCalculationExecutionStatusWithContext(aws.Context, *athena.GetCalculationExecutionStatusInput, ...request.Option) (*athena.GetCalculationExecutionStatusOutput, error) - GetCalculationExecutionStatusRequest(*athena.GetCalculationExecutionStatusInput) (*request.Request, *athena.GetCalculationExecutionStatusOutput) - - GetCapacityAssignmentConfiguration(*athena.GetCapacityAssignmentConfigurationInput) (*athena.GetCapacityAssignmentConfigurationOutput, error) - GetCapacityAssignmentConfigurationWithContext(aws.Context, *athena.GetCapacityAssignmentConfigurationInput, ...request.Option) (*athena.GetCapacityAssignmentConfigurationOutput, error) - GetCapacityAssignmentConfigurationRequest(*athena.GetCapacityAssignmentConfigurationInput) (*request.Request, *athena.GetCapacityAssignmentConfigurationOutput) - - GetCapacityReservation(*athena.GetCapacityReservationInput) (*athena.GetCapacityReservationOutput, error) - GetCapacityReservationWithContext(aws.Context, *athena.GetCapacityReservationInput, ...request.Option) (*athena.GetCapacityReservationOutput, error) - GetCapacityReservationRequest(*athena.GetCapacityReservationInput) (*request.Request, *athena.GetCapacityReservationOutput) - - GetDataCatalog(*athena.GetDataCatalogInput) (*athena.GetDataCatalogOutput, error) - GetDataCatalogWithContext(aws.Context, *athena.GetDataCatalogInput, ...request.Option) (*athena.GetDataCatalogOutput, error) - GetDataCatalogRequest(*athena.GetDataCatalogInput) (*request.Request, *athena.GetDataCatalogOutput) - - GetDatabase(*athena.GetDatabaseInput) (*athena.GetDatabaseOutput, error) - GetDatabaseWithContext(aws.Context, *athena.GetDatabaseInput, ...request.Option) (*athena.GetDatabaseOutput, error) - GetDatabaseRequest(*athena.GetDatabaseInput) (*request.Request, *athena.GetDatabaseOutput) - - GetNamedQuery(*athena.GetNamedQueryInput) (*athena.GetNamedQueryOutput, error) - GetNamedQueryWithContext(aws.Context, *athena.GetNamedQueryInput, ...request.Option) (*athena.GetNamedQueryOutput, error) - GetNamedQueryRequest(*athena.GetNamedQueryInput) (*request.Request, *athena.GetNamedQueryOutput) - - GetNotebookMetadata(*athena.GetNotebookMetadataInput) (*athena.GetNotebookMetadataOutput, error) - GetNotebookMetadataWithContext(aws.Context, *athena.GetNotebookMetadataInput, ...request.Option) (*athena.GetNotebookMetadataOutput, error) - 
GetNotebookMetadataRequest(*athena.GetNotebookMetadataInput) (*request.Request, *athena.GetNotebookMetadataOutput) - - GetPreparedStatement(*athena.GetPreparedStatementInput) (*athena.GetPreparedStatementOutput, error) - GetPreparedStatementWithContext(aws.Context, *athena.GetPreparedStatementInput, ...request.Option) (*athena.GetPreparedStatementOutput, error) - GetPreparedStatementRequest(*athena.GetPreparedStatementInput) (*request.Request, *athena.GetPreparedStatementOutput) - - GetQueryExecution(*athena.GetQueryExecutionInput) (*athena.GetQueryExecutionOutput, error) - GetQueryExecutionWithContext(aws.Context, *athena.GetQueryExecutionInput, ...request.Option) (*athena.GetQueryExecutionOutput, error) - GetQueryExecutionRequest(*athena.GetQueryExecutionInput) (*request.Request, *athena.GetQueryExecutionOutput) - - GetQueryResults(*athena.GetQueryResultsInput) (*athena.GetQueryResultsOutput, error) - GetQueryResultsWithContext(aws.Context, *athena.GetQueryResultsInput, ...request.Option) (*athena.GetQueryResultsOutput, error) - GetQueryResultsRequest(*athena.GetQueryResultsInput) (*request.Request, *athena.GetQueryResultsOutput) - - GetQueryResultsPages(*athena.GetQueryResultsInput, func(*athena.GetQueryResultsOutput, bool) bool) error - GetQueryResultsPagesWithContext(aws.Context, *athena.GetQueryResultsInput, func(*athena.GetQueryResultsOutput, bool) bool, ...request.Option) error - - GetQueryRuntimeStatistics(*athena.GetQueryRuntimeStatisticsInput) (*athena.GetQueryRuntimeStatisticsOutput, error) - GetQueryRuntimeStatisticsWithContext(aws.Context, *athena.GetQueryRuntimeStatisticsInput, ...request.Option) (*athena.GetQueryRuntimeStatisticsOutput, error) - GetQueryRuntimeStatisticsRequest(*athena.GetQueryRuntimeStatisticsInput) (*request.Request, *athena.GetQueryRuntimeStatisticsOutput) - - GetSession(*athena.GetSessionInput) (*athena.GetSessionOutput, error) - GetSessionWithContext(aws.Context, *athena.GetSessionInput, ...request.Option) (*athena.GetSessionOutput, error) - GetSessionRequest(*athena.GetSessionInput) (*request.Request, *athena.GetSessionOutput) - - GetSessionStatus(*athena.GetSessionStatusInput) (*athena.GetSessionStatusOutput, error) - GetSessionStatusWithContext(aws.Context, *athena.GetSessionStatusInput, ...request.Option) (*athena.GetSessionStatusOutput, error) - GetSessionStatusRequest(*athena.GetSessionStatusInput) (*request.Request, *athena.GetSessionStatusOutput) - - GetTableMetadata(*athena.GetTableMetadataInput) (*athena.GetTableMetadataOutput, error) - GetTableMetadataWithContext(aws.Context, *athena.GetTableMetadataInput, ...request.Option) (*athena.GetTableMetadataOutput, error) - GetTableMetadataRequest(*athena.GetTableMetadataInput) (*request.Request, *athena.GetTableMetadataOutput) - - GetWorkGroup(*athena.GetWorkGroupInput) (*athena.GetWorkGroupOutput, error) - GetWorkGroupWithContext(aws.Context, *athena.GetWorkGroupInput, ...request.Option) (*athena.GetWorkGroupOutput, error) - GetWorkGroupRequest(*athena.GetWorkGroupInput) (*request.Request, *athena.GetWorkGroupOutput) - - ImportNotebook(*athena.ImportNotebookInput) (*athena.ImportNotebookOutput, error) - ImportNotebookWithContext(aws.Context, *athena.ImportNotebookInput, ...request.Option) (*athena.ImportNotebookOutput, error) - ImportNotebookRequest(*athena.ImportNotebookInput) (*request.Request, *athena.ImportNotebookOutput) - - ListApplicationDPUSizes(*athena.ListApplicationDPUSizesInput) (*athena.ListApplicationDPUSizesOutput, error) - ListApplicationDPUSizesWithContext(aws.Context, 
*athena.ListApplicationDPUSizesInput, ...request.Option) (*athena.ListApplicationDPUSizesOutput, error) - ListApplicationDPUSizesRequest(*athena.ListApplicationDPUSizesInput) (*request.Request, *athena.ListApplicationDPUSizesOutput) - - ListApplicationDPUSizesPages(*athena.ListApplicationDPUSizesInput, func(*athena.ListApplicationDPUSizesOutput, bool) bool) error - ListApplicationDPUSizesPagesWithContext(aws.Context, *athena.ListApplicationDPUSizesInput, func(*athena.ListApplicationDPUSizesOutput, bool) bool, ...request.Option) error - - ListCalculationExecutions(*athena.ListCalculationExecutionsInput) (*athena.ListCalculationExecutionsOutput, error) - ListCalculationExecutionsWithContext(aws.Context, *athena.ListCalculationExecutionsInput, ...request.Option) (*athena.ListCalculationExecutionsOutput, error) - ListCalculationExecutionsRequest(*athena.ListCalculationExecutionsInput) (*request.Request, *athena.ListCalculationExecutionsOutput) - - ListCalculationExecutionsPages(*athena.ListCalculationExecutionsInput, func(*athena.ListCalculationExecutionsOutput, bool) bool) error - ListCalculationExecutionsPagesWithContext(aws.Context, *athena.ListCalculationExecutionsInput, func(*athena.ListCalculationExecutionsOutput, bool) bool, ...request.Option) error - - ListCapacityReservations(*athena.ListCapacityReservationsInput) (*athena.ListCapacityReservationsOutput, error) - ListCapacityReservationsWithContext(aws.Context, *athena.ListCapacityReservationsInput, ...request.Option) (*athena.ListCapacityReservationsOutput, error) - ListCapacityReservationsRequest(*athena.ListCapacityReservationsInput) (*request.Request, *athena.ListCapacityReservationsOutput) - - ListCapacityReservationsPages(*athena.ListCapacityReservationsInput, func(*athena.ListCapacityReservationsOutput, bool) bool) error - ListCapacityReservationsPagesWithContext(aws.Context, *athena.ListCapacityReservationsInput, func(*athena.ListCapacityReservationsOutput, bool) bool, ...request.Option) error - - ListDataCatalogs(*athena.ListDataCatalogsInput) (*athena.ListDataCatalogsOutput, error) - ListDataCatalogsWithContext(aws.Context, *athena.ListDataCatalogsInput, ...request.Option) (*athena.ListDataCatalogsOutput, error) - ListDataCatalogsRequest(*athena.ListDataCatalogsInput) (*request.Request, *athena.ListDataCatalogsOutput) - - ListDataCatalogsPages(*athena.ListDataCatalogsInput, func(*athena.ListDataCatalogsOutput, bool) bool) error - ListDataCatalogsPagesWithContext(aws.Context, *athena.ListDataCatalogsInput, func(*athena.ListDataCatalogsOutput, bool) bool, ...request.Option) error - - ListDatabases(*athena.ListDatabasesInput) (*athena.ListDatabasesOutput, error) - ListDatabasesWithContext(aws.Context, *athena.ListDatabasesInput, ...request.Option) (*athena.ListDatabasesOutput, error) - ListDatabasesRequest(*athena.ListDatabasesInput) (*request.Request, *athena.ListDatabasesOutput) - - ListDatabasesPages(*athena.ListDatabasesInput, func(*athena.ListDatabasesOutput, bool) bool) error - ListDatabasesPagesWithContext(aws.Context, *athena.ListDatabasesInput, func(*athena.ListDatabasesOutput, bool) bool, ...request.Option) error - - ListEngineVersions(*athena.ListEngineVersionsInput) (*athena.ListEngineVersionsOutput, error) - ListEngineVersionsWithContext(aws.Context, *athena.ListEngineVersionsInput, ...request.Option) (*athena.ListEngineVersionsOutput, error) - ListEngineVersionsRequest(*athena.ListEngineVersionsInput) (*request.Request, *athena.ListEngineVersionsOutput) - - ListEngineVersionsPages(*athena.ListEngineVersionsInput, 
func(*athena.ListEngineVersionsOutput, bool) bool) error - ListEngineVersionsPagesWithContext(aws.Context, *athena.ListEngineVersionsInput, func(*athena.ListEngineVersionsOutput, bool) bool, ...request.Option) error - - ListExecutors(*athena.ListExecutorsInput) (*athena.ListExecutorsOutput, error) - ListExecutorsWithContext(aws.Context, *athena.ListExecutorsInput, ...request.Option) (*athena.ListExecutorsOutput, error) - ListExecutorsRequest(*athena.ListExecutorsInput) (*request.Request, *athena.ListExecutorsOutput) - - ListExecutorsPages(*athena.ListExecutorsInput, func(*athena.ListExecutorsOutput, bool) bool) error - ListExecutorsPagesWithContext(aws.Context, *athena.ListExecutorsInput, func(*athena.ListExecutorsOutput, bool) bool, ...request.Option) error - - ListNamedQueries(*athena.ListNamedQueriesInput) (*athena.ListNamedQueriesOutput, error) - ListNamedQueriesWithContext(aws.Context, *athena.ListNamedQueriesInput, ...request.Option) (*athena.ListNamedQueriesOutput, error) - ListNamedQueriesRequest(*athena.ListNamedQueriesInput) (*request.Request, *athena.ListNamedQueriesOutput) - - ListNamedQueriesPages(*athena.ListNamedQueriesInput, func(*athena.ListNamedQueriesOutput, bool) bool) error - ListNamedQueriesPagesWithContext(aws.Context, *athena.ListNamedQueriesInput, func(*athena.ListNamedQueriesOutput, bool) bool, ...request.Option) error - - ListNotebookMetadata(*athena.ListNotebookMetadataInput) (*athena.ListNotebookMetadataOutput, error) - ListNotebookMetadataWithContext(aws.Context, *athena.ListNotebookMetadataInput, ...request.Option) (*athena.ListNotebookMetadataOutput, error) - ListNotebookMetadataRequest(*athena.ListNotebookMetadataInput) (*request.Request, *athena.ListNotebookMetadataOutput) - - ListNotebookSessions(*athena.ListNotebookSessionsInput) (*athena.ListNotebookSessionsOutput, error) - ListNotebookSessionsWithContext(aws.Context, *athena.ListNotebookSessionsInput, ...request.Option) (*athena.ListNotebookSessionsOutput, error) - ListNotebookSessionsRequest(*athena.ListNotebookSessionsInput) (*request.Request, *athena.ListNotebookSessionsOutput) - - ListPreparedStatements(*athena.ListPreparedStatementsInput) (*athena.ListPreparedStatementsOutput, error) - ListPreparedStatementsWithContext(aws.Context, *athena.ListPreparedStatementsInput, ...request.Option) (*athena.ListPreparedStatementsOutput, error) - ListPreparedStatementsRequest(*athena.ListPreparedStatementsInput) (*request.Request, *athena.ListPreparedStatementsOutput) - - ListPreparedStatementsPages(*athena.ListPreparedStatementsInput, func(*athena.ListPreparedStatementsOutput, bool) bool) error - ListPreparedStatementsPagesWithContext(aws.Context, *athena.ListPreparedStatementsInput, func(*athena.ListPreparedStatementsOutput, bool) bool, ...request.Option) error - - ListQueryExecutions(*athena.ListQueryExecutionsInput) (*athena.ListQueryExecutionsOutput, error) - ListQueryExecutionsWithContext(aws.Context, *athena.ListQueryExecutionsInput, ...request.Option) (*athena.ListQueryExecutionsOutput, error) - ListQueryExecutionsRequest(*athena.ListQueryExecutionsInput) (*request.Request, *athena.ListQueryExecutionsOutput) - - ListQueryExecutionsPages(*athena.ListQueryExecutionsInput, func(*athena.ListQueryExecutionsOutput, bool) bool) error - ListQueryExecutionsPagesWithContext(aws.Context, *athena.ListQueryExecutionsInput, func(*athena.ListQueryExecutionsOutput, bool) bool, ...request.Option) error - - ListSessions(*athena.ListSessionsInput) (*athena.ListSessionsOutput, error) - ListSessionsWithContext(aws.Context, 
*athena.ListSessionsInput, ...request.Option) (*athena.ListSessionsOutput, error) - ListSessionsRequest(*athena.ListSessionsInput) (*request.Request, *athena.ListSessionsOutput) - - ListSessionsPages(*athena.ListSessionsInput, func(*athena.ListSessionsOutput, bool) bool) error - ListSessionsPagesWithContext(aws.Context, *athena.ListSessionsInput, func(*athena.ListSessionsOutput, bool) bool, ...request.Option) error - - ListTableMetadata(*athena.ListTableMetadataInput) (*athena.ListTableMetadataOutput, error) - ListTableMetadataWithContext(aws.Context, *athena.ListTableMetadataInput, ...request.Option) (*athena.ListTableMetadataOutput, error) - ListTableMetadataRequest(*athena.ListTableMetadataInput) (*request.Request, *athena.ListTableMetadataOutput) - - ListTableMetadataPages(*athena.ListTableMetadataInput, func(*athena.ListTableMetadataOutput, bool) bool) error - ListTableMetadataPagesWithContext(aws.Context, *athena.ListTableMetadataInput, func(*athena.ListTableMetadataOutput, bool) bool, ...request.Option) error - - ListTagsForResource(*athena.ListTagsForResourceInput) (*athena.ListTagsForResourceOutput, error) - ListTagsForResourceWithContext(aws.Context, *athena.ListTagsForResourceInput, ...request.Option) (*athena.ListTagsForResourceOutput, error) - ListTagsForResourceRequest(*athena.ListTagsForResourceInput) (*request.Request, *athena.ListTagsForResourceOutput) - - ListTagsForResourcePages(*athena.ListTagsForResourceInput, func(*athena.ListTagsForResourceOutput, bool) bool) error - ListTagsForResourcePagesWithContext(aws.Context, *athena.ListTagsForResourceInput, func(*athena.ListTagsForResourceOutput, bool) bool, ...request.Option) error - - ListWorkGroups(*athena.ListWorkGroupsInput) (*athena.ListWorkGroupsOutput, error) - ListWorkGroupsWithContext(aws.Context, *athena.ListWorkGroupsInput, ...request.Option) (*athena.ListWorkGroupsOutput, error) - ListWorkGroupsRequest(*athena.ListWorkGroupsInput) (*request.Request, *athena.ListWorkGroupsOutput) - - ListWorkGroupsPages(*athena.ListWorkGroupsInput, func(*athena.ListWorkGroupsOutput, bool) bool) error - ListWorkGroupsPagesWithContext(aws.Context, *athena.ListWorkGroupsInput, func(*athena.ListWorkGroupsOutput, bool) bool, ...request.Option) error - - PutCapacityAssignmentConfiguration(*athena.PutCapacityAssignmentConfigurationInput) (*athena.PutCapacityAssignmentConfigurationOutput, error) - PutCapacityAssignmentConfigurationWithContext(aws.Context, *athena.PutCapacityAssignmentConfigurationInput, ...request.Option) (*athena.PutCapacityAssignmentConfigurationOutput, error) - PutCapacityAssignmentConfigurationRequest(*athena.PutCapacityAssignmentConfigurationInput) (*request.Request, *athena.PutCapacityAssignmentConfigurationOutput) - - StartCalculationExecution(*athena.StartCalculationExecutionInput) (*athena.StartCalculationExecutionOutput, error) - StartCalculationExecutionWithContext(aws.Context, *athena.StartCalculationExecutionInput, ...request.Option) (*athena.StartCalculationExecutionOutput, error) - StartCalculationExecutionRequest(*athena.StartCalculationExecutionInput) (*request.Request, *athena.StartCalculationExecutionOutput) - - StartQueryExecution(*athena.StartQueryExecutionInput) (*athena.StartQueryExecutionOutput, error) - StartQueryExecutionWithContext(aws.Context, *athena.StartQueryExecutionInput, ...request.Option) (*athena.StartQueryExecutionOutput, error) - StartQueryExecutionRequest(*athena.StartQueryExecutionInput) (*request.Request, *athena.StartQueryExecutionOutput) - - 
StartSession(*athena.StartSessionInput) (*athena.StartSessionOutput, error) - StartSessionWithContext(aws.Context, *athena.StartSessionInput, ...request.Option) (*athena.StartSessionOutput, error) - StartSessionRequest(*athena.StartSessionInput) (*request.Request, *athena.StartSessionOutput) - - StopCalculationExecution(*athena.StopCalculationExecutionInput) (*athena.StopCalculationExecutionOutput, error) - StopCalculationExecutionWithContext(aws.Context, *athena.StopCalculationExecutionInput, ...request.Option) (*athena.StopCalculationExecutionOutput, error) - StopCalculationExecutionRequest(*athena.StopCalculationExecutionInput) (*request.Request, *athena.StopCalculationExecutionOutput) - - StopQueryExecution(*athena.StopQueryExecutionInput) (*athena.StopQueryExecutionOutput, error) - StopQueryExecutionWithContext(aws.Context, *athena.StopQueryExecutionInput, ...request.Option) (*athena.StopQueryExecutionOutput, error) - StopQueryExecutionRequest(*athena.StopQueryExecutionInput) (*request.Request, *athena.StopQueryExecutionOutput) - - TagResource(*athena.TagResourceInput) (*athena.TagResourceOutput, error) - TagResourceWithContext(aws.Context, *athena.TagResourceInput, ...request.Option) (*athena.TagResourceOutput, error) - TagResourceRequest(*athena.TagResourceInput) (*request.Request, *athena.TagResourceOutput) - - TerminateSession(*athena.TerminateSessionInput) (*athena.TerminateSessionOutput, error) - TerminateSessionWithContext(aws.Context, *athena.TerminateSessionInput, ...request.Option) (*athena.TerminateSessionOutput, error) - TerminateSessionRequest(*athena.TerminateSessionInput) (*request.Request, *athena.TerminateSessionOutput) - - UntagResource(*athena.UntagResourceInput) (*athena.UntagResourceOutput, error) - UntagResourceWithContext(aws.Context, *athena.UntagResourceInput, ...request.Option) (*athena.UntagResourceOutput, error) - UntagResourceRequest(*athena.UntagResourceInput) (*request.Request, *athena.UntagResourceOutput) - - UpdateCapacityReservation(*athena.UpdateCapacityReservationInput) (*athena.UpdateCapacityReservationOutput, error) - UpdateCapacityReservationWithContext(aws.Context, *athena.UpdateCapacityReservationInput, ...request.Option) (*athena.UpdateCapacityReservationOutput, error) - UpdateCapacityReservationRequest(*athena.UpdateCapacityReservationInput) (*request.Request, *athena.UpdateCapacityReservationOutput) - - UpdateDataCatalog(*athena.UpdateDataCatalogInput) (*athena.UpdateDataCatalogOutput, error) - UpdateDataCatalogWithContext(aws.Context, *athena.UpdateDataCatalogInput, ...request.Option) (*athena.UpdateDataCatalogOutput, error) - UpdateDataCatalogRequest(*athena.UpdateDataCatalogInput) (*request.Request, *athena.UpdateDataCatalogOutput) - - UpdateNamedQuery(*athena.UpdateNamedQueryInput) (*athena.UpdateNamedQueryOutput, error) - UpdateNamedQueryWithContext(aws.Context, *athena.UpdateNamedQueryInput, ...request.Option) (*athena.UpdateNamedQueryOutput, error) - UpdateNamedQueryRequest(*athena.UpdateNamedQueryInput) (*request.Request, *athena.UpdateNamedQueryOutput) - - UpdateNotebook(*athena.UpdateNotebookInput) (*athena.UpdateNotebookOutput, error) - UpdateNotebookWithContext(aws.Context, *athena.UpdateNotebookInput, ...request.Option) (*athena.UpdateNotebookOutput, error) - UpdateNotebookRequest(*athena.UpdateNotebookInput) (*request.Request, *athena.UpdateNotebookOutput) - - UpdateNotebookMetadata(*athena.UpdateNotebookMetadataInput) (*athena.UpdateNotebookMetadataOutput, error) - UpdateNotebookMetadataWithContext(aws.Context, 
*athena.UpdateNotebookMetadataInput, ...request.Option) (*athena.UpdateNotebookMetadataOutput, error) - UpdateNotebookMetadataRequest(*athena.UpdateNotebookMetadataInput) (*request.Request, *athena.UpdateNotebookMetadataOutput) - - UpdatePreparedStatement(*athena.UpdatePreparedStatementInput) (*athena.UpdatePreparedStatementOutput, error) - UpdatePreparedStatementWithContext(aws.Context, *athena.UpdatePreparedStatementInput, ...request.Option) (*athena.UpdatePreparedStatementOutput, error) - UpdatePreparedStatementRequest(*athena.UpdatePreparedStatementInput) (*request.Request, *athena.UpdatePreparedStatementOutput) - - UpdateWorkGroup(*athena.UpdateWorkGroupInput) (*athena.UpdateWorkGroupOutput, error) - UpdateWorkGroupWithContext(aws.Context, *athena.UpdateWorkGroupInput, ...request.Option) (*athena.UpdateWorkGroupOutput, error) - UpdateWorkGroupRequest(*athena.UpdateWorkGroupInput) (*request.Request, *athena.UpdateWorkGroupOutput) -} - -var _ AthenaAPI = (*athena.Athena)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go b/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go deleted file mode 100644 index eda201e0..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package athena provides the client and types for making API -// requests to Amazon Athena. -// -// Amazon Athena is an interactive query service that lets you use standard -// SQL to analyze data directly in Amazon S3. You can point Athena at your data -// in Amazon S3 and run ad-hoc queries and get results in seconds. Athena is -// serverless, so there is no infrastructure to set up or manage. You pay only -// for the queries you run. Athena scales automatically—executing queries -// in parallel—so results are fast, even with large datasets and complex queries. -// For more information, see What is Amazon Athena (http://docs.aws.amazon.com/athena/latest/ug/what-is.html) -// in the Amazon Athena User Guide. -// -// If you connect to Athena using the JDBC driver, use version 1.1.0 of the -// driver or later with the Amazon Athena API. Earlier version drivers do not -// support the API. For more information and to download the driver, see Accessing -// Amazon Athena with JDBC (https://docs.aws.amazon.com/athena/latest/ug/connect-with-jdbc.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18 for more information on this service. -// -// See athena package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/athena/ -// -// # Using the Client -// -// To contact Amazon Athena with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Amazon Athena client Athena for more -// information on creating client for this service. 
-// https://docs.aws.amazon.com/sdk-for-go/api/service/athena/#New -package athena diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go b/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go deleted file mode 100644 index 1685e153..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go +++ /dev/null @@ -1,61 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package athena - -import ( - "github.com/aws/aws-sdk-go/private/protocol" -) - -const ( - - // ErrCodeInternalServerException for service response error code - // "InternalServerException". - // - // Indicates a platform issue, which may be due to a transient condition or - // outage. - ErrCodeInternalServerException = "InternalServerException" - - // ErrCodeInvalidRequestException for service response error code - // "InvalidRequestException". - // - // Indicates that something is wrong with the input to the request. For example, - // a required parameter may be missing or out of range. - ErrCodeInvalidRequestException = "InvalidRequestException" - - // ErrCodeMetadataException for service response error code - // "MetadataException". - // - // An exception that Athena received when it called a custom metastore. Occurs - // if the error is not caused by user input (InvalidRequestException) or from - // the Athena platform (InternalServerException). For example, if a user-created - // Lambda function is missing permissions, the Lambda 4XX exception is returned - // in a MetadataException. - ErrCodeMetadataException = "MetadataException" - - // ErrCodeResourceNotFoundException for service response error code - // "ResourceNotFoundException". - // - // A resource, such as a workgroup, was not found. - ErrCodeResourceNotFoundException = "ResourceNotFoundException" - - // ErrCodeSessionAlreadyExistsException for service response error code - // "SessionAlreadyExistsException". - // - // The specified session already exists. - ErrCodeSessionAlreadyExistsException = "SessionAlreadyExistsException" - - // ErrCodeTooManyRequestsException for service response error code - // "TooManyRequestsException". - // - // Indicates that the request was throttled. - ErrCodeTooManyRequestsException = "TooManyRequestsException" -) - -var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "InternalServerException": newErrorInternalServerException, - "InvalidRequestException": newErrorInvalidRequestException, - "MetadataException": newErrorMetadataException, - "ResourceNotFoundException": newErrorResourceNotFoundException, - "SessionAlreadyExistsException": newErrorSessionAlreadyExistsException, - "TooManyRequestsException": newErrorTooManyRequestsException, -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go deleted file mode 100644 index 2991beaf..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go +++ /dev/null @@ -1,109 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package athena - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// Athena provides the API operation methods for making requests to -// Amazon Athena. 
See this package's package overview docs -// for details on the service. -// -// Athena methods are safe to use concurrently. It is not safe to -// modify mutate any of the struct's properties though. -type Athena struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "athena" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "Athena" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the Athena client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// -// mySession := session.Must(session.NewSession()) -// -// // Create a Athena client from just a session. -// svc := athena.New(mySession) -// -// // Create a Athena client with additional configuration -// svc := athena.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *Athena { - c := p.ClientConfig(EndpointsID, cfgs...) - if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = EndpointsID - // No Fallback - } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *Athena { - svc := &Athena{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2017-05-18", - ResolvedRegion: resolvedRegion, - JSONVersion: "1.1", - TargetPrefix: "AmazonAthena", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed( - protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), - ) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a Athena operation and runs any -// custom request initialization. -func (c *Athena) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go index 04f6c811..827bd519 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -179,8 +179,8 @@ func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req // // Creates and returns access and refresh tokens for clients and applications // that are authenticated using IAM entities. 
The access token can be used to -// fetch short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// fetch short-term credentials for the assigned Amazon Web Services accounts +// or to access application APIs using bearer authentication. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -331,6 +331,13 @@ func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *reques // Indicates that an error from the service occurred while trying to process // a request. // +// - InvalidRedirectUriException +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { req, out := c.RegisterClientRequest(input) @@ -619,6 +626,15 @@ type CreateTokenInput struct { // type is currently unsupported for the CreateToken API. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Used only when calling this API for the Device Code grant type. This short-term // code is used to identify this authorization request. This comes from the // result of the StartDeviceAuthorization API. @@ -718,6 +734,12 @@ func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput { + s.CodeVerifier = &v + return s +} + // SetDeviceCode sets the DeviceCode field's value. func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { s.DeviceCode = &v @@ -751,7 +773,8 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { type CreateTokenOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenOutput's @@ -863,6 +886,15 @@ type CreateTokenWithIAMInput struct { // persisted in the Authorization Code GrantOptions for the application. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. 
+ CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. Specify one of the following values, depending // on the grant type that you want: @@ -982,6 +1014,12 @@ func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput { + s.CodeVerifier = &v + return s +} + // SetGrantType sets the GrantType field's value. func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { s.GrantType = &v @@ -1027,7 +1065,8 @@ func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWith type CreateTokenWithIAMOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's @@ -1495,6 +1534,78 @@ func (s *InvalidGrantException) RequestID() string { return s.RespMetadata.RequestID } +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_redirect_uri. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) GoString() string { + return s.String() +} + +func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error { + return &InvalidRedirectUriException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRedirectUriException) Code() string { + return "InvalidRedirectUriException" +} + +// Message returns the exception's message. +func (s *InvalidRedirectUriException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRedirectUriException) OrigErr() error { + return nil +} + +func (s *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidRedirectUriException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRedirectUriException) RequestID() string { + return s.RespMetadata.RequestID +} + // Indicates that something is wrong with the input to the request. For example, // a required parameter might be missing or out of range. type InvalidRequestException struct { @@ -1731,6 +1842,25 @@ type RegisterClientInput struct { // ClientType is a required field ClientType *string `locationName:"clientType" type:"string" required:"true"` + // This IAM Identity Center application ARN is used to define administrator-managed + // configuration for public client access to resources. At authorization, the + // scopes, grants, and redirect URI available to this client will be restricted + // by this application resource. + EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"` + + // The list of OAuth 2.0 grant types that are defined by the client. This list + // is used to restrict the token granting flows available to the client. + GrantTypes []*string `locationName:"grantTypes" type:"list"` + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string `locationName:"issuerUrl" type:"string"` + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent + // can be redirected back to. + RedirectUris []*string `locationName:"redirectUris" type:"list"` + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []*string `locationName:"scopes" type:"list"` @@ -1782,6 +1912,30 @@ func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { return s } +// SetEntitledApplicationArn sets the EntitledApplicationArn field's value. +func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput { + s.EntitledApplicationArn = &v + return s +} + +// SetGrantTypes sets the GrantTypes field's value. +func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput { + s.GrantTypes = v + return s +} + +// SetIssuerUrl sets the IssuerUrl field's value. +func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput { + s.IssuerUrl = &v + return s +} + +// SetRedirectUris sets the RedirectUris field's value. +func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput { + s.RedirectUris = v + return s +} + // SetScopes sets the Scopes field's value. func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { s.Scopes = v diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go index e6242e49..cadf4584 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -57,6 +57,13 @@ const ( // makes a CreateToken request with an invalid grant type. ErrCodeInvalidGrantException = "InvalidGrantException" + // ErrCodeInvalidRedirectUriException for service response error code + // "InvalidRedirectUriException". + // + // Indicates that one or more redirect URI in the request is not supported for + // this operation. 
+ ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException" + // ErrCodeInvalidRequestException for service response error code // "InvalidRequestException". // @@ -106,6 +113,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidClientException": newErrorInvalidClientException, "InvalidClientMetadataException": newErrorInvalidClientMetadataException, "InvalidGrantException": newErrorInvalidGrantException, + "InvalidRedirectUriException": newErrorInvalidRedirectUriException, "InvalidRequestException": newErrorInvalidRequestException, "InvalidRequestRegionException": newErrorInvalidRequestRegionException, "InvalidScopeException": newErrorInvalidScopeException, diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore index c92d6105..2518b349 100644 --- a/vendor/github.com/aws/smithy-go/.gitignore +++ b/vendor/github.com/aws/smithy-go/.gitignore @@ -24,3 +24,6 @@ build/ # VS Code bin/ .vscode/ + +# make +c.out diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 46b11508..4df632dc 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,89 @@ +# Release (2025-02-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.3 + * **Bug Fix**: Fix HTTP metrics data race. + * **Bug Fix**: Replace usages of deprecated ioutil package. + +# Release (2025-01-21) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.2 + * **Bug Fix**: Fix HTTP metrics data race. + * **Bug Fix**: Replace usages of deprecated ioutil package. + +# Release (2024-11-15) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.1 + * **Bug Fix**: Fix failure to replace URI path segments when their names overlap. + +# Release (2024-10-03) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.0 + * **Feature**: Add HTTP client metrics. + +# Release (2024-09-25) + +## Module Highlights +* `github.com/aws/smithy-go/aws-http-auth`: [v1.0.0](aws-http-auth/CHANGELOG.md#v100-2024-09-25) + * **Release**: Initial release of module aws-http-auth, which implements generically consumable SigV4 and SigV4a request signing. + +# Release (2024-09-19) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.21.0 + * **Feature**: Add tracing and metrics APIs, and builtin instrumentation for both, in generated clients. +* `github.com/aws/smithy-go/metrics/smithyotelmetrics`: [v1.0.0](metrics/smithyotelmetrics/CHANGELOG.md#v100-2024-09-19) + * **Release**: Initial release of `smithyotelmetrics` module, which is used to adapt an OpenTelemetry SDK meter provider to be used with Smithy clients. +* `github.com/aws/smithy-go/tracing/smithyoteltracing`: [v1.0.0](tracing/smithyoteltracing/CHANGELOG.md#v100-2024-09-19) + * **Release**: Initial release of `smithyoteltracing` module, which is used to adapt an OpenTelemetry SDK tracer provider to be used with Smithy clients. 
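The release notes above mention the new tracing and metrics APIs; the corresponding `metrics` package is added further down in this same diff. As a minimal sketch (not part of the vendored code, scope and instrument names are illustrative), the bundled no-op provider can be exercised like this:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/smithy-go/metrics"
)

func main() {
	// The no-op provider satisfies metrics.MeterProvider and is what clients
	// fall back to when no real telemetry provider is configured.
	var provider metrics.MeterProvider = metrics.NopMeterProvider{}

	meter := provider.Meter("github.com/example/app")
	counter, err := meter.Int64Counter("example.requests")
	if err != nil {
		log.Fatal(err)
	}

	// Records nothing with the no-op implementation, but exercises the same
	// call path a real provider would use.
	counter.Add(context.Background(), 1)
}
```

A real provider, such as the `smithyotelmetrics` adapter mentioned in the release notes, would be wired in the same way.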
+ +# Release (2024-08-14) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.4 + * **Dependency Update**: Bump minimum Go version to 1.21. + +# Release (2024-06-27) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.3 + * **Bug Fix**: Fix encoding/cbor test overflow on x86. + +# Release (2024-03-29) + +* No change notes available for this release. + +# Release (2024-02-21) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.1 + * **Bug Fix**: Remove runtime dependency on go-cmp. + +# Release (2024-02-13) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.0 + * **Feature**: Add codegen definition for sigv4a trait. + * **Feature**: Bump minimum Go version to 1.20 per our language support policy. + # Release (2023-12-07) ## Module Highlights diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md index c4b6a1c5..1f8d01ff 100644 --- a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md +++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md @@ -39,6 +39,37 @@ To send us a pull request, please: GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). +### Changelog Documents + +(You can SKIP this step if you are only changing the code generator, and not the runtime). + +When submitting a pull request please include a changelog file on a folder named `.changelog`. +These are used to generate the content `CHANGELOG.md` and Release Notes. The format of the file is as follows: + +``` +{ + "id": "12345678-1234-1234-1234-123456789012" + "type": "bugfix" + "collapse": true + "description": "Fix improper use of printf-style functions.", + "modules": [ + "." + ] +} +``` + +* id: a UUID. This should also be used for the name of the file, so if your id is `12345678-1234-1234-1234-123456789012` the file should be named `12345678-1234-1234-1234-123456789012.json/` +* type: one of the following: + * bugfix: Fixing an existing bug + * Feature: Adding a new feature to an existing service + * Release: Releasing a new module + * Dependency: Updating dependencies + * Announcement: Making an announcement, like deprecation of a module +* collapse: whether this change should appear separately on the release notes on every module listed on `modules` (`"collapse": false`), or if it should show up as a single entry (`"collapse": true`) + * For the smithy-go repository this should always be `false` +* description: Description of this change. Most of the times is the same as the title of the PR +* modules: which Go modules does this change impact. The root module is expressed as "." + ## Finding contributions to work on Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile index 4b3c2093..a3c2cf17 100644 --- a/vendor/github.com/aws/smithy-go/Makefile +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -33,13 +33,18 @@ smithy-clean: ################## # Linting/Verify # ################## -.PHONY: verify vet +.PHONY: verify vet cover verify: vet vet: go vet ${BUILD_TAGS} --all ./... +cover: + go test ${BUILD_TAGS} -coverprofile c.out ./... 
+ @cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \ + echo "total (statements): $$cover%"; + ################ # Unit Testing # ################ @@ -93,5 +98,12 @@ module-version: ############## .PHONY: install-changelog +external-changelog: + mkdir -p .changelog + cp changelog-template.json .changelog/00000000-0000-0000-0000-000000000000.json + @echo "Generate a new UUID and update the file at .changelog/00000000-0000-0000-0000-000000000000.json" + @echo "Make sure to rename the file with your new id, like .changelog/12345678-1234-1234-1234-123456789012.json" + @echo "See CONTRIBUTING.md 'Changelog Documents' and an example at https://github.com/aws/smithy-go/pull/543/files" + install-changelog: go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index c374f692..08df7458 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -1,19 +1,21 @@ -## Smithy Go +# Smithy Go [![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) -[Smithy](https://smithy.io/) code generators for Go. +[Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime. + +The smithy-go runtime requires a minimum version of Go 1.20. **WARNING: All interfaces are subject to change.** -## Can I use this? +## Can I use the code generators? In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), in order to generate transport mechanisms and serialization/deserialization code ("serde") accordingly. -The code generator does not currently support any protocols out of the box, +The code generator does not currently support any protocols out of the box other than the new `smithy.protocols#rpcv2Cbor`, therefore the useability of this project on its own is currently limited. Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are @@ -21,6 +23,70 @@ tracking the movement of those out of the SDK into smithy-go in [#458](https://github.com/aws/smithy-go/issues/458), but there's currently no timeline for doing so. +## Plugins + +This repository implements the following Smithy build plugins: + +| ID | GAV prefix | Description | +|----|------------|-------------| +| `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. | +| `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. | + +**NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. 
The artifact version is currently fixed at 0.1.0.** + +## `go-codegen` + +### Configuration + +[`GoSettings`](codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/GoSettings.java) +contains all of the settings enabled from `smithy-build.json` and helper +methods and types. The up-to-date list of top-level properties enabled for +`go-client-codegen` can be found in `GoSettings::from()`. + +| Setting | Type | Required | Description | +|-----------------|---------|----------|-----------------------------------------------------------------------------------------------------------------------------| +| `service` | string | yes | The Shape ID of the service for which to generate the client. | +| `module` | string | yes | Name of the module in `generated.json` (and `go.mod` if `generateGoMod` is enabled) and `doc.go`. | +| `generateGoMod` | boolean | | Whether to generate a default `go.mod` file. The default value is `false`. | +| `goDirective` | string | | [Go directive](https://go.dev/ref/mod#go-mod-file-go) of the module. The default value is the minimum supported Go version. | + +### Supported protocols + +| Protocol | Notes | +|----------|-------| +| [`smithy.protocols#rpcv2Cbor`](https://smithy.io/2.0/additional-specs/protocols/smithy-rpc-v2.html) | Event streaming not yet implemented. | + +### Example + +This example applies the `go-codegen` build plugin to the Smithy quickstart +example created from `smithy init`: + +```json +{ + "version": "1.0", + "sources": [ + "models" + ], + "maven": { + "dependencies": [ + "software.amazon.smithy.go:smithy-go-codegen:0.1.0" + ] + }, + "plugins": { + "go-codegen": { + "service": "example.weather#Weather", + "module": "github.com/example/weather", + "generateGoMod": true, + "goDirective": "1.20" + } + } +} +``` + +## `go-server-codegen` + +This plugin is a work-in-progress and is currently undocumented. + ## License This project is licensed under the Apache-2.0 License. diff --git a/vendor/github.com/aws/smithy-go/changelog-template.json b/vendor/github.com/aws/smithy-go/changelog-template.json new file mode 100644 index 00000000..d36e2b3e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/changelog-template.json @@ -0,0 +1,9 @@ +{ + "id": "00000000-0000-0000-0000-000000000000", + "type": "feature|bugfix|dependency", + "description": "Description of your changes", + "collapse": false, + "modules": [ + "." + ] +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go index e78926c9..9ae30854 100644 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go @@ -22,33 +22,33 @@ func bufCap(b []byte, n int) []byte { // replacePathElement replaces a single element in the path []byte. // Escape is used to control whether the value will be escaped using Amazon path escape style. func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { - fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } + // search for "{}". If not found, search for the greedy version "{+}". If none are found, return error + fieldBuf = bufCap(fieldBuf, len(key)+2) // { } fieldBuf = append(fieldBuf, uriTokenStart) fieldBuf = append(fieldBuf, key...) + fieldBuf = append(fieldBuf, uriTokenStop) start := bytes.Index(path, fieldBuf) - end := start + len(fieldBuf) - if start < 0 || len(path[end:]) == 0 { - // TODO what to do about error? 
- return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) - } - encodeSep := true - if path[end] == uriTokenSkip { - // '+' token means do not escape slashes + if start < 0 { + fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } + fieldBuf = append(fieldBuf, uriTokenStart) + fieldBuf = append(fieldBuf, key...) + fieldBuf = append(fieldBuf, uriTokenSkip) + fieldBuf = append(fieldBuf, uriTokenStop) + + start = bytes.Index(path, fieldBuf) + if start < 0 { + return path, fieldBuf, fmt.Errorf("invalid path index, start=%d. %s", start, path) + } encodeSep = false - end++ } + end := start + len(fieldBuf) if escape { val = EscapePath(val, encodeSep) } - if path[end] != uriTokenStop { - return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) - } - end++ - fieldBuf = bufCap(fieldBuf, len(val)) fieldBuf = append(fieldBuf, val...) diff --git a/vendor/github.com/aws/smithy-go/encoding/json/array.go b/vendor/github.com/aws/smithy-go/encoding/json/array.go new file mode 100644 index 00000000..7a232f66 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/array.go @@ -0,0 +1,35 @@ +package json + +import ( + "bytes" +) + +// Array represents the encoding of a JSON Array +type Array struct { + w *bytes.Buffer + writeComma bool + scratch *[]byte +} + +func newArray(w *bytes.Buffer, scratch *[]byte) *Array { + w.WriteRune(leftBracket) + return &Array{w: w, scratch: scratch} +} + +// Value adds a new element to the JSON Array. +// Returns a Value type that is used to encode +// the array element. +func (a *Array) Value() Value { + if a.writeComma { + a.w.WriteRune(comma) + } else { + a.writeComma = true + } + + return newValue(a.w, a.scratch) +} + +// Close encodes the end of the JSON Array +func (a *Array) Close() { + a.w.WriteRune(rightBracket) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/constants.go b/vendor/github.com/aws/smithy-go/encoding/json/constants.go new file mode 100644 index 00000000..91044092 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/constants.go @@ -0,0 +1,15 @@ +package json + +const ( + leftBrace = '{' + rightBrace = '}' + + leftBracket = '[' + rightBracket = ']' + + comma = ',' + quote = '"' + colon = ':' + + null = "null" +) diff --git a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go new file mode 100644 index 00000000..7050c85b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go @@ -0,0 +1,139 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// DiscardUnknownField discards unknown fields from a decoder body. +// This function is useful while deserializing a JSON body with additional +// unknown information that should be discarded. +func DiscardUnknownField(decoder *json.Decoder) error { + // This deliberately does not share logic with CollectUnknownField, even + // though it could, because if we were to delegate to that then we'd incur + // extra allocations and general memory usage. 
+ v, err := decoder.Token() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + if _, ok := v.(json.Delim); ok { + for decoder.More() { + err = DiscardUnknownField(decoder) + } + endToken, err := decoder.Token() + if err != nil { + return err + } + if _, ok := endToken.(json.Delim); !ok { + return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v", + endToken, endToken) + } + } + + return nil +} + +// CollectUnknownField grabs the contents of unknown fields from the decoder body +// and returns them as a byte slice. This is useful for skipping unknown fields without +// completely discarding them. +func CollectUnknownField(decoder *json.Decoder) ([]byte, error) { + result, err := collectUnknownField(decoder) + if err != nil { + return nil, err + } + + buff := bytes.NewBuffer(nil) + encoder := json.NewEncoder(buff) + + if err := encoder.Encode(result); err != nil { + return nil, err + } + + return buff.Bytes(), nil +} + +func collectUnknownField(decoder *json.Decoder) (interface{}, error) { + // Grab the initial value. This could either be a concrete value like a string or a a + // delimiter. + token, err := decoder.Token() + if err == io.EOF { + return nil, nil + } + if err != nil { + return nil, err + } + + // If it's an array or object, we'll need to recurse. + delim, ok := token.(json.Delim) + if ok { + var result interface{} + if delim == '{' { + result, err = collectUnknownObject(decoder) + if err != nil { + return nil, err + } + } else { + result, err = collectUnknownArray(decoder) + if err != nil { + return nil, err + } + } + + // Discard the closing token. decoder.Token handles checking for matching delimiters + if _, err := decoder.Token(); err != nil { + return nil, err + } + return result, nil + } + + return token, nil +} + +func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) { + // We need to create an empty array here instead of a nil array, since by getting + // into this function at all we necessarily have seen a non-nil list. + array := []interface{}{} + + for decoder.More() { + value, err := collectUnknownField(decoder) + if err != nil { + return nil, err + } + array = append(array, value) + } + + return array, nil +} + +func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) { + object := make(map[string]interface{}) + + for decoder.More() { + key, err := collectUnknownField(decoder) + if err != nil { + return nil, err + } + + // Keys have to be strings, which is particularly important as the encoder + // won't except a map with interface{} keys + stringKey, ok := key.(string) + if !ok { + return nil, fmt.Errorf("expected string key, found %T", key) + } + + value, err := collectUnknownField(decoder) + if err != nil { + return nil, err + } + + object[stringKey] = value + } + + return object, nil +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go new file mode 100644 index 00000000..8772953f --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go @@ -0,0 +1,30 @@ +package json + +import ( + "bytes" +) + +// Encoder is JSON encoder that supports construction of JSON values +// using methods. 
+type Encoder struct { + w *bytes.Buffer + Value +} + +// NewEncoder returns a new JSON encoder +func NewEncoder() *Encoder { + writer := bytes.NewBuffer(nil) + scratch := make([]byte, 64) + + return &Encoder{w: writer, Value: newValue(writer, &scratch)} +} + +// String returns the String output of the JSON encoder +func (e Encoder) String() string { + return e.w.String() +} + +// Bytes returns the []byte slice of the JSON encoder +func (e Encoder) Bytes() []byte { + return e.w.Bytes() +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/escape.go b/vendor/github.com/aws/smithy-go/encoding/json/escape.go new file mode 100644 index 00000000..d984d0cd --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/escape.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet + +package json + +import ( + "bytes" + "unicode/utf8" +) + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). +var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} + +// copied from Go 1.8 stdlib's encoding/json/#hex +var hex = "0123456789abcdef" + +// escapeStringBytes escapes and writes the passed in string bytes to the dst +// buffer +// +// Copied and modifed from Go 1.8 stdlib's encodeing/json/#encodeState.stringBytes +func escapeStringBytes(e *bytes.Buffer, s []byte) { + e.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if safeSet[b] { + i++ + continue + } + if start < i { + e.Write(s[start:i]) + } + switch b { + case '\\', '"': + e.WriteByte('\\') + e.WriteByte(b) + case '\n': + e.WriteByte('\\') + e.WriteByte('n') + case '\r': + e.WriteByte('\\') + e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \t, \n and \r. 
+ // If escapeHTML is set, it also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + e.WriteString(`\u00`) + e.WriteByte(hex[b>>4]) + e.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\u202`) + e.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + e.Write(s[start:]) + } + e.WriteByte('"') +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go new file mode 100644 index 00000000..722346d0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/object.go @@ -0,0 +1,40 @@ +package json + +import ( + "bytes" +) + +// Object represents the encoding of a JSON Object type +type Object struct { + w *bytes.Buffer + writeComma bool + scratch *[]byte +} + +func newObject(w *bytes.Buffer, scratch *[]byte) *Object { + w.WriteRune(leftBrace) + return &Object{w: w, scratch: scratch} +} + +func (o *Object) writeKey(key string) { + escapeStringBytes(o.w, []byte(key)) + o.w.WriteRune(colon) +} + +// Key adds the given named key to the JSON object. +// Returns a Value encoder that should be used to encode +// a JSON value type. 
+func (o *Object) Key(name string) Value { + if o.writeComma { + o.w.WriteRune(comma) + } else { + o.writeComma = true + } + o.writeKey(name) + return newValue(o.w, o.scratch) +} + +// Close encodes the end of the JSON Object +func (o *Object) Close() { + o.w.WriteRune(rightBrace) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/value.go b/vendor/github.com/aws/smithy-go/encoding/json/value.go new file mode 100644 index 00000000..b41ff1e1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/value.go @@ -0,0 +1,149 @@ +package json + +import ( + "bytes" + "encoding/base64" + "math/big" + "strconv" + + "github.com/aws/smithy-go/encoding" +) + +// Value represents a JSON Value type +// JSON Value types: Object, Array, String, Number, Boolean, and Null +type Value struct { + w *bytes.Buffer + scratch *[]byte +} + +// newValue returns a new Value encoder +func newValue(w *bytes.Buffer, scratch *[]byte) Value { + return Value{w: w, scratch: scratch} +} + +// String encodes v as a JSON string +func (jv Value) String(v string) { + escapeStringBytes(jv.w, []byte(v)) +} + +// Byte encodes v as a JSON number +func (jv Value) Byte(v int8) { + jv.Long(int64(v)) +} + +// Short encodes v as a JSON number +func (jv Value) Short(v int16) { + jv.Long(int64(v)) +} + +// Integer encodes v as a JSON number +func (jv Value) Integer(v int32) { + jv.Long(int64(v)) +} + +// Long encodes v as a JSON number +func (jv Value) Long(v int64) { + *jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10) + jv.w.Write(*jv.scratch) +} + +// ULong encodes v as a JSON number +func (jv Value) ULong(v uint64) { + *jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10) + jv.w.Write(*jv.scratch) +} + +// Float encodes v as a JSON number +func (jv Value) Float(v float32) { + jv.float(float64(v), 32) +} + +// Double encodes v as a JSON number +func (jv Value) Double(v float64) { + jv.float(v, 64) +} + +func (jv Value) float(v float64, bits int) { + *jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits) + jv.w.Write(*jv.scratch) +} + +// Boolean encodes v as a JSON boolean +func (jv Value) Boolean(v bool) { + *jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v) + jv.w.Write(*jv.scratch) +} + +// Base64EncodeBytes writes v as a base64 value in JSON string +func (jv Value) Base64EncodeBytes(v []byte) { + encodeByteSlice(jv.w, (*jv.scratch)[:0], v) +} + +// Write writes v directly to the JSON document +func (jv Value) Write(v []byte) { + jv.w.Write(v) +} + +// Array returns a new Array encoder +func (jv Value) Array() *Array { + return newArray(jv.w, jv.scratch) +} + +// Object returns a new Object encoder +func (jv Value) Object() *Object { + return newObject(jv.w, jv.scratch) +} + +// Null encodes a null JSON value +func (jv Value) Null() { + jv.w.WriteString(null) +} + +// BigInteger encodes v as JSON value +func (jv Value) BigInteger(v *big.Int) { + jv.w.Write([]byte(v.Text(10))) +} + +// BigDecimal encodes v as JSON value +func (jv Value) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + jv.Long(i) + return + } + // TODO: Should this try to match ES6 ToString similar to stdlib JSON? 
+ jv.w.Write([]byte(v.Text('e', -1))) +} + +// Based on encoding/json encodeByteSlice from the Go Standard Library +// https://golang.org/src/encoding/json/encode.go +func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) { + if v == nil { + w.WriteString(null) + return + } + + w.WriteRune(quote) + + encodedLen := base64.StdEncoding.EncodedLen(len(v)) + if encodedLen <= len(scratch) { + // If the encoded bytes fit in e.scratch, avoid an extra + // allocation and use the cheaper Encoding.Encode. + dst := scratch[:encodedLen] + base64.StdEncoding.Encode(dst, v) + w.Write(dst) + } else if encodedLen <= 1024 { + // The encoded bytes are short enough to allocate for, and + // Encoding.Encode is still cheaper. + dst := make([]byte, encodedLen) + base64.StdEncoding.Encode(dst, v) + w.Write(dst) + } else { + // The encoded bytes are too long to cheaply allocate, and + // Encoding.Encode is no longer noticeably cheaper. + enc := base64.NewEncoder(base64.StdEncoding, w) + enc.Write(v) + enc.Close() + } + + w.WriteRune(quote) +} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index cd6f7fa4..d12d9589 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.19.0" +const goModuleVersion = "1.22.3" diff --git a/vendor/github.com/aws/smithy-go/metrics/metrics.go b/vendor/github.com/aws/smithy-go/metrics/metrics.go new file mode 100644 index 00000000..c009d9f2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/metrics/metrics.go @@ -0,0 +1,136 @@ +// Package metrics defines the metrics APIs used by Smithy clients. +package metrics + +import ( + "context" + + "github.com/aws/smithy-go" +) + +// MeterProvider is the entry point for creating a Meter. +type MeterProvider interface { + Meter(scope string, opts ...MeterOption) Meter +} + +// MeterOption applies configuration to a Meter. +type MeterOption func(o *MeterOptions) + +// MeterOptions represents configuration for a Meter. +type MeterOptions struct { + Properties smithy.Properties +} + +// Meter is the entry point for creation of measurement instruments. 
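Taken together, the new `encoding/json` files above (Encoder, Value, Object, Array) form a small streaming JSON writer. A minimal usage sketch, not part of the vendored code and with the import alias assumed:

```go
package main

import (
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	enc := smithyjson.NewEncoder()

	// Build {"name":"example","count":2,"tags":["a","b"]} incrementally.
	obj := enc.Object()
	obj.Key("name").String("example")
	obj.Key("count").Long(2)

	tags := obj.Key("tags").Array()
	tags.Value().String("a")
	tags.Value().String("b")
	tags.Close()

	obj.Close()

	fmt.Println(enc.String())
}
```

which prints `{"name":"example","count":2,"tags":["a","b"]}`.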
+type Meter interface { + // integer/synchronous + Int64Counter(name string, opts ...InstrumentOption) (Int64Counter, error) + Int64UpDownCounter(name string, opts ...InstrumentOption) (Int64UpDownCounter, error) + Int64Gauge(name string, opts ...InstrumentOption) (Int64Gauge, error) + Int64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) + + // integer/asynchronous + Int64AsyncCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + Int64AsyncUpDownCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + Int64AsyncGauge(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + + // floating-point/synchronous + Float64Counter(name string, opts ...InstrumentOption) (Float64Counter, error) + Float64UpDownCounter(name string, opts ...InstrumentOption) (Float64UpDownCounter, error) + Float64Gauge(name string, opts ...InstrumentOption) (Float64Gauge, error) + Float64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) + + // floating-point/asynchronous + Float64AsyncCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + Float64AsyncUpDownCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) + Float64AsyncGauge(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error) +} + +// InstrumentOption applies configuration to an instrument. +type InstrumentOption func(o *InstrumentOptions) + +// InstrumentOptions represents configuration for an instrument. +type InstrumentOptions struct { + UnitLabel string + Description string +} + +// Int64Counter measures a monotonically increasing int64 value. +type Int64Counter interface { + Add(context.Context, int64, ...RecordMetricOption) +} + +// Int64UpDownCounter measures a fluctuating int64 value. +type Int64UpDownCounter interface { + Add(context.Context, int64, ...RecordMetricOption) +} + +// Int64Gauge samples a discrete int64 value. +type Int64Gauge interface { + Sample(context.Context, int64, ...RecordMetricOption) +} + +// Int64Histogram records multiple data points for an int64 value. +type Int64Histogram interface { + Record(context.Context, int64, ...RecordMetricOption) +} + +// Float64Counter measures a monotonically increasing float64 value. +type Float64Counter interface { + Add(context.Context, float64, ...RecordMetricOption) +} + +// Float64UpDownCounter measures a fluctuating float64 value. +type Float64UpDownCounter interface { + Add(context.Context, float64, ...RecordMetricOption) +} + +// Float64Gauge samples a discrete float64 value. +type Float64Gauge interface { + Sample(context.Context, float64, ...RecordMetricOption) +} + +// Float64Histogram records multiple data points for an float64 value. +type Float64Histogram interface { + Record(context.Context, float64, ...RecordMetricOption) +} + +// AsyncInstrument is the universal handle returned for creation of all async +// instruments. +// +// Callers use the Stop() API to unregister the callback passed at instrument +// creation. +type AsyncInstrument interface { + Stop() +} + +// Int64Callback describes a function invoked when an async int64 instrument is +// read. +type Int64Callback func(context.Context, Int64Observer) + +// Int64Observer is the interface passed to async int64 instruments. +// +// Callers use the Observe() API of this interface to report metrics to the +// underlying collector. 
+type Int64Observer interface { + Observe(context.Context, int64, ...RecordMetricOption) +} + +// Float64Callback describes a function invoked when an async float64 +// instrument is read. +type Float64Callback func(context.Context, Float64Observer) + +// Float64Observer is the interface passed to async int64 instruments. +// +// Callers use the Observe() API of this interface to report metrics to the +// underlying collector. +type Float64Observer interface { + Observe(context.Context, float64, ...RecordMetricOption) +} + +// RecordMetricOption applies configuration to a recorded metric. +type RecordMetricOption func(o *RecordMetricOptions) + +// RecordMetricOptions represents configuration for a recorded metric. +type RecordMetricOptions struct { + Properties smithy.Properties +} diff --git a/vendor/github.com/aws/smithy-go/metrics/nop.go b/vendor/github.com/aws/smithy-go/metrics/nop.go new file mode 100644 index 00000000..fb374e1f --- /dev/null +++ b/vendor/github.com/aws/smithy-go/metrics/nop.go @@ -0,0 +1,67 @@ +package metrics + +import "context" + +// NopMeterProvider is a no-op metrics implementation. +type NopMeterProvider struct{} + +var _ MeterProvider = (*NopMeterProvider)(nil) + +// Meter returns a meter which creates no-op instruments. +func (NopMeterProvider) Meter(string, ...MeterOption) Meter { + return nopMeter{} +} + +type nopMeter struct{} + +var _ Meter = (*nopMeter)(nil) + +func (nopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[int64]{}, nil +} +func (nopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[float64]{}, nil +} +func (nopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) { + return nopInstrument[float64]{}, nil +} + +type nopInstrument[N any] struct{} + +func (nopInstrument[N]) Add(context.Context, N, ...RecordMetricOption) {} +func (nopInstrument[N]) Sample(context.Context, N, ...RecordMetricOption) {} +func 
(nopInstrument[N]) Record(context.Context, N, ...RecordMetricOption) {} +func (nopInstrument[_]) Stop() {} diff --git a/vendor/github.com/aws/smithy-go/middleware/context.go b/vendor/github.com/aws/smithy-go/middleware/context.go new file mode 100644 index 00000000..f51aa4f0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/context.go @@ -0,0 +1,41 @@ +package middleware + +import "context" + +type ( + serviceIDKey struct{} + operationNameKey struct{} +) + +// WithServiceID adds a service ID to the context, scoped to middleware stack +// values. +// +// This API is called in the client runtime when bootstrapping an operation and +// should not typically be used directly. +func WithServiceID(parent context.Context, id string) context.Context { + return WithStackValue(parent, serviceIDKey{}, id) +} + +// GetServiceID retrieves the service ID from the context. This is typically +// the service shape's name from its Smithy model. Service clients for specific +// systems (e.g. AWS SDK) may use an alternate designated value. +func GetServiceID(ctx context.Context) string { + id, _ := GetStackValue(ctx, serviceIDKey{}).(string) + return id +} + +// WithOperationName adds the operation name to the context, scoped to +// middleware stack values. +// +// This API is called in the client runtime when bootstrapping an operation and +// should not typically be used directly. +func WithOperationName(parent context.Context, id string) context.Context { + return WithStackValue(parent, operationNameKey{}, id) +} + +// GetOperationName retrieves the operation name from the context. This is +// typically the operation shape's name from its Smithy model. +func GetOperationName(ctx context.Context) string { + name, _ := GetStackValue(ctx, operationNameKey{}).(string) + return name +} diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml index 20295cdd..9d94b7cb 100644 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -1,5 +1,4 @@ [dependencies] - "github.com/google/go-cmp" = "v0.5.8" "github.com/jmespath/go-jmespath" = "v0.4.0" [modules] diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go new file mode 100644 index 00000000..004d78f2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go @@ -0,0 +1,30 @@ +package requestcompression + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" +) + +func gzipCompress(input io.Reader) ([]byte, error) { + var b bytes.Buffer + w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression) + if err != nil { + return nil, fmt.Errorf("failed to create gzip writer, %v", err) + } + + inBytes, err := io.ReadAll(input) + if err != nil { + return nil, fmt.Errorf("failed read payload to compress, %v", err) + } + + if _, err = w.Write(inBytes); err != nil { + return nil, fmt.Errorf("failed to write payload to be compressed, %v", err) + } + if err = w.Close(); err != nil { + return nil, fmt.Errorf("failed to flush payload being compressed, %v", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go new file mode 100644 index 00000000..06c16afc --- /dev/null +++ 
b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go @@ -0,0 +1,52 @@ +package requestcompression + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "net/http" +) + +const captureUncompressedRequestID = "CaptureUncompressedRequest" + +// AddCaptureUncompressedRequestMiddleware captures http request before compress encoding for check +func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error { + return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{ + buf: buf, + }, "RequestCompression", middleware.Before) +} + +type captureUncompressedRequestMiddleware struct { + req *http.Request + buf *bytes.Buffer + bytes []byte +} + +// ID returns id of the captureUncompressedRequestMiddleware +func (*captureUncompressedRequestMiddleware) ID() string { + return captureUncompressedRequestID +} + +// HandleSerialize captures request payload before it is compressed by request compression middleware +func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + output middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := input.Request.(*smithyhttp.Request) + if !ok { + return output, metadata, fmt.Errorf("error when retrieving http request") + } + + _, err = io.Copy(m.buf, request.GetStream()) + if err != nil { + return output, metadata, fmt.Errorf("error when copying http request stream: %q", err) + } + if err = request.RewindStream(); err != nil { + return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err) + } + + return next.HandleSerialize(ctx, input) +} diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go new file mode 100644 index 00000000..7c414760 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go @@ -0,0 +1,103 @@ +// Package requestcompression implements runtime support for smithy-modeled +// request compression. +// +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. +package requestcompression + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" + "io" +) + +const MaxRequestMinCompressSizeBytes = 10485760 + +// Enumeration values for supported compress Algorithms. 
+const ( + GZIP = "gzip" +) + +type compressFunc func(io.Reader) ([]byte, error) + +var allowedAlgorithms = map[string]compressFunc{ + GZIP: gzipCompress, +} + +// AddRequestCompression add requestCompression middleware to op stack +func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error { + return stack.Serialize.Add(&requestCompression{ + disableRequestCompression: disabled, + requestMinCompressSizeBytes: minBytes, + compressAlgorithms: algorithms, + }, middleware.After) +} + +type requestCompression struct { + disableRequestCompression bool + requestMinCompressSizeBytes int64 + compressAlgorithms []string +} + +// ID returns the ID of the middleware +func (m requestCompression) ID() string { + return "RequestCompression" +} + +// HandleSerialize gzip compress the request's stream/body if enabled by config fields +func (m requestCompression) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if m.disableRequestCompression { + return next.HandleSerialize(ctx, in) + } + // still need to check requestMinCompressSizeBytes in case it is out of range after service client config + if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes { + return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes) + } + + req, ok := in.Request.(*http.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + for _, algorithm := range m.compressAlgorithms { + compressFunc := allowedAlgorithms[algorithm] + if compressFunc != nil { + if stream := req.GetStream(); stream != nil { + size, found, err := req.StreamLength() + if err != nil { + return out, metadata, fmt.Errorf("error while finding request stream length, %v", err) + } else if !found || size < m.requestMinCompressSizeBytes { + return next.HandleSerialize(ctx, in) + } + + compressedBytes, err := compressFunc(stream) + if err != nil { + return out, metadata, fmt.Errorf("failed to compress request stream, %v", err) + } + + var newReq *http.Request + if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil { + return out, metadata, fmt.Errorf("failed to set request stream, %v", err) + } + *req = *newReq + + if val := req.Header.Get("Content-Encoding"); val != "" { + req.Header.Set("Content-Encoding", fmt.Sprintf("%s, %s", val, algorithm)) + } else { + req.Header.Set("Content-Encoding", algorithm) + } + } + break + } + } + + return next.HandleSerialize(ctx, in) +} diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go index c9af66c0..68df4c4e 100644 --- a/vendor/github.com/aws/smithy-go/properties.go +++ b/vendor/github.com/aws/smithy-go/properties.go @@ -1,9 +1,11 @@ package smithy +import "maps" + // PropertiesReader provides an interface for reading metadata from the // underlying metadata container. type PropertiesReader interface { - Get(key interface{}) interface{} + Get(key any) any } // Properties provides storing and reading metadata values. Keys may be any @@ -12,14 +14,14 @@ type PropertiesReader interface { // The zero value for a Properties instance is ready for reads/writes without // any additional initialization. 
type Properties struct { - values map[interface{}]interface{} + values map[any]any } // Get attempts to retrieve the value the key points to. Returns nil if the // key was not found. // // Panics if key type is not comparable. -func (m *Properties) Get(key interface{}) interface{} { +func (m *Properties) Get(key any) any { m.lazyInit() return m.values[key] } @@ -28,7 +30,7 @@ func (m *Properties) Get(key interface{}) interface{} { // that key it will be replaced with the new value. // // Panics if the key type is not comparable. -func (m *Properties) Set(key, value interface{}) { +func (m *Properties) Set(key, value any) { m.lazyInit() m.values[key] = value } @@ -36,7 +38,7 @@ func (m *Properties) Set(key, value interface{}) { // Has returns whether the key exists in the metadata. // // Panics if the key type is not comparable. -func (m *Properties) Has(key interface{}) bool { +func (m *Properties) Has(key any) bool { m.lazyInit() _, ok := m.values[key] return ok @@ -55,8 +57,13 @@ func (m *Properties) SetAll(other *Properties) { } } +// Values returns a shallow clone of the property set's values. +func (m *Properties) Values() map[any]any { + return maps.Clone(m.values) +} + func (m *Properties) lazyInit() { if m.values == nil { - m.values = map[interface{}]interface{}{} + m.values = map[any]any{} } } diff --git a/vendor/github.com/aws/smithy-go/tracing/context.go b/vendor/github.com/aws/smithy-go/tracing/context.go new file mode 100644 index 00000000..a404ed9d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/tracing/context.go @@ -0,0 +1,96 @@ +package tracing + +import "context" + +type ( + operationTracerKey struct{} + spanLineageKey struct{} +) + +// GetSpan returns the active trace Span on the context. +// +// The boolean in the return indicates whether a Span was actually in the +// context, but a no-op implementation will be returned if not, so callers +// can generally disregard the boolean unless they wish to explicitly confirm +// presence/absence of a Span. +func GetSpan(ctx context.Context) (Span, bool) { + lineage := getLineage(ctx) + if len(lineage) == 0 { + return nopSpan{}, false + } + + return lineage[len(lineage)-1], true +} + +// WithSpan sets the active trace Span on the context. +func WithSpan(parent context.Context, span Span) context.Context { + lineage := getLineage(parent) + if len(lineage) == 0 { + return context.WithValue(parent, spanLineageKey{}, []Span{span}) + } + + lineage = append(lineage, span) + return context.WithValue(parent, spanLineageKey{}, lineage) +} + +// PopSpan pops the current Span off the context, setting the active Span on +// the returned Context back to its parent and returning the REMOVED one. +// +// PopSpan on a context with no active Span will return a no-op instance. +// +// This is mostly necessary for the runtime to manage base trace spans due to +// the wrapped-function nature of the middleware stack. End-users of Smithy +// clients SHOULD NOT generally be using this API. +func PopSpan(parent context.Context) (context.Context, Span) { + lineage := getLineage(parent) + if len(lineage) == 0 { + return parent, nopSpan{} + } + + span := lineage[len(lineage)-1] + lineage = lineage[:len(lineage)-1] + return context.WithValue(parent, spanLineageKey{}, lineage), span +} + +func getLineage(ctx context.Context) []Span { + v := ctx.Value(spanLineageKey{}) + if v == nil { + return nil + } + + return v.([]Span) +} + +// GetOperationTracer returns the embedded operation-scoped Tracer on a +// Context. 
+// +// The boolean in the return indicates whether a Tracer was actually in the +// context, but a no-op implementation will be returned if not, so callers +// can generally disregard the boolean unless they wish to explicitly confirm +// presence/absence of a Tracer. +func GetOperationTracer(ctx context.Context) (Tracer, bool) { + v := ctx.Value(operationTracerKey{}) + if v == nil { + return nopTracer{}, false + } + + return v.(Tracer), true +} + +// WithOperationTracer returns a child Context embedding the given Tracer. +// +// The runtime will use this embed a scoped tracer for client operations, +// Smithy/SDK client callers DO NOT need to do this explicitly. +func WithOperationTracer(parent context.Context, tracer Tracer) context.Context { + return context.WithValue(parent, operationTracerKey{}, tracer) +} + +// StartSpan is a convenience API for creating tracing Spans from a Context. +// +// StartSpan uses the operation-scoped Tracer, previously stored using +// [WithOperationTracer], to start the Span. If a Tracer has not been embedded +// the returned Span will be a no-op implementation. +func StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { + tracer, _ := GetOperationTracer(ctx) + return tracer.StartSpan(ctx, name, opts...) +} diff --git a/vendor/github.com/aws/smithy-go/tracing/nop.go b/vendor/github.com/aws/smithy-go/tracing/nop.go new file mode 100644 index 00000000..573d28b1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/tracing/nop.go @@ -0,0 +1,32 @@ +package tracing + +import "context" + +// NopTracerProvider is a no-op tracing implementation. +type NopTracerProvider struct{} + +var _ TracerProvider = (*NopTracerProvider)(nil) + +// Tracer returns a tracer which creates no-op spans. +func (NopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return nopTracer{} +} + +type nopTracer struct{} + +var _ Tracer = (*nopTracer)(nil) + +func (nopTracer) StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) { + return ctx, nopSpan{} +} + +type nopSpan struct{} + +var _ Span = (*nopSpan)(nil) + +func (nopSpan) Name() string { return "" } +func (nopSpan) Context() SpanContext { return SpanContext{} } +func (nopSpan) AddEvent(string, ...EventOption) {} +func (nopSpan) SetProperty(any, any) {} +func (nopSpan) SetStatus(SpanStatus) {} +func (nopSpan) End() {} diff --git a/vendor/github.com/aws/smithy-go/tracing/tracing.go b/vendor/github.com/aws/smithy-go/tracing/tracing.go new file mode 100644 index 00000000..089ed393 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/tracing/tracing.go @@ -0,0 +1,95 @@ +// Package tracing defines tracing APIs to be used by Smithy clients. +package tracing + +import ( + "context" + + "github.com/aws/smithy-go" +) + +// SpanStatus records the "success" state of an observed span. +type SpanStatus int + +// Enumeration of SpanStatus. +const ( + SpanStatusUnset SpanStatus = iota + SpanStatusOK + SpanStatusError +) + +// SpanKind indicates the nature of the work being performed. +type SpanKind int + +// Enumeration of SpanKind. +const ( + SpanKindInternal SpanKind = iota + SpanKindClient + SpanKindServer + SpanKindProducer + SpanKindConsumer +) + +// TracerProvider is the entry point for creating client traces. +type TracerProvider interface { + Tracer(scope string, opts ...TracerOption) Tracer +} + +// TracerOption applies configuration to a tracer. +type TracerOption func(o *TracerOptions) + +// TracerOptions represent configuration for tracers. 
+type TracerOptions struct { + Properties smithy.Properties +} + +// Tracer is the entry point for creating observed client Spans. +// +// Spans created by tracers propagate by existing on the Context. Consumers of +// the API can use [GetSpan] to pull the active Span from a Context. +// +// Creation of child Spans is implicit through Context persistence. If +// CreateSpan is called with a Context that holds a Span, the result will be a +// child of that Span. +type Tracer interface { + StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) +} + +// SpanOption applies configuration to a span. +type SpanOption func(o *SpanOptions) + +// SpanOptions represent configuration for span events. +type SpanOptions struct { + Kind SpanKind + Properties smithy.Properties +} + +// Span records a conceptually individual unit of work that takes place in a +// Smithy client operation. +type Span interface { + Name() string + Context() SpanContext + AddEvent(name string, opts ...EventOption) + SetStatus(status SpanStatus) + SetProperty(k, v any) + End() +} + +// EventOption applies configuration to a span event. +type EventOption func(o *EventOptions) + +// EventOptions represent configuration for span events. +type EventOptions struct { + Properties smithy.Properties +} + +// SpanContext uniquely identifies a Span. +type SpanContext struct { + TraceID string + SpanID string + IsRemote bool +} + +// IsValid is true when a span has nonzero trace and span IDs. +func (ctx *SpanContext) IsValid() bool { + return len(ctx.TraceID) != 0 && len(ctx.SpanID) != 0 +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go index e691c69b..0fceae81 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/client.go +++ b/vendor/github.com/aws/smithy-go/transport/http/client.go @@ -6,7 +6,9 @@ import ( "net/http" smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" ) // ClientDo provides the interface for custom HTTP client implementations. @@ -27,13 +29,30 @@ func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) { // implementation is http.Client. type ClientHandler struct { client ClientDo + + Meter metrics.Meter // For HTTP client metrics. } // NewClientHandler returns an initialized middleware handler for the client. +// +// Deprecated: Use [NewClientHandlerWithOptions]. func NewClientHandler(client ClientDo) ClientHandler { - return ClientHandler{ + return NewClientHandlerWithOptions(client) +} + +// NewClientHandlerWithOptions returns an initialized middleware handler for the client +// with applied options. 
+func NewClientHandlerWithOptions(client ClientDo, opts ...func(*ClientHandler)) ClientHandler { + h := ClientHandler{ client: client, } + for _, opt := range opts { + opt(&h) + } + if h.Meter == nil { + h.Meter = metrics.NopMeterProvider{}.Meter("") + } + return h } // Handle implements the middleware Handler interface, that will invoke the @@ -42,6 +61,14 @@ func NewClientHandler(client ClientDo) ClientHandler { func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( out interface{}, metadata middleware.Metadata, err error, ) { + ctx, span := tracing.StartSpan(ctx, "DoHTTPRequest") + defer span.End() + + ctx, client, err := withMetrics(ctx, c.client, c.Meter) + if err != nil { + return nil, metadata, fmt.Errorf("instrument with HTTP metrics: %w", err) + } + req, ok := input.(*Request) if !ok { return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input) @@ -52,7 +79,17 @@ func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( return nil, metadata, err } - resp, err := c.client.Do(builtRequest) + span.SetProperty("http.method", req.Method) + span.SetProperty("http.request_content_length", -1) // at least indicate unknown + length, ok, err := req.StreamLength() + if err != nil { + return nil, metadata, err + } + if ok { + span.SetProperty("http.request_content_length", length) + } + + resp, err := client.Do(builtRequest) if resp == nil { // Ensure a http response value is always present to prevent unexpected // panics. @@ -79,6 +116,10 @@ func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( _ = builtRequest.Body.Close() } + span.SetProperty("net.protocol.version", fmt.Sprintf("%d.%d", resp.ProtoMajor, resp.ProtoMinor)) + span.SetProperty("http.status_code", resp.StatusCode) + span.SetProperty("http.response_content_length", resp.ContentLength) + return &Response{Response: resp}, metadata, err } diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go index 6b290fec..db9801be 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/host.go +++ b/vendor/github.com/aws/smithy-go/transport/http/host.go @@ -69,7 +69,7 @@ func ValidPortNumber(port string) bool { return true } -// ValidHostLabel returns whether the label is a valid RFC 3986 host abel. +// ValidHostLabel returns whether the label is a valid RFC 3986 host label. func ValidHostLabel(label string) bool { if l := len(label); l == 0 || l > 63 { return false diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go new file mode 100644 index 00000000..d1beaa59 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/metrics.go @@ -0,0 +1,198 @@ +package http + +import ( + "context" + "crypto/tls" + "net/http" + "net/http/httptrace" + "sync/atomic" + "time" + + "github.com/aws/smithy-go/metrics" +) + +var now = time.Now + +// withMetrics instruments an HTTP client and context to collect HTTP metrics. 
+func withMetrics(parent context.Context, client ClientDo, meter metrics.Meter) ( + context.Context, ClientDo, error, +) { + hm, err := newHTTPMetrics(meter) + if err != nil { + return nil, nil, err + } + + ctx := httptrace.WithClientTrace(parent, &httptrace.ClientTrace{ + DNSStart: hm.DNSStart, + ConnectStart: hm.ConnectStart, + TLSHandshakeStart: hm.TLSHandshakeStart, + + GotConn: hm.GotConn(parent), + PutIdleConn: hm.PutIdleConn(parent), + ConnectDone: hm.ConnectDone(parent), + DNSDone: hm.DNSDone(parent), + TLSHandshakeDone: hm.TLSHandshakeDone(parent), + GotFirstResponseByte: hm.GotFirstResponseByte(parent), + }) + return ctx, &timedClientDo{client, hm}, nil +} + +type timedClientDo struct { + ClientDo + hm *httpMetrics +} + +func (c *timedClientDo) Do(r *http.Request) (*http.Response, error) { + c.hm.doStart.Store(now()) + resp, err := c.ClientDo.Do(r) + + c.hm.DoRequestDuration.Record(r.Context(), c.hm.doStart.Elapsed()) + return resp, err +} + +type httpMetrics struct { + DNSLookupDuration metrics.Float64Histogram // client.http.connections.dns_lookup_duration + ConnectDuration metrics.Float64Histogram // client.http.connections.acquire_duration + TLSHandshakeDuration metrics.Float64Histogram // client.http.connections.tls_handshake_duration + ConnectionUsage metrics.Int64UpDownCounter // client.http.connections.usage + + DoRequestDuration metrics.Float64Histogram // client.http.do_request_duration + TimeToFirstByte metrics.Float64Histogram // client.http.time_to_first_byte + + doStart safeTime + dnsStart safeTime + connectStart safeTime + tlsStart safeTime +} + +func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) { + hm := &httpMetrics{} + + var err error + hm.DNSLookupDuration, err = meter.Float64Histogram("client.http.connections.dns_lookup_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "The time it takes a request to perform DNS lookup." + }) + if err != nil { + return nil, err + } + hm.ConnectDuration, err = meter.Float64Histogram("client.http.connections.acquire_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "The time it takes a request to acquire a connection." + }) + if err != nil { + return nil, err + } + hm.TLSHandshakeDuration, err = meter.Float64Histogram("client.http.connections.tls_handshake_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "The time it takes an HTTP request to perform the TLS handshake." + }) + if err != nil { + return nil, err + } + hm.ConnectionUsage, err = meter.Int64UpDownCounter("client.http.connections.usage", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "{connection}" + o.Description = "Current state of connections pool." + }) + if err != nil { + return nil, err + } + hm.DoRequestDuration, err = meter.Float64Histogram("client.http.do_request_duration", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "Time spent performing an entire HTTP transaction." + }) + if err != nil { + return nil, err + } + hm.TimeToFirstByte, err = meter.Float64Histogram("client.http.time_to_first_byte", func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = "Time from start of transaction to when the first response byte is available." 
+ }) + if err != nil { + return nil, err + } + + return hm, nil +} + +func (m *httpMetrics) DNSStart(httptrace.DNSStartInfo) { + m.dnsStart.Store(now()) +} + +func (m *httpMetrics) ConnectStart(string, string) { + m.connectStart.Store(now()) +} + +func (m *httpMetrics) TLSHandshakeStart() { + m.tlsStart.Store(now()) +} + +func (m *httpMetrics) GotConn(ctx context.Context) func(httptrace.GotConnInfo) { + return func(httptrace.GotConnInfo) { + m.addConnAcquired(ctx, 1) + } +} + +func (m *httpMetrics) PutIdleConn(ctx context.Context) func(error) { + return func(error) { + m.addConnAcquired(ctx, -1) + } +} + +func (m *httpMetrics) DNSDone(ctx context.Context) func(httptrace.DNSDoneInfo) { + return func(httptrace.DNSDoneInfo) { + m.DNSLookupDuration.Record(ctx, m.dnsStart.Elapsed()) + } +} + +func (m *httpMetrics) ConnectDone(ctx context.Context) func(string, string, error) { + return func(string, string, error) { + m.ConnectDuration.Record(ctx, m.connectStart.Elapsed()) + } +} + +func (m *httpMetrics) TLSHandshakeDone(ctx context.Context) func(tls.ConnectionState, error) { + return func(tls.ConnectionState, error) { + m.TLSHandshakeDuration.Record(ctx, m.tlsStart.Elapsed()) + } +} + +func (m *httpMetrics) GotFirstResponseByte(ctx context.Context) func() { + return func() { + m.TimeToFirstByte.Record(ctx, m.doStart.Elapsed()) + } +} + +func (m *httpMetrics) addConnAcquired(ctx context.Context, incr int64) { + m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("state", "acquired") + }) +} + +// Not used: it is recommended to track acquired vs idle conn, but we can't +// determine when something is truly idle with the current HTTP client hooks +// available to us. +func (m *httpMetrics) addConnIdle(ctx context.Context, incr int64) { + m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("state", "idle") + }) +} + +type safeTime struct { + atomic.Value // time.Time +} + +func (st *safeTime) Store(v time.Time) { + st.Value.Store(v) +} + +func (st *safeTime) Load() time.Time { + t, _ := st.Value.Load().(time.Time) + return t +} + +func (st *safeTime) Elapsed() float64 { + end := now() + elapsed := end.Sub(st.Load()) + return float64(elapsed) / 1e9 +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go index 1d3b218a..914338f2 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go @@ -2,10 +2,10 @@ package http import ( "context" + "io" + "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" - "io" - "io/ioutil" ) // AddErrorCloseResponseBodyMiddleware adds the middleware to automatically @@ -30,7 +30,7 @@ func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( if err != nil { if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { // Consume the full body to prevent TCP connection resets on some platforms - _, _ = io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) // Do not validate that the response closes successfully. 
resp.Body.Close() } @@ -64,7 +64,7 @@ func (m *closeResponseBody) HandleDeserialize( if resp, ok := out.RawResponse.(*Response); ok { // Consume the full body to prevent TCP connection resets on some platforms - _, copyErr := io.Copy(ioutil.Discard, resp.Body) + _, copyErr := io.Copy(io.Discard, resp.Body) if copyErr != nil { middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse") } diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go index 7177d6f9..5cbf6f10 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/request.go +++ b/vendor/github.com/aws/smithy-go/transport/http/request.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -167,7 +166,7 @@ func (r *Request) Build(ctx context.Context) *http.Request { switch stream := r.stream.(type) { case *io.PipeReader: - req.Body = ioutil.NopCloser(stream) + req.Body = io.NopCloser(stream) req.ContentLength = -1 default: // HTTP Client Request must only have a non-nil body if the @@ -175,7 +174,7 @@ func (r *Request) Build(ctx context.Context) *http.Request { // Client will interpret a non-nil body and ContentLength 0 as // "unknown". This is unwanted behavior. if req.ContentLength != 0 && r.stream != nil { - req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream)) + req.Body = iointernal.NewSafeReadCloser(io.NopCloser(stream)) } } diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index 47a6a46e..00000000 --- a/vendor/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.7 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/context.go b/vendor/github.com/cenkalti/backoff/context.go deleted file mode 100644 index 7706faa2..00000000 --- a/vendor/github.com/cenkalti/backoff/context.go +++ /dev/null @@ -1,63 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. 
-type BackOffContext interface { - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func ensureContext(b BackOff) BackOffContext { - if cb, ok := b.(BackOffContext); ok { - return cb - } - return WithContext(b, context.Background()) -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - } - next := b.BackOff.NextBackOff() - if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { - return Stop - } - return next -} diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/retry.go deleted file mode 100644 index e936a506..00000000 --- a/vendor/github.com/cenkalti/backoff/retry.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import "time" - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - var err error - var next time.Duration - var t *time.Timer - - cb := ensureContext(b) - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - if permanent, ok := err.(*PermanentError); ok { - return permanent.Err - } - - if next = cb.NextBackOff(); next == Stop { - return err - } - - if notify != nil { - notify(err, next) - } - - if t == nil { - t = time.NewTimer(next) - defer t.Stop() - } else { - t.Reset(next) - } - - select { - case <-cb.Context().Done(): - return err - case <-t.C: - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -// Permanent wraps the given err in a *PermanentError. 
-func Permanent(err error) *PermanentError { - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/tries.go b/vendor/github.com/cenkalti/backoff/tries.go deleted file mode 100644 index cfeefd9b..00000000 --- a/vendor/github.com/cenkalti/backoff/tries.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. -*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore similarity index 94% rename from vendor/github.com/cenkalti/backoff/.gitignore rename to vendor/github.com/cenkalti/backoff/v5/.gitignore index 00268614..50d95c54 100644 --- a/vendor/github.com/cenkalti/backoff/.gitignore +++ b/vendor/github.com/cenkalti/backoff/v5/.gitignore @@ -20,3 +20,6 @@ _cgo_export.* _testmain.go *.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md new file mode 100644 index 00000000..658c3743 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [5.0.0] - 2024-12-19 + +### Added + +- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry. + +### Changed + +- Retry function now accepts additional options for specifying max number of tries and max elapsed time. +- Retry function now accepts a context.Context. +- Operation function signature changed to return result (any type) and error. + +### Removed + +- RetryNotify* and RetryWithData functions. Only single Retry function remains. +- Optional arguments from ExponentialBackoff constructor. +- Clock and Timer interfaces. + +### Fixed + +- The original error is returned from Retry if there's a PermanentError. (#144) +- The Retry function respects the wrapped PermanentError. 
(#140) diff --git a/vendor/github.com/cenkalti/backoff/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE similarity index 100% rename from vendor/github.com/cenkalti/backoff/LICENSE rename to vendor/github.com/cenkalti/backoff/v5/LICENSE diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md similarity index 61% rename from vendor/github.com/cenkalti/backoff/README.md rename to vendor/github.com/cenkalti/backoff/v5/README.md index 55ebc98f..4611b1d1 100644 --- a/vendor/github.com/cenkalti/backoff/README.md +++ b/vendor/github.com/cenkalti/backoff/v5/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -9,7 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold ## Usage -See https://godoc.org/github.com/cenkalti/backoff#pkg-examples +Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end. + +For most cases, use `Retry` function. See [example_test.go][example] for an example. + +If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed. ## Contributing @@ -17,14 +21,11 @@ See https://godoc.org/github.com/cenkalti/backoff#pkg-examples * Please don't send a PR without opening an issue and discussing it first. * If proposed change is not a common use case, I will probably not accept it. -[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ +[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go +[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go similarity index 87% rename from vendor/github.com/cenkalti/backoff/backoff.go rename to vendor/github.com/cenkalti/backoff/v5/backoff.go index 3676ee40..dd2b24ca 100644 --- a/vendor/github.com/cenkalti/backoff/backoff.go +++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go @@ -15,16 +15,16 @@ import "time" // BackOff is a backoff policy for retrying an operation. type BackOff interface { // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. + // backoff.Stop to indicate that no more retries should be made. // // Example usage: // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. 
- // } else { - // // Sleep for duration and retry operation. - // } + // duration := backoff.NextBackOff() + // if duration == backoff.Stop { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } // NextBackOff() time.Duration diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go new file mode 100644 index 00000000..beb2b38a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/error.go @@ -0,0 +1,46 @@ +package backoff + +import ( + "fmt" + "time" +) + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} + +// Error returns a string representation of the Permanent error. +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Unwrap returns the wrapped error. +func (e *PermanentError) Unwrap() error { + return e.Err +} + +// RetryAfterError signals that the operation should be retried after the given duration. +type RetryAfterError struct { + Duration time.Duration +} + +// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying. +func RetryAfter(seconds int) error { + return &RetryAfterError{Duration: time.Duration(seconds) * time.Second} +} + +// Error returns a string representation of the RetryAfter error. +func (e *RetryAfterError) Error() string { + return fmt.Sprintf("retry after %s", e.Duration) +} diff --git a/vendor/github.com/cenkalti/backoff/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go similarity index 59% rename from vendor/github.com/cenkalti/backoff/exponential.go rename to vendor/github.com/cenkalti/backoff/v5/exponential.go index a031a659..c1f3e442 100644 --- a/vendor/github.com/cenkalti/backoff/exponential.go +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -11,17 +11,17 @@ period for each retry attempt using a randomization function that grows exponent NextBackOff() is calculated using the following formula: - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) In other words NextBackOff() will range between the randomization factor percentage below and above the retry interval. For example, given the following parameters: - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, multiplied by the exponential, that is, between 2 and 6 seconds. @@ -36,18 +36,18 @@ The elapsed time can be reset by calling Reset(). 
Example: Given the following default arguments, for 10 tries the sequence will be, and assuming we go over the MaxElapsedTime on the 10th try: - Request # RetryInterval (seconds) Randomized Interval (seconds) + Request # RetryInterval (seconds) Randomized Interval (seconds) - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop Note: Implementation is not thread-safe. */ @@ -56,18 +56,8 @@ type ExponentialBackOff struct { RandomizationFactor float64 Multiplier float64 MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Clock Clock currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time } // Default values for ExponentialBackOff. @@ -76,57 +66,35 @@ const ( DefaultRandomizationFactor = 0.5 DefaultMultiplier = 1.5 DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute ) // NewExponentialBackOff creates an instance of ExponentialBackOff using default values. func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ + return &ExponentialBackOff{ InitialInterval: DefaultInitialInterval, RandomizationFactor: DefaultRandomizationFactor, Multiplier: DefaultMultiplier, MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Clock: SystemClock, } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() } -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - // Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. func (b *ExponentialBackOff) Reset() { b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() } // NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +// +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop + if b.currentInterval == 0 { + b.currentInterval = b.InitialInterval } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) -} -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. 
-func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + return next } // Increments the current interval by multiplying it with the multiplier. @@ -140,8 +108,12 @@ func (b *ExponentialBackOff) incrementCurrentInterval() { } // Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +// +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } var delta = randomizationFactor * float64(currentInterval) var minInterval = float64(currentInterval) - delta var maxInterval = float64(currentInterval) + delta diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go new file mode 100644 index 00000000..e43f47fb --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -0,0 +1,139 @@ +package backoff + +import ( + "context" + "errors" + "time" +) + +// DefaultMaxElapsedTime sets a default limit for the total retry duration. +const DefaultMaxElapsedTime = 15 * time.Minute + +// Operation is a function that attempts an operation and may be retried. +type Operation[T any] func() (T, error) + +// Notify is a function called on operation error with the error and backoff duration. +type Notify func(error, time.Duration) + +// retryOptions holds configuration settings for the retry mechanism. +type retryOptions struct { + BackOff BackOff // Strategy for calculating backoff periods. + Timer timer // Timer to manage retry delays. + Notify Notify // Optional function to notify on each retry error. + MaxTries uint // Maximum number of retry attempts. + MaxElapsedTime time.Duration // Maximum total time for all retries. +} + +type RetryOption func(*retryOptions) + +// WithBackOff configures a custom backoff strategy. +func WithBackOff(b BackOff) RetryOption { + return func(args *retryOptions) { + args.BackOff = b + } +} + +// withTimer sets a custom timer for managing delays between retries. +func withTimer(t timer) RetryOption { + return func(args *retryOptions) { + args.Timer = t + } +} + +// WithNotify sets a notification function to handle retry errors. +func WithNotify(n Notify) RetryOption { + return func(args *retryOptions) { + args.Notify = n + } +} + +// WithMaxTries limits the number of retry attempts. +func WithMaxTries(n uint) RetryOption { + return func(args *retryOptions) { + args.MaxTries = n + } +} + +// WithMaxElapsedTime limits the total duration for retry attempts. +func WithMaxElapsedTime(d time.Duration) RetryOption { + return func(args *retryOptions) { + args.MaxElapsedTime = d + } +} + +// Retry attempts the operation until success, a permanent error, or backoff completion. +// It ensures the operation is executed at least once. +// +// Returns the operation result or error if retries are exhausted or context is cancelled. +func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) { + // Initialize default retry options. 
+ args := &retryOptions{ + BackOff: NewExponentialBackOff(), + Timer: &defaultTimer{}, + MaxElapsedTime: DefaultMaxElapsedTime, + } + + // Apply user-provided options to the default settings. + for _, opt := range opts { + opt(args) + } + + defer args.Timer.Stop() + + startedAt := time.Now() + args.BackOff.Reset() + for numTries := uint(1); ; numTries++ { + // Execute the operation. + res, err := operation() + if err == nil { + return res, nil + } + + // Stop retrying if maximum tries exceeded. + if args.MaxTries > 0 && numTries >= args.MaxTries { + return res, err + } + + // Handle permanent errors without retrying. + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, err + } + + // Stop retrying if context is cancelled. + if cerr := context.Cause(ctx); cerr != nil { + return res, cerr + } + + // Calculate next backoff duration. + next := args.BackOff.NextBackOff() + if next == Stop { + return res, err + } + + // Reset backoff if RetryAfterError is encountered. + var retryAfter *RetryAfterError + if errors.As(err, &retryAfter) { + next = retryAfter.Duration + args.BackOff.Reset() + } + + // Stop retrying if maximum elapsed time exceeded. + if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime { + return res, err + } + + // Notify on error if a notifier function is provided. + if args.Notify != nil { + args.Notify(err, next) + } + + // Wait for the next backoff period or context cancellation. + args.Timer.Start(next) + select { + case <-args.Timer.C(): + case <-ctx.Done(): + return res, context.Cause(ctx) + } + } +} diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go similarity index 90% rename from vendor/github.com/cenkalti/backoff/ticker.go rename to vendor/github.com/cenkalti/backoff/v5/ticker.go index e41084b0..f0d4b2ae 100644 --- a/vendor/github.com/cenkalti/backoff/ticker.go +++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go @@ -12,7 +12,8 @@ import ( type Ticker struct { C <-chan time.Time c chan time.Time - b BackOffContext + b BackOff + timer timer stop chan struct{} stopOnce sync.Once } @@ -26,10 +27,11 @@ type Ticker struct { func NewTicker(b BackOff) *Ticker { c := make(chan time.Time) t := &Ticker{ - C: c, - c: c, - b: ensureContext(b), - stop: make(chan struct{}), + C: c, + c: c, + b: b, + timer: &defaultTimer{}, + stop: make(chan struct{}), } t.b.Reset() go t.run() @@ -59,8 +61,6 @@ func (t *Ticker) run() { case <-t.stop: t.c = nil // Prevent future ticks from being sent to the channel. return - case <-t.b.Context().Done(): - return } } } @@ -78,5 +78,6 @@ func (t *Ticker) send(tick time.Time) <-chan time.Time { return nil } - return time.After(next) + t.timer.Start(next) + return t.timer.C() } diff --git a/vendor/github.com/cenkalti/backoff/v5/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go new file mode 100644 index 00000000..a8953097 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v5/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. 
+func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/danieljoos/wincred/sys_unsupported.go b/vendor/github.com/danieljoos/wincred/sys_unsupported.go index b47bccf8..746639ad 100644 --- a/vendor/github.com/danieljoos/wincred/sys_unsupported.go +++ b/vendor/github.com/danieljoos/wincred/sys_unsupported.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package wincred @@ -17,6 +18,7 @@ const ( sysERROR_NOT_FOUND = syscall.Errno(1) sysERROR_INVALID_PARAMETER = syscall.Errno(1) + sysERROR_BAD_USERNAME = syscall.Errno(1) ) func sysCredRead(...interface{}) (*Credential, error) { diff --git a/vendor/github.com/dvsekhvalnov/jose2go/README.md b/vendor/github.com/dvsekhvalnov/jose2go/README.md index bbf0ef75..86415e57 100644 --- a/vendor/github.com/dvsekhvalnov/jose2go/README.md +++ b/vendor/github.com/dvsekhvalnov/jose2go/README.md @@ -15,6 +15,10 @@ Extensively unit tested and cross tested (100+ tests) for compatibility with [jo Used in production. GA ready. Current version is 1.6. ## Important +v1.8 added experimental RSA-OAEP-384 and RSA-OAEP-512 key management algorithms + +v1.7 introduced deflate decompression memory limits to avoid denial-of-service attacks aka 'deflate-bomb'. See [Customizing compression](#customizing-compression) section for details. + v1.6 security tuning options v1.5 bug fix release @@ -73,7 +77,7 @@ token, err := jose.Encrypt(payload, jose.DIR, jose.A128GCM, key, jose.Zip(jose.D **Encryption** - RSAES OAEP (using SHA-1 and MGF1 with SHA-1) encryption with A128CBC-HS256, A192CBC-HS384, A256CBC-HS512, A128GCM, A192GCM, A256GCM -- RSAES OAEP 256 (using SHA-256 and MGF1 with SHA-256) encryption with A128CBC-HS256, A192CBC-HS384, A256CBC-HS512, A128GCM, A192GCM, A256GCM +- RSAES OAEP 256, 384, 512 (using SHA-256, 384, 512 and MGF1 with SHA-256, 384, 512) encryption with A128CBC-HS256, A192CBC-HS384, A256CBC-HS512, A128GCM, A192GCM, A256GCM - RSAES-PKCS1-V1_5 encryption with A128CBC-HS256, A192CBC-HS384, A256CBC-HS512, A128GCM, A192GCM, A256GCM - A128KW, A192KW, A256KW encryption with A128CBC-HS256, A192CBC-HS384, A256CBC-HS512, A128GCM, A192GCM, A256GCM - A128GCMKW, A192GCMKW, A256GCMKW encryption with A128CBC-HS256, A192CBC-HS384, A256CBC-HS512, A128GCM, A192GCM, A256GCM @@ -215,8 +219,8 @@ func main() { ``` ### Creating encrypted tokens -#### RSA-OAEP-256, RSA-OAEP and RSA1\_5 key management algorithm -RSA-OAEP-256, RSA-OAEP and RSA1_5 key management expecting `*rsa.PublicKey` public key of corresponding length. +#### RSA-OAEP-512, RSA-OAEP-384, RSA-OAEP-256, RSA-OAEP and RSA1\_5 key management algorithm +RSA-OAEP-512, RSA-OAEP-384, RSA-OAEP-256, RSA-OAEP and RSA1_5 key management expecting `*rsa.PublicKey` public key of corresponding length. 
```Go package main @@ -487,7 +491,7 @@ func main() { } ``` -**RSA-OAEP-256**, **RSA-OAEP** and **RSA1_5** key management algorithms expecting `*rsa.PrivateKey` private key of corresponding length: +**RSA-OAEP-512**, **RSA-OAEP-384**, **RSA-OAEP-256**, **RSA-OAEP** and **RSA1_5** key management algorithms expecting `*rsa.PrivateKey` private key of corresponding length: ```Go package main @@ -997,7 +1001,24 @@ test, headers, err := Decode(token, func(headers map[string]interface{}, payload }) ``` +### Customizing compression +There were denial-of-service attacks reported on JWT libraries that support deflate compression by constructing a malicious payload that explodes in terms of RAM on decompression. See [#33](https://github.com/dvsekhvalnov/jose2go/issues/33) for details. + +As of v1.7.0 `jose2go` limits the decompression buffer to 250Kb to limit memory consumption and additionally provides a way to adjust the limit according to specific scenarios: + +```Go + // Override compression alg with new limits (10Kb example) + jose.RegisterJwc(jose.NewDeflate(10240)) +``` + ## Changelog +### 1.8 +- RSA-OAEP-384 and RSA-OAEP-512 key management algorithms + +### 1.7 +- 250Kb limit on decompression buffer +- ability to register deflate compressor with custom limits + ### 1.6 - ability to deregister specific algorithms - configurable min/max restrictions for PBES2-HS256+A128KW, PBES2-HS384+A192KW, PBES2-HS512+A256KW diff --git a/vendor/github.com/dvsekhvalnov/jose2go/deflate.go b/vendor/github.com/dvsekhvalnov/jose2go/deflate.go index c788f5bd..b46cdf9f 100644 --- a/vendor/github.com/dvsekhvalnov/jose2go/deflate.go +++ b/vendor/github.com/dvsekhvalnov/jose2go/deflate.go @@ -3,15 +3,27 @@ package jose import ( "bytes" "compress/flate" - "io/ioutil" + "errors" + "io" ) +var ErrSizeExceeded = errors.New("Deflate stream size exceeded limit.") + func init() { - RegisterJwc(new(Deflate)) + // 250Kb limited decompression buffer + RegisterJwc(NewDeflate(250 * 1024)) } // Deflate compression algorithm implementation -type Deflate struct {} +type Deflate struct { + maxBufferSizeBytes int64 +} + +func NewDeflate(maxBufferSizeBytes int64) JwcAlgorithm { + return &Deflate{ + maxBufferSizeBytes: maxBufferSizeBytes, + } +} func (alg *Deflate) Name() string { return DEF @@ -19,21 +31,43 @@ func (alg *Deflate) Name() string { func (alg *Deflate) Compress(plainText []byte) []byte { var buf bytes.Buffer - deflate,_ := flate.NewWriter(&buf, 8) //level=DEFLATED - + deflate, _ := flate.NewWriter(&buf, 8) //level=DEFLATED + deflate.Write(plainText) deflate.Close() - + return buf.Bytes() } -func (alg *Deflate) Decompress(compressedText []byte) []byte { - - enflated,_ := ioutil.ReadAll( - flate.NewReader( - bytes.NewReader(compressedText))) - - return enflated +func (alg *Deflate) Decompress(compressedText []byte) ([]byte, error) { + enflated, err := io.ReadAll( + newMaxBytesReader(alg.maxBufferSizeBytes, + flate.NewReader( + bytes.NewReader(compressedText)))) + + return enflated, err } +// Max bytes reader +type maxBytesReader struct { + reader io.Reader + limit int64 +} + +func newMaxBytesReader(limit int64, r io.Reader) io.Reader { + return &maxBytesReader{reader: r, limit: limit} +} +func (mbr *maxBytesReader) Read(p []byte) (n int, err error) { + if mbr.limit <= 0 { + return 0, ErrSizeExceeded + } + + if int64(len(p)) > mbr.limit { + p = p[0:mbr.limit] + } + + n, err = mbr.reader.Read(p) + mbr.limit -= int64(n) + return +} diff --git a/vendor/github.com/dvsekhvalnov/jose2go/jose.go
b/vendor/github.com/dvsekhvalnov/jose2go/jose.go index 3549a918..d1924407 100644 --- a/vendor/github.com/dvsekhvalnov/jose2go/jose.go +++ b/vendor/github.com/dvsekhvalnov/jose2go/jose.go @@ -37,6 +37,8 @@ const ( RSA1_5 = "RSA1_5" //RSAES with PKCS #1 v1.5 padding, RFC 3447 RSA_OAEP = "RSA-OAEP" //RSAES using Optimal Assymetric Encryption Padding, RFC 3447 RSA_OAEP_256 = "RSA-OAEP-256" //RSAES using Optimal Assymetric Encryption Padding with SHA-256, RFC 3447 + RSA_OAEP_384 = "RSA-OAEP-384" //RSAES using Optimal Assymetric Encryption Padding with SHA-384, RFC 3447 + RSA_OAEP_512 = "RSA-OAEP-512" //RSAES using Optimal Assymetric Encryption Padding with SHA-512, RFC 3447 A128KW = "A128KW" //AES Key Wrap Algorithm using 128 bit keys, RFC 3394 A192KW = "A192KW" //AES Key Wrap Algorithm using 192 bit keys, RFC 3394 A256KW = "A256KW" //AES Key Wrap Algorithm using 256 bit keys, RFC 3394 @@ -140,7 +142,7 @@ type JwsAlgorithm interface { // JwcAlgorithm is a contract for implementing compression algorithm type JwcAlgorithm interface { Compress(plainText []byte) []byte - Decompress(compressedText []byte) []byte + Decompress(compressedText []byte) ([]byte, error) Name() string } @@ -427,7 +429,9 @@ func decrypt(parts [][]byte, key interface{}) (plainText []byte, headers map[str return nil, nil, errors.New(fmt.Sprintf("jwt.decrypt(): Unknown compression algorithm '%v'", zip)) } - plainBytes = zipAlg.Decompress(plainBytes) + if plainBytes, err = zipAlg.Decompress(plainBytes); err != nil { + return nil, nil, err + } } return plainBytes, jwtHeader, nil diff --git a/vendor/github.com/dvsekhvalnov/jose2go/rsa_oaep.go b/vendor/github.com/dvsekhvalnov/jose2go/rsa_oaep.go index b0d1b52a..ab358d08 100644 --- a/vendor/github.com/dvsekhvalnov/jose2go/rsa_oaep.go +++ b/vendor/github.com/dvsekhvalnov/jose2go/rsa_oaep.go @@ -1,57 +1,72 @@ package jose import ( - "errors" - "crypto/rsa" "crypto/rand" - "hash" + "crypto/rsa" "crypto/sha1" "crypto/sha256" + "crypto/sha512" + "errors" + "hash" + "github.com/dvsekhvalnov/jose2go/arrays" ) // RS-AES using OAEP key management algorithm implementation func init() { - RegisterJwa(&RsaOaep {shaSizeBits:1}) - RegisterJwa(&RsaOaep {shaSizeBits:256}) + RegisterJwa(&RsaOaep{shaSizeBits: 1}) + RegisterJwa(&RsaOaep{shaSizeBits: 256}) + RegisterJwa(&RsaOaep{shaSizeBits: 384}) + RegisterJwa(&RsaOaep{shaSizeBits: 512}) } -type RsaOaep struct{ +type RsaOaep struct { shaSizeBits int - // func shaF() hash.Hash } func (alg *RsaOaep) Name() string { switch alg.shaSizeBits { - case 1: return RSA_OAEP - default: return RSA_OAEP_256 + case 1: + return RSA_OAEP + case 256: + return RSA_OAEP_256 + case 384: + return RSA_OAEP_384 + default: + return RSA_OAEP_512 } } func (alg *RsaOaep) WrapNewKey(cekSizeBits int, key interface{}, header map[string]interface{}) (cek []byte, encryptedCek []byte, err error) { - if pubKey,ok:=key.(*rsa.PublicKey);ok { - if cek,err = arrays.Random(cekSizeBits>>3);err==nil { - encryptedCek,err=rsa.EncryptOAEP(alg.sha(),rand.Reader,pubKey,cek,nil) + if pubKey, ok := key.(*rsa.PublicKey); ok { + if cek, err = arrays.Random(cekSizeBits >> 3); err == nil { + encryptedCek, err = rsa.EncryptOAEP(alg.sha(), rand.Reader, pubKey, cek, nil) return } - return nil,nil,err + return nil, nil, err } - return nil,nil,errors.New("RsaOaep.WrapNewKey(): expected key to be '*rsa.PublicKey'") + return nil, nil, errors.New("RsaOaep.WrapNewKey(): expected key to be '*rsa.PublicKey'") } func (alg *RsaOaep) Unwrap(encryptedCek []byte, key interface{}, cekSizeBits int, header 
map[string]interface{}) (cek []byte, err error) { - if privKey,ok:=key.(*rsa.PrivateKey);ok { - return rsa.DecryptOAEP(alg.sha(), rand.Reader, privKey, encryptedCek, nil) + if privKey, ok := key.(*rsa.PrivateKey); ok { + return rsa.DecryptOAEP(alg.sha(), rand.Reader, privKey, encryptedCek, nil) } - - return nil,errors.New("RsaOaep.Unwrap(): expected key to be '*rsa.PrivateKey'") + + return nil, errors.New("RsaOaep.Unwrap(): expected key to be '*rsa.PrivateKey'") } func (alg *RsaOaep) sha() hash.Hash { switch alg.shaSizeBits { - case 1: return sha1.New() - default: return sha256.New() + case 1: + return sha1.New() + case 256: + return sha256.New() + case 384: + return sha512.New384() + default: + return sha512.New() } } diff --git a/vendor/github.com/elastic/go-sysinfo/NOTICE.txt b/vendor/github.com/elastic/go-sysinfo/NOTICE.txt index ac435396..cb8e89d5 100644 --- a/vendor/github.com/elastic/go-sysinfo/NOTICE.txt +++ b/vendor/github.com/elastic/go-sysinfo/NOTICE.txt @@ -1,5 +1,5 @@ Elastic go-sysinfo -Copyright 2017-2022 Elasticsearch B.V. +Copyright 2017-2024 Elasticsearch B.V. This product includes software developed at Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/vendor/github.com/elastic/go-sysinfo/README.md b/vendor/github.com/elastic/go-sysinfo/README.md index c0f35aa9..b8b0002d 100644 --- a/vendor/github.com/elastic/go-sysinfo/README.md +++ b/vendor/github.com/elastic/go-sysinfo/README.md @@ -79,3 +79,9 @@ This table lists the OS and architectures for which a "provider" is implemented. | windows/arm | | | * On darwin (macOS) host information like machineid and process information like memory, cpu, user and starttime require cgo. + +### Supported Go versions + +go-sysinfo supports the [two most recent Go releases][ci_go_versions]. + +[ci_go_versions]: https://github.com/elastic/go-sysinfo/blob/main/.github/workflows/go.yml#L40-L41 diff --git a/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go b/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go index 071e2d63..00a9d2c7 100644 --- a/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go +++ b/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go @@ -23,22 +23,50 @@ import ( "github.com/elastic/go-sysinfo/types" ) -var ( - hostProvider HostProvider - processProvider ProcessProvider +type ( + HostOptsCreator = func(ProviderOptions) HostProvider + ProcessOptsCreator = func(ProviderOptions) ProcessProvider ) +// HostProvider defines interfaces that provide host-specific metrics type HostProvider interface { Host() (types.Host, error) } +// ProcessProvider defines interfaces that provide process-specific metrics type ProcessProvider interface { Processes() ([]types.Process, error) Process(pid int) (types.Process, error) Self() (types.Process, error) } +type ProviderOptions struct { + Hostfs string +} + +var ( + hostProvider HostProvider + processProvider ProcessProvider + processProviderWithOpts ProcessOptsCreator + hostProviderWithOpts HostOptsCreator +) + +// Register a metrics provider. 
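For context on the `RsaOaep` hunks just before the go-sysinfo files: the new SHA-384/SHA-512 variants bottom out in the standard library's `rsa.EncryptOAEP`/`rsa.DecryptOAEP`, roughly as in this self-contained sketch (illustrative only; the key size and CEK length are arbitrary choices, not taken from the vendored code).

```Go
package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha512"
	"fmt"
	"log"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	// A random content-encryption key, analogous to what WrapNewKey generates.
	cek := make([]byte, 32)
	if _, err := rand.Read(cek); err != nil {
		log.Fatal(err)
	}

	// RSA-OAEP-512 wraps the CEK with SHA-512; RSA-OAEP-384 would pass sha512.New384().
	wrapped, err := rsa.EncryptOAEP(sha512.New(), rand.Reader, &key.PublicKey, cek, nil)
	if err != nil {
		log.Fatal(err)
	}

	unwrapped, err := rsa.DecryptOAEP(sha512.New(), rand.Reader, key, wrapped, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(wrapped), bytes.Equal(cek, unwrapped))
}
```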
`provider` should implement one or more of `ProcessProvider` or `HostProvider` func Register(provider interface{}) { + if h, ok := provider.(ProcessOptsCreator); ok { + if processProviderWithOpts != nil { + panic(fmt.Sprintf("ProcessOptsCreator already registered: %T", processProviderWithOpts)) + } + processProviderWithOpts = h + } + + if h, ok := provider.(HostOptsCreator); ok { + if hostProviderWithOpts != nil { + panic(fmt.Sprintf("HostOptsCreator already registered: %T", hostProviderWithOpts)) + } + hostProviderWithOpts = h + } + if h, ok := provider.(HostProvider); ok { if hostProvider != nil { panic(fmt.Sprintf("HostProvider already registered: %v", hostProvider)) @@ -54,5 +82,18 @@ func Register(provider interface{}) { } } -func GetHostProvider() HostProvider { return hostProvider } -func GetProcessProvider() ProcessProvider { return processProvider } +// GetHostProvider returns the HostProvider registered for the system. May return nil. +func GetHostProvider(opts ProviderOptions) HostProvider { + if hostProviderWithOpts != nil { + return hostProviderWithOpts(opts) + } + return hostProvider +} + +// GetProcessProvider returns the ProcessProvider registered on the system. May return nil. +func GetProcessProvider(opts ProviderOptions) ProcessProvider { + if processProviderWithOpts != nil { + return processProviderWithOpts(opts) + } + return processProvider +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go index ea62d205..9af09e51 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go @@ -30,14 +30,12 @@ package aix import "C" import ( + "context" "errors" "fmt" "os" - "strings" "time" - "github.com/joeshaw/multierror" - "github.com/elastic/go-sysinfo/internal/registry" "github.com/elastic/go-sysinfo/providers/shared" "github.com/elastic/go-sysinfo/types" @@ -128,8 +126,12 @@ func (*host) Memory() (*types.HostMemoryInfo, error) { return &mem, nil } +func (h *host) FQDNWithContext(ctx context.Context) (string, error) { + return shared.FQDNWithContext(ctx) +} + func (h *host) FQDN() (string, error) { - return shared.FQDN() + return h.FQDNWithContext(context.Background()) } func newHost() (*host, error) { @@ -162,7 +164,7 @@ func (r *reader) addErr(err error) bool { func (r *reader) Err() error { if len(r.errs) > 0 { - return &multierror.MultiError{Errors: r.errs} + return errors.Join(r.errs...) } return nil } @@ -188,7 +190,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = strings.ToLower(v) + h.info.Hostname = v } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go index d1220db9..3a603f68 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go @@ -21,7 +21,7 @@ package aix import ( "fmt" - "io/ioutil" + "os" "strconv" "strings" @@ -40,7 +40,7 @@ func getOSInfo() (*types.OSInfo, error) { } // Retrieve build version from "/proc/version". 
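To illustrate the new registration path in `registry.go` above: a provider package inside go-sysinfo can now register a constructor function instead of a concrete value, so `ProviderOptions` (currently just `Hostfs`) reach the provider at lookup time. The `myHost` type below is hypothetical, a minimal sketch rather than any real provider, and would have to live inside the go-sysinfo module because `internal/registry` is not importable from outside.

```Go
package myprovider

import (
	"github.com/elastic/go-sysinfo/internal/registry"
	"github.com/elastic/go-sysinfo/types"
)

// myHost is a hypothetical HostProvider that honours an alternate root path.
type myHost struct {
	hostfs string
}

func (m myHost) Host() (types.Host, error) {
	// A real provider would read files under m.hostfs here.
	return nil, types.ErrNotImplemented
}

func init() {
	// Registering a HostOptsCreator lets GetHostProvider pass ProviderOptions
	// (e.g. the Hostfs root) through to the provider at lookup time.
	registry.Register(func(opts registry.ProviderOptions) registry.HostProvider {
		return myHost{hostfs: opts.Hostfs}
	})
}
```

Because `HostOptsCreator` is declared as a type alias for that function signature, a plain function literal satisfies the type assertion inside `Register`, which is exactly how the updated `host_linux.go` registers itself further down.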
- procVersion, err := ioutil.ReadFile("/proc/version") + procVersion, err := os.ReadFile("/proc/version") if err != nil { return nil, fmt.Errorf("failed to get OS info: cannot open /proc/version: %w", err) } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go index cfa35f2a..6fb669df 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go @@ -51,7 +51,7 @@ import ( func (aixSystem) Processes() ([]types.Process, error) { // Retrieve processes using /proc instead of calling // getprocs which will also retrieve kernel threads. - files, err := ioutil.ReadDir("/proc") + files, err := os.ReadDir("/proc") if err != nil { return nil, fmt.Errorf("error while reading /proc: %w", err) } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go index 8b3ed911..92251c35 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go @@ -21,11 +21,17 @@ package darwin import ( "fmt" + "os" "golang.org/x/sys/unix" ) -const hardwareMIB = "hw.machine" +const ( + hardwareMIB = "hw.machine" + procTranslated = "sysctl.proc_translated" + archIntel = "x86_64" + archApple = "arm64" +) func Architecture() (string, error) { arch, err := unix.Sysctl(hardwareMIB) @@ -35,3 +41,33 @@ func Architecture() (string, error) { return arch, nil } + +func NativeArchitecture() (string, error) { + processArch, err := Architecture() + if err != nil { + return "", err + } + + // https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment + + translated, err := unix.SysctlUint32(procTranslated) + if err != nil { + // macos without Rosetta installed doesn't have sysctl.proc_translated + if os.IsNotExist(err) { + return processArch, nil + } + return "", fmt.Errorf("failed to read sysctl.proc_translated: %w", err) + } + + var nativeArch string + + switch translated { + case 0: + nativeArch = processArch + case 1: + // Rosetta 2 is supported only on Apple silicon + nativeArch = archApple + } + + return nativeArch, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go index 9e369d36..8b53eee3 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go @@ -20,14 +20,12 @@ package darwin import ( + "context" "errors" "fmt" "os" - "strings" "time" - "github.com/joeshaw/multierror" - "github.com/elastic/go-sysinfo/internal/registry" "github.com/elastic/go-sysinfo/providers/shared" "github.com/elastic/go-sysinfo/types" @@ -139,8 +137,12 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { return &mem, nil } +func (h *host) FQDNWithContext(ctx context.Context) (string, error) { + return shared.FQDNWithContext(ctx) +} + func (h *host) FQDN() (string, error) { - return shared.FQDN() + return h.FQDNWithContext(context.Background()) } func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { @@ -162,6 +164,7 @@ func newHost() (*host, error) { h := &host{} r := &reader{} r.architecture(h) + r.nativeArchitecture(h) r.bootTime(h) r.hostname(h) r.network(h) @@ -188,7 +191,7 @@ func (r *reader) addErr(err 
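A stand-alone sketch of the Rosetta 2 check that the darwin `NativeArchitecture()` above performs, using only `golang.org/x/sys/unix`; it follows the same sysctl names and missing-MIB fallback as the vendored code (darwin-only).

```Go
//go:build darwin

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	arch, err := unix.Sysctl("hw.machine")
	if err != nil {
		log.Fatal(err)
	}

	// sysctl.proc_translated is 1 when the process runs under Rosetta 2;
	// the MIB is absent entirely on Macs without Rosetta installed.
	translated, err := unix.SysctlUint32("sysctl.proc_translated")
	if err != nil {
		if os.IsNotExist(err) {
			fmt.Println("native:", arch)
			return
		}
		log.Fatal(err)
	}

	if translated == 1 {
		fmt.Println("process arch:", arch, "native arch: arm64 (Rosetta 2)")
		return
	}
	fmt.Println("native:", arch)
}
```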
error) bool { func (r *reader) Err() error { if len(r.errs) > 0 { - return &multierror.MultiError{Errors: r.errs} + return errors.Join(r.errs...) } return nil } @@ -201,6 +204,14 @@ func (r *reader) architecture(h *host) { h.info.Architecture = v } +func (r *reader) nativeArchitecture(h *host) { + v, err := NativeArchitecture() + if r.addErr(err) { + return + } + h.info.NativeArchitecture = v +} + func (r *reader) bootTime(h *host) { v, err := BootTime() if r.addErr(err) { @@ -214,7 +225,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = strings.ToLower(v) + h.info.Hostname = v } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/os_darwin.go similarity index 97% rename from vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go rename to vendor/github.com/elastic/go-sysinfo/providers/darwin/os_darwin.go index 0dbe8473..94309446 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/os_darwin.go @@ -19,7 +19,7 @@ package darwin import ( "fmt" - "io/ioutil" + "os" "strconv" "strings" @@ -37,7 +37,7 @@ const ( ) func OperatingSystem() (*types.OSInfo, error) { - data, err := ioutil.ReadFile(systemVersionPlist) + data, err := os.ReadFile(systemVersionPlist) if err != nil { return nil, fmt.Errorf("failed to read plist file: %w", err) } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go index e1d28936..34615385 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go @@ -19,9 +19,20 @@ package linux import ( "fmt" + "os" + "strings" "syscall" ) +const ( + procSysKernelArch = "/proc/sys/kernel/arch" + procVersion = "/proc/version" + arch8664 = "x86_64" + archAmd64 = "amd64" + archArm64 = "arm64" + archAarch64 = "aarch64" +) + func Architecture() (string, error) { var uname syscall.Utsname if err := syscall.Uname(&uname); err != nil { @@ -38,3 +49,38 @@ func Architecture() (string, error) { return string(data), nil } + +func NativeArchitecture() (string, error) { + // /proc/sys/kernel/arch was introduced in Kernel 6.1 + // https://www.kernel.org/doc/html/v6.1/admin-guide/sysctl/kernel.html#arch + // It's the same as uname -m, except that for a process running in emulation + // machine returned from syscall reflects the emulated machine, whilst /proc + // filesystem is read as file so its value is not emulated + data, err := os.ReadFile(procSysKernelArch) + if err != nil { + if os.IsNotExist(err) { + // fallback to checking version string for older kernels + version, err := os.ReadFile(procVersion) + if err != nil && !os.IsNotExist(err) { + return "", fmt.Errorf("failed to read kernel version: %w", err) + } + + versionStr := string(version) + if strings.Contains(versionStr, archAmd64) || strings.Contains(versionStr, arch8664) { + return archAmd64, nil + } else if strings.Contains(versionStr, archArm64) || strings.Contains(versionStr, archAarch64) { + // for parity with Architecture() and /proc/sys/kernel/arch + // as aarch64 and arm64 are used interchangeably + return archAarch64, nil + } + return "", nil + } + + return "", fmt.Errorf("failed to read kernel arch: %w", err) + } + + nativeArch := string(data) + nativeArch = strings.TrimRight(nativeArch, "\n") + + return 
nativeArch, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go index 7eee188e..c66dd323 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go @@ -21,7 +21,6 @@ import ( "bufio" "bytes" "fmt" - "io/ioutil" "os" ) @@ -29,7 +28,7 @@ const procOneCgroup = "/proc/1/cgroup" // IsContainerized returns true if this process is containerized. func IsContainerized() (bool, error) { - data, err := ioutil.ReadFile(procOneCgroup) + data, err := os.ReadFile(procOneCgroup) if err != nil { if os.IsNotExist(err) { return false, nil diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go index cd6c0106..24e72d0c 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go @@ -18,15 +18,13 @@ package linux import ( + "context" "errors" "fmt" - "io/ioutil" "os" "path/filepath" - "strings" "time" - "github.com/joeshaw/multierror" "github.com/prometheus/procfs" "github.com/elastic/go-sysinfo/internal/registry" @@ -35,7 +33,9 @@ import ( ) func init() { - registry.Register(newLinuxSystem("")) + // register wrappers that implement the HostFS versions of the ProcessProvider and HostProvider + registry.Register(func(opts registry.ProviderOptions) registry.HostProvider { return newLinuxSystem(opts.Hostfs) }) + registry.Register(func(opts registry.ProviderOptions) registry.ProcessProvider { return newLinuxSystem(opts.Hostfs) }) } type linuxSystem struct { @@ -46,7 +46,7 @@ func newLinuxSystem(hostFS string) linuxSystem { mountPoint := filepath.Join(hostFS, procfs.DefaultMountPoint) fs, _ := procfs.NewFS(mountPoint) return linuxSystem{ - procFS: procFS{FS: fs, mountPoint: mountPoint}, + procFS: procFS{FS: fs, mountPoint: mountPoint, baseMount: hostFS}, } } @@ -60,28 +60,36 @@ type host struct { info types.HostInfo } +// Info returns host info func (h *host) Info() types.HostInfo { return h.info } +// Memory returns memory info func (h *host) Memory() (*types.HostMemoryInfo, error) { - content, err := ioutil.ReadFile(h.procFS.path("meminfo")) + path := h.procFS.path("meminfo") + content, err := os.ReadFile(path) if err != nil { - return nil, err + return nil, fmt.Errorf("error reading meminfo file %s: %w", path, err) } return parseMemInfo(content) } +func (h *host) FQDNWithContext(ctx context.Context) (string, error) { + return shared.FQDNWithContext(ctx) +} + func (h *host) FQDN() (string, error) { - return shared.FQDN() + return h.FQDNWithContext(context.Background()) } // VMStat reports data from /proc/vmstat on linux. 
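Similarly for Linux, the kernel 6.1+ `/proc/sys/kernel/arch` file can be read directly; the sketch below follows the arch_linux.go hunk above for the happy path, but unlike the vendored code it falls back to `runtime.GOARCH` rather than parsing `/proc/version`, purely to stay short.

```Go
//go:build linux

package main

import (
	"fmt"
	"os"
	"runtime"
	"strings"
)

func main() {
	// Kernel 6.1+ exposes the native machine architecture here, unaffected
	// by any emulation the current process might be running under.
	data, err := os.ReadFile("/proc/sys/kernel/arch")
	if err == nil {
		fmt.Println("native arch:", strings.TrimRight(string(data), "\n"))
		return
	}

	// Simplified fallback for older kernels (the vendored code inspects
	// /proc/version instead); this just reports the process architecture.
	fmt.Println("process arch (fallback):", runtime.GOARCH)
}
```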
func (h *host) VMStat() (*types.VMStatInfo, error) { - content, err := ioutil.ReadFile(h.procFS.path("vmstat")) + path := h.procFS.path("vmstat") + content, err := os.ReadFile(path) if err != nil { - return nil, err + return nil, fmt.Errorf("error reading vmstat file %s: %w", path, err) } return parseVMStat(content) @@ -91,7 +99,7 @@ func (h *host) VMStat() (*types.VMStatInfo, error) { func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { loadAvg, err := h.procFS.LoadAvg() if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching load averages: %w", err) } return &types.LoadAverageInfo{ @@ -103,31 +111,34 @@ func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { // NetworkCounters reports data from /proc/net on linux func (h *host) NetworkCounters() (*types.NetworkCountersInfo, error) { - snmpRaw, err := ioutil.ReadFile(h.procFS.path("net/snmp")) + snmpFile := h.procFS.path("net/snmp") + snmpRaw, err := os.ReadFile(snmpFile) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching net/snmp file %s: %w", snmpFile, err) } snmp, err := getNetSnmpStats(snmpRaw) if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing SNMP stats: %w", err) } - netstatRaw, err := ioutil.ReadFile(h.procFS.path("net/netstat")) + netstatFile := h.procFS.path("net/netstat") + netstatRaw, err := os.ReadFile(netstatFile) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching net/netstat file %s: %w", netstatFile, err) } netstat, err := getNetstatStats(netstatRaw) if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing netstat file: %w", err) } return &types.NetworkCountersInfo{SNMP: snmp, Netstat: netstat}, nil } +// CPUTime returns host CPU usage metrics func (h *host) CPUTime() (types.CPUTimes, error) { stat, err := h.procFS.Stat() if err != nil { - return types.CPUTimes{}, err + return types.CPUTimes{}, fmt.Errorf("error fetching CPU stats: %w", err) } return types.CPUTimes{ @@ -151,6 +162,7 @@ func newHost(fs procFS) (*host, error) { h := &host{stat: stat, procFS: fs} r := &reader{} r.architecture(h) + r.nativeArchitecture(h) r.bootTime(h) r.containerized(h) r.hostname(h) @@ -179,7 +191,7 @@ func (r *reader) addErr(err error) bool { func (r *reader) Err() error { if len(r.errs) > 0 { - return &multierror.MultiError{Errors: r.errs} + return errors.Join(r.errs...) 
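Several hunks in this file (and their aix/darwin/windows counterparts) replace `multierror.MultiError` with the standard library's `errors.Join`; a tiny sketch of the two properties being relied on, namely readable aggregation and `errors.Is` still matching through the joined error.

```Go
package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	var errs []error
	errs = append(errs, fs.ErrNotExist)
	errs = append(errs, errors.New("could not read boot time"))

	// errors.Join returns nil for an empty slice, so reader.Err() keeps its
	// "only report when something was collected" contract.
	err := errors.Join(errs...)
	fmt.Println(err)                            // both messages, newline-separated
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: joined errors stay inspectable
}
```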
} return nil } @@ -192,6 +204,14 @@ func (r *reader) architecture(h *host) { h.info.Architecture = v } +func (r *reader) nativeArchitecture(h *host) { + v, err := NativeArchitecture() + if r.addErr(err) { + return + } + h.info.NativeArchitecture = v +} + func (r *reader) bootTime(h *host) { v, err := bootTime(h.procFS.FS) if r.addErr(err) { @@ -213,7 +233,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = strings.ToLower(v) + h.info.Hostname = v } func (r *reader) network(h *host) { @@ -234,7 +254,7 @@ func (r *reader) kernelVersion(h *host) { } func (r *reader) os(h *host) { - v, err := OperatingSystem() + v, err := getOSInfo(h.procFS.baseMount) if r.addErr(err) { return } @@ -246,7 +266,7 @@ func (r *reader) time(h *host) { } func (r *reader) uniqueID(h *host) { - v, err := MachineID() + v, err := MachineIDHostfs(h.procFS.baseMount) if r.addErr(err) { return } @@ -256,6 +276,7 @@ func (r *reader) uniqueID(h *host) { type procFS struct { procfs.FS mountPoint string + baseMount string } func (fs *procFS) path(p ...string) string { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go index adfcd109..a5e8afaa 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go @@ -20,8 +20,8 @@ package linux import ( "bytes" "fmt" - "io/ioutil" "os" + "path/filepath" "github.com/elastic/go-sysinfo/types" ) @@ -30,12 +30,12 @@ import ( // These will be searched in order. var machineIDFiles = []string{"/etc/machine-id", "/var/lib/dbus/machine-id", "/var/db/dbus/machine-id"} -func MachineID() (string, error) { +func machineID(hostfs string) (string, error) { var contents []byte var err error for _, file := range machineIDFiles { - contents, err = ioutil.ReadFile(file) + contents, err = os.ReadFile(filepath.Join(hostfs, file)) if err != nil { if os.IsNotExist(err) { // Try next location @@ -58,3 +58,11 @@ func MachineID() (string, error) { contents = bytes.TrimSpace(contents) return string(contents), nil } + +func MachineIDHostfs(hostfs string) (string, error) { + return machineID(hostfs) +} + +func MachineID() (string, error) { + return machineID("") +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go index f5b02bef..f2e366ab 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go @@ -20,6 +20,7 @@ package linux import ( "bufio" "bytes" + "errors" "fmt" "os" "path/filepath" @@ -27,16 +28,15 @@ import ( "strconv" "strings" - "github.com/joeshaw/multierror" - "github.com/elastic/go-sysinfo/types" ) const ( - osRelease = "/etc/os-release" - lsbRelease = "/etc/lsb-release" - distribRelease = "/etc/*-release" - versionGrok = `(?P(?P[0-9]+)\.?(?P[0-9]+)?\.?(?P\w+)?)(?: \((?P[-\w ]+)\))?` + osRelease = "/etc/os-release" + lsbRelease = "/etc/lsb-release" + distribRelease = "/etc/*-release" + versionGrok = `(?P(?P[0-9]+)\.?(?P[0-9]+)?\.?(?P\w+)?)(?: \((?P[-\w ]+)\))?` + versionGrokSuse = `(?P(?P[0-9]+)(?:[.-]?(?:SP)?(?P[0-9]+))?(?:[.-](?P[0-9]+|\w+))?)(?: \((?P[-\w ]+)\))?` ) var ( @@ -45,11 +45,15 @@ var ( // versionRegexp parses version numbers (e.g. 6 or 6.1 or 6.1.0 or 6.1.0_20150102). versionRegexp = regexp.MustCompile(versionGrok) + + // versionRegexpSuse parses version numbers for SUSE (e.g. 15-SP1). 
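On the new SUSE-aware version parsing: the `versionGrokSuse` pattern is rendered above with its `(?P<...>)` group names stripped, so the sketch below restores them from the keys the parser switches on (`major`, `minor`, `patch`, `codename`, as used by `makeOSInfo`/`extractVersionDetails` further down); the sample version strings are invented.

```Go
package main

import (
	"fmt"
	"regexp"
)

// versionGrokSuse reconstructed with its named capture groups.
const versionGrokSuse = `(?P<version>(?P<major>[0-9]+)(?:[.-]?(?:SP)?(?P<minor>[0-9]+))?(?:[.-](?P<patch>[0-9]+|\w+))?)(?: \((?P<codename>[-\w ]+)\))?`

func main() {
	re := regexp.MustCompile(versionGrokSuse)
	for _, v := range []string{"15-SP3", "12.5", "15"} { // sample SUSE-style versions
		m := re.FindStringSubmatch(v)
		out := map[string]string{}
		for i, name := range re.SubexpNames() {
			if name != "" && i < len(m) {
				out[name] = m[i]
			}
		}
		fmt.Println(v, out)
	}
}
```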
+ versionRegexpSuse = regexp.MustCompile(versionGrokSuse) ) // familyMap contains a mapping of family -> []platforms. var familyMap = map[string][]string{ - "arch": {"arch", "antergos", "manjaro"}, + "alpine": {"alpine"}, + "arch": {"arch", "antergos", "manjaro"}, "redhat": { "redhat", "fedora", "centos", "scientific", "oraclelinux", "ol", "amzn", "rhel", "almalinux", "openeuler", "rocky", @@ -69,6 +73,8 @@ func init() { } } +// OperatingSystem returns OS info. This does not take an alternate hostfs. +// to get OS info from an alternate root path, use reader.os() func OperatingSystem() (*types.OSInfo, error) { return getOSInfo("") } @@ -184,34 +190,39 @@ func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { } } - if os.Version != "" { - // Try parsing info from the version. - keys := versionRegexp.SubexpNames() - for i, m := range versionRegexp.FindStringSubmatch(os.Version) { - switch keys[i] { - case "major": - os.Major, _ = strconv.Atoi(m) - case "minor": - os.Minor, _ = strconv.Atoi(m) - case "patch": - os.Patch, _ = strconv.Atoi(m) - case "codename": - if os.Codename == "" { - os.Codename = m - } - } - } + if osRelease["ID_LIKE"] == "suse" { + extractVersionDetails(os, os.Version, versionRegexpSuse) + } else if os.Version != "" { + extractVersionDetails(os, os.Version, versionRegexp) } return os, nil } +func extractVersionDetails(os *types.OSInfo, version string, re *regexp.Regexp) { + keys := re.SubexpNames() + for i, match := range re.FindStringSubmatch(version) { + switch keys[i] { + case "major": + os.Major, _ = strconv.Atoi(match) + case "minor": + os.Minor, _ = strconv.Atoi(match) + case "patch": + os.Patch, _ = strconv.Atoi(match) + case "codename": + if os.Codename == "" { + os.Codename = match + } + } + } +} + func findDistribRelease(baseDir string) (*types.OSInfo, error) { - var errs []error matches, err := filepath.Glob(filepath.Join(baseDir, distribRelease)) if err != nil { return nil, err } + var errs []error for _, path := range matches { if strings.HasSuffix(path, osRelease) || strings.HasSuffix(path, lsbRelease) { continue @@ -227,9 +238,9 @@ func findDistribRelease(baseDir string) (*types.OSInfo, error) { errs = append(errs, fmt.Errorf("in %s: %w", path, err)) continue } - return osInfo, err + return osInfo, nil } - return nil, fmt.Errorf("no valid /etc/-release file found: %w", &multierror.MultiError{Errors: errs}) + return nil, fmt.Errorf("no valid /etc/-release file found: %w", errors.Join(errs...)) } func getDistribRelease(file string) (*types.OSInfo, error) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go index 52bae255..fc3c25be 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go @@ -19,7 +19,7 @@ package linux import ( "bytes" - "io/ioutil" + "fmt" "os" "strconv" "strings" @@ -32,10 +32,11 @@ import ( const userHz = 100 +// Processes returns a list of processes on the system func (s linuxSystem) Processes() ([]types.Process, error) { procs, err := s.procFS.AllProcs() if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching all processes: %w", err) } processes := make([]types.Process, 0, len(procs)) @@ -45,19 +46,21 @@ func (s linuxSystem) Processes() ([]types.Process, error) { return processes, nil } +// Process returns the given process func (s linuxSystem) Process(pid int) (types.Process, error) { - proc, err := 
s.procFS.NewProc(pid) + proc, err := s.procFS.Proc(pid) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching process: %w", err) } return &process{Proc: proc, fs: s.procFS}, nil } +// Self returns process info for the caller's own PID func (s linuxSystem) Self() (types.Process, error) { proc, err := s.procFS.Self() if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching self process info: %w", err) } return &process{Proc: proc, fs: s.procFS}, nil @@ -69,19 +72,21 @@ type process struct { info *types.ProcessInfo } +// PID returns the PID of the process func (p *process) PID() int { return p.Proc.PID } +// Parent returns the parent process func (p *process) Parent() (types.Process, error) { info, err := p.Info() if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching process info: %w", err) } - proc, err := p.fs.NewProc(info.PPID) + proc, err := p.fs.Proc(info.PPID) if err != nil { - return nil, err + return nil, fmt.Errorf("error fetching data for parent process: %w", err) } return &process{Proc: proc, fs: p.fs}, nil @@ -91,8 +96,8 @@ func (p *process) path(pa ...string) string { return p.fs.path(append([]string{strconv.Itoa(p.PID())}, pa...)...) } +// CWD returns the current working directory func (p *process) CWD() (string, error) { - // TODO: add CWD to procfs cwd, err := os.Readlink(p.path("cwd")) if os.IsNotExist(err) { return "", nil @@ -101,34 +106,35 @@ func (p *process) CWD() (string, error) { return cwd, err } +// Info returns basic process info func (p *process) Info() (types.ProcessInfo, error) { if p.info != nil { return *p.info, nil } - stat, err := p.NewStat() + stat, err := p.Stat() if err != nil { - return types.ProcessInfo{}, err + return types.ProcessInfo{}, fmt.Errorf("error fetching process stats: %w", err) } exe, err := p.Executable() if err != nil { - return types.ProcessInfo{}, err + return types.ProcessInfo{}, fmt.Errorf("error fetching process executable info: %w", err) } args, err := p.CmdLine() if err != nil { - return types.ProcessInfo{}, err + return types.ProcessInfo{}, fmt.Errorf("error fetching process cmdline: %w", err) } cwd, err := p.CWD() if err != nil { - return types.ProcessInfo{}, err + return types.ProcessInfo{}, fmt.Errorf("error fetching process CWD: %w", err) } bootTime, err := bootTime(p.fs.FS) if err != nil { - return types.ProcessInfo{}, err + return types.ProcessInfo{}, fmt.Errorf("error fetching boot time: %w", err) } p.info = &types.ProcessInfo{ @@ -144,8 +150,9 @@ func (p *process) Info() (types.ProcessInfo, error) { return *p.info, nil } +// Memory returns memory stats for the process func (p *process) Memory() (types.MemoryInfo, error) { - stat, err := p.NewStat() + stat, err := p.Stat() if err != nil { return types.MemoryInfo{}, err } @@ -156,8 +163,9 @@ func (p *process) Memory() (types.MemoryInfo, error) { }, nil } +// CPUTime returns CPU usage time for the process func (p *process) CPUTime() (types.CPUTimes, error) { - stat, err := p.NewStat() + stat, err := p.Stat() if err != nil { return types.CPUTimes{}, err } @@ -178,9 +186,10 @@ func (p *process) OpenHandleCount() (int, error) { return p.Proc.FileDescriptorsLen() } +// Environment returns a list of environment variables for the process func (p *process) Environment() (map[string]string, error) { // TODO: add Environment to procfs - content, err := ioutil.ReadFile(p.path("environ")) + content, err := os.ReadFile(p.path("environ")) if err != nil { return nil, err } @@ -204,8 +213,9 @@ func (p *process) Environment() 
(map[string]string, error) { return env, nil } +// Seccomp returns seccomp info for the process func (p *process) Seccomp() (*types.SeccompInfo, error) { - content, err := ioutil.ReadFile(p.path("status")) + content, err := os.ReadFile(p.path("status")) if err != nil { return nil, err } @@ -213,8 +223,9 @@ func (p *process) Seccomp() (*types.SeccompInfo, error) { return readSeccompFields(content) } +// Capabilities returns capability info for the process func (p *process) Capabilities() (*types.CapabilityInfo, error) { - content, err := ioutil.ReadFile(p.path("status")) + content, err := os.ReadFile(p.path("status")) if err != nil { return nil, err } @@ -222,8 +233,9 @@ func (p *process) Capabilities() (*types.CapabilityInfo, error) { return readCapabilities(content) } +// User returns user info for the process func (p *process) User() (types.UserInfo, error) { - content, err := ioutil.ReadFile(p.path("status")) + content, err := os.ReadFile(p.path("status")) if err != nil { return types.UserInfo{}, err } @@ -249,28 +261,31 @@ func (p *process) User() (types.UserInfo, error) { } return nil }) + if err != nil { + return user, fmt.Errorf("error partsing key-values in user data: %w", err) + } return user, nil } // NetworkStats reports network stats for an individual PID. func (p *process) NetworkCounters() (*types.NetworkCountersInfo, error) { - snmpRaw, err := ioutil.ReadFile(p.path("net/snmp")) + snmpRaw, err := os.ReadFile(p.path("net/snmp")) if err != nil { - return nil, err + return nil, fmt.Errorf("error reading net/snmp file: %w", err) } snmp, err := getNetSnmpStats(snmpRaw) if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing SNMP network data: %w", err) } - netstatRaw, err := ioutil.ReadFile(p.path("net/netstat")) + netstatRaw, err := os.ReadFile(p.path("net/netstat")) if err != nil { - return nil, err + return nil, fmt.Errorf("error reading net/netstat file: %w", err) } netstat, err := getNetstatStats(netstatRaw) if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing netstat file: %w", err) } return &types.NetworkCountersInfo{SNMP: snmp, Netstat: netstat}, nil diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go index 8d9c27df..1c1d0584 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go @@ -22,7 +22,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "os" "strconv" ) @@ -51,7 +51,7 @@ func parseKeyValue(content []byte, separator byte, callback func(key, value []by } func findValue(filename, separator, key string) (string, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return "", err } @@ -93,6 +93,7 @@ func decodeBitMap(s string, lookupName func(int) string) ([]string, error) { return names, nil } +// parses a meminfo field, returning either a raw numerical value, or the kB value converted to bytes func parseBytesOrNumber(data []byte) (uint64, error) { parts := bytes.Fields(data) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go b/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go index 8cba7bc2..b8bb4552 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go @@ -20,13 +20,14 @@ package shared import ( + "context" "fmt" "net" "os" "strings" ) -// FQDN attempts to lookup the host's 
fully-qualified domain name and returns it. +// FQDNWithContext attempts to lookup the host's fully-qualified domain name and returns it. // It does so using the following algorithm: // // 1. It gets the hostname from the OS. If this step fails, it returns an error. @@ -40,37 +41,53 @@ import ( // // 4. If steps 2 and 3 both fail, an empty string is returned as the FQDN along with // errors from those steps. -func FQDN() (string, error) { +func FQDNWithContext(ctx context.Context) (string, error) { hostname, err := os.Hostname() if err != nil { return "", fmt.Errorf("could not get hostname to look for FQDN: %w", err) } - return fqdn(hostname) + return fqdn(ctx, hostname) +} + +// FQDN just calls FQDNWithContext with a background context. +// Deprecated. +func FQDN() (string, error) { + return FQDNWithContext(context.Background()) } -func fqdn(hostname string) (string, error) { +func fqdn(ctx context.Context, hostname string) (string, error) { var errs error - cname, err := net.LookupCNAME(hostname) + cname, err := net.DefaultResolver.LookupCNAME(ctx, hostname) if err != nil { errs = fmt.Errorf("could not get FQDN, all methods failed: failed looking up CNAME: %w", err) } + if cname != "" { - return strings.ToLower(strings.TrimSuffix(cname, ".")), nil + cname = strings.TrimSuffix(cname, ".") + + // Go might lowercase the cname "for convenience". Therefore, if cname + // is the same as hostname, return hostname as is. + // See https://github.com/golang/go/blob/go1.22.5/src/net/hosts.go#L38 + if strings.ToLower(cname) == strings.ToLower(hostname) { + return hostname, nil + } + + return cname, nil } - ips, err := net.LookupIP(hostname) + ips, err := net.DefaultResolver.LookupIP(ctx, "ip", hostname) if err != nil { errs = fmt.Errorf("%s: failed looking up IP: %w", errs, err) } for _, ip := range ips { - names, err := net.LookupAddr(ip.String()) + names, err := net.DefaultResolver.LookupAddr(ctx, ip.String()) if err != nil || len(names) == 0 { continue } - return strings.ToLower(strings.TrimSuffix(names[0], ".")), nil + return strings.TrimSuffix(names[0], "."), nil } return "", errs diff --git a/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go b/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go index cd11acd6..bed19f4e 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/shared/network.go @@ -27,17 +27,20 @@ func Network() (ips, macs []string, err error) { return nil, nil, err } - ips = make([]string, 0, len(ifcs)) + // This function fetches all the addresses in a single syscall. Fetching addresses individually for each interface + // can be expensive when the host has a lot of interfaces. This usually happens when the host is doing virtualized + // networking for guests, in Kubernetes for example. 
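The FQDN rework above threads a `context.Context` through every resolver call; a compact sketch of the same pattern with an explicit timeout (results naturally depend on the machine's DNS setup).

```Go
package main

import (
	"context"
	"fmt"
	"net"
	"os"
	"strings"
	"time"
)

func main() {
	hostname, err := os.Hostname()
	if err != nil {
		fmt.Println("hostname error:", err)
		return
	}

	// Bound the DNS work instead of letting LookupCNAME block indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	cname, err := net.DefaultResolver.LookupCNAME(ctx, hostname)
	if err != nil {
		fmt.Println("no CNAME:", err)
		return
	}
	fmt.Println("FQDN candidate:", strings.TrimSuffix(cname, "."))
}
```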
+ addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, nil, err + } + ips = make([]string, 0, len(addrs)) + for _, addr := range addrs { + ips = append(ips, addr.String()) + } + macs = make([]string, 0, len(ifcs)) for _, ifc := range ifcs { - addrs, err := ifc.Addrs() - if err != nil { - return nil, nil, err - } - for _, addr := range addrs { - ips = append(ips, addr.String()) - } - mac := ifc.HardwareAddr.String() if mac != "" { macs = append(macs, mac) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go index 0edfc4d7..81afb81c 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/arch_windows.go @@ -18,14 +18,57 @@ package windows import ( - windows "github.com/elastic/go-windows" + "errors" + + "golang.org/x/sys/windows" + + gowindows "github.com/elastic/go-windows" +) + +const ( + imageFileMachineAmd64 = 0x8664 + imageFileMachineArm64 = 0xAA64 + archIntel = "x86_64" + archArm64 = "arm64" ) func Architecture() (string, error) { - systemInfo, err := windows.GetNativeSystemInfo() + systemInfo, err := gowindows.GetNativeSystemInfo() if err != nil { return "", err } return systemInfo.ProcessorArchitecture.String(), nil } + +func NativeArchitecture() (string, error) { + var processMachine, nativeMachine uint16 + // the pseudo handle doesn't need to be closed + currentProcessHandle := windows.CurrentProcess() + + // IsWow64Process2 was introduced in version 1709 (build 16299 acording to the tables) + // https://learn.microsoft.com/en-us/windows/release-health/release-information + // https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info + err := windows.IsWow64Process2(currentProcessHandle, &processMachine, &nativeMachine) + if err != nil { + if errors.Is(err, windows.ERROR_PROC_NOT_FOUND) { + major, minor, build := windows.RtlGetNtVersionNumbers() + if major < 10 || (major == 10 && minor == 0 && build < 16299) { + return "", nil + } + } + return "", err + } + + var nativeArch string + + switch nativeMachine { + case imageFileMachineAmd64: + // for parity with Architecture() as amd64 and x86_64 are used interchangeably + nativeArch = archIntel + case imageFileMachineArm64: + nativeArch = archArm64 + } + + return nativeArch, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go index b429ff2e..ed948819 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go @@ -18,6 +18,7 @@ package windows import ( + "context" "errors" "fmt" "os" @@ -25,8 +26,6 @@ import ( "syscall" "time" - "github.com/joeshaw/multierror" - stdwindows "golang.org/x/sys/windows" windows "github.com/elastic/go-windows" @@ -84,19 +83,24 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { }, nil } -func (h *host) FQDN() (string, error) { +func (h *host) FQDNWithContext(_ context.Context) (string, error) { fqdn, err := getComputerNameEx(stdwindows.ComputerNamePhysicalDnsFullyQualified) if err != nil { return "", fmt.Errorf("could not get windows FQDN: %s", err) } - return strings.ToLower(strings.TrimSuffix(fqdn, ".")), nil + return strings.TrimSuffix(fqdn, "."), nil +} + +func (h *host) FQDN() (string, error) { + return h.FQDNWithContext(context.Background()) } func 
newHost() (*host, error) { h := &host{} r := &reader{} r.architecture(h) + r.nativeArchitecture(h) r.bootTime(h) r.hostname(h) r.network(h) @@ -123,7 +127,7 @@ func (r *reader) addErr(err error) bool { func (r *reader) Err() error { if len(r.errs) > 0 { - return &multierror.MultiError{Errors: r.errs} + return errors.Join(r.errs...) } return nil } @@ -136,6 +140,14 @@ func (r *reader) architecture(h *host) { h.info.Architecture = v } +func (r *reader) nativeArchitecture(h *host) { + v, err := NativeArchitecture() + if r.addErr(err) { + return + } + h.info.NativeArchitecture = v +} + func (r *reader) bootTime(h *host) { v, err := BootTime() if r.addErr(err) { @@ -149,7 +161,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = strings.ToLower(v) + h.info.Hostname = v } func getComputerNameEx(name uint32) (string, error) { diff --git a/vendor/github.com/elastic/go-sysinfo/system.go b/vendor/github.com/elastic/go-sysinfo/system.go index b9a33607..e2edfe22 100644 --- a/vendor/github.com/elastic/go-sysinfo/system.go +++ b/vendor/github.com/elastic/go-sysinfo/system.go @@ -30,6 +30,21 @@ import ( _ "github.com/elastic/go-sysinfo/providers/windows" ) +type ProviderOption func(*registry.ProviderOptions) + +// WithHostFS returns a provider with a custom HostFS root path, +// enabling use of the library from within a container, or an alternate root path on linux. +// For example, WithHostFS("/hostfs") can be used when /hostfs points to the root filesystem of the container host. +// For full functionality, the alternate hostfs should have: +// - /proc +// - /var +// - /etc +func WithHostFS(hostfs string) ProviderOption { + return func(po *registry.ProviderOptions) { + po.Hostfs = hostfs + } +} + // Go returns information about the Go runtime. func Go() types.GoInfo { return types.GoInfo{ @@ -40,12 +55,31 @@ func Go() types.GoInfo { } } +func applyOptsAndReturnProvider(opts ...ProviderOption) registry.ProviderOptions { + options := registry.ProviderOptions{} + for _, opt := range opts { + opt(&options) + } + return options +} + +// setupProcessProvider returns a ProcessProvider. +// Most of the exported functions here deal with processes, +// so this just gets wrapped by all the external functions +func setupProcessProvider(opts ...ProviderOption) (registry.ProcessProvider, error) { + provider := registry.GetProcessProvider(applyOptsAndReturnProvider(opts...)) + if provider == nil { + return nil, types.ErrNotImplemented + } + return provider, nil +} + // Host returns information about host on which this process is running. If // host information collection is not implemented for this platform then // types.ErrNotImplemented is returned. // On Darwin (macOS) a types.ErrNotImplemented is returned with cgo disabled. -func Host() (types.Host, error) { - provider := registry.GetHostProvider() +func Host(opts ...ProviderOption) (types.Host, error) { + provider := registry.GetHostProvider(applyOptsAndReturnProvider(opts...)) if provider == nil { return nil, types.ErrNotImplemented } @@ -56,10 +90,10 @@ func Host() (types.Host, error) { // with the given PID. The types.Process object can be used to query information // about the process. If process information collection is not implemented for // this platform then types.ErrNotImplemented is returned. 
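Tying the go-sysinfo API changes together: `WithHostFS`, the variadic options on `Host`, `FQDNWithContext`, and the `NativeArchitecture` field added to `types.HostInfo` (see the types/host.go hunk below) combine roughly as in this sketch; the `/hostfs` mount point is an assumed deployment detail, echoing the `WithHostFS` doc comment.

```Go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	sysinfo "github.com/elastic/go-sysinfo"
)

func main() {
	// When running in a container with the host root bind-mounted at /hostfs.
	host, err := sysinfo.Host(sysinfo.WithHostFS("/hostfs"))
	if err != nil {
		log.Fatal(err)
	}

	info := host.Info()
	fmt.Println("arch:", info.Architecture, "native:", info.NativeArchitecture)

	// The context-aware FQDN lookup replaces the now-deprecated FQDN().
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if fqdn, err := host.FQDNWithContext(ctx); err == nil {
		fmt.Println("fqdn:", fqdn)
	}
}
```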
-func Process(pid int) (types.Process, error) { - provider := registry.GetProcessProvider() - if provider == nil { - return nil, types.ErrNotImplemented +func Process(pid int, opts ...ProviderOption) (types.Process, error) { + provider, err := setupProcessProvider(opts...) + if err != nil { + return nil, err } return provider.Process(pid) } @@ -67,10 +101,10 @@ func Process(pid int) (types.Process, error) { // Processes return a list of all processes. If process information collection // is not implemented for this platform then types.ErrNotImplemented is // returned. -func Processes() ([]types.Process, error) { - provider := registry.GetProcessProvider() - if provider == nil { - return nil, types.ErrNotImplemented +func Processes(opts ...ProviderOption) ([]types.Process, error) { + provider, err := setupProcessProvider(opts...) + if err != nil { + return nil, err } return provider.Processes() } @@ -78,10 +112,10 @@ func Processes() ([]types.Process, error) { // Self return a types.Process object representing this process. If process // information collection is not implemented for this platform then // types.ErrNotImplemented is returned. -func Self() (types.Process, error) { - provider := registry.GetProcessProvider() - if provider == nil { - return nil, types.ErrNotImplemented +func Self(opts ...ProviderOption) (types.Process, error) { + provider, err := setupProcessProvider(opts...) + if err != nil { + return nil, err } return provider.Self() } diff --git a/vendor/github.com/elastic/go-sysinfo/types/host.go b/vendor/github.com/elastic/go-sysinfo/types/host.go index 5685e984..9661ce70 100644 --- a/vendor/github.com/elastic/go-sysinfo/types/host.go +++ b/vendor/github.com/elastic/go-sysinfo/types/host.go @@ -17,7 +17,10 @@ package types -import "time" +import ( + "context" + "time" +) // Host is the interface that wraps methods for returning Host stats // It may return partial information if the provider @@ -27,7 +30,11 @@ type Host interface { Info() HostInfo Memory() (*HostMemoryInfo, error) - // FQDN returns the fully-qualified domain name of the host, lowercased. + // FQDNWithContext returns the fully-qualified domain name of the host. + FQDNWithContext(ctx context.Context) (string, error) + + // FQDN calls FQDNWithContext with a background context. + // Deprecated: Use FQDNWithContext instead. FQDN() (string, error) } @@ -66,17 +73,18 @@ type VMStat interface { // HostInfo contains basic host information. type HostInfo struct { - Architecture string `json:"architecture"` // Hardware architecture (e.g. x86_64, arm, ppc, mips). - BootTime time.Time `json:"boot_time"` // Host boot time. - Containerized *bool `json:"containerized,omitempty"` // Is the process containerized. - Hostname string `json:"name"` // Hostname, lowercased. - IPs []string `json:"ip,omitempty"` // List of all IPs. - KernelVersion string `json:"kernel_version"` // Kernel version. - MACs []string `json:"mac"` // List of MAC addresses. - OS *OSInfo `json:"os"` // OS information. - Timezone string `json:"timezone"` // System timezone. - TimezoneOffsetSec int `json:"timezone_offset_sec"` // Timezone offset (seconds from UTC). - UniqueID string `json:"id,omitempty"` // Unique ID of the host (optional). + Architecture string `json:"architecture"` // Process hardware architecture (e.g. x86_64, arm, ppc, mips). + NativeArchitecture string `json:"native_architecture"` // Native OS hardware architecture (e.g. x86_64, arm, ppc, mips). + BootTime time.Time `json:"boot_time"` // Host boot time. 
+ Containerized *bool `json:"containerized,omitempty"` // Is the process containerized. + Hostname string `json:"name"` // Hostname. + IPs []string `json:"ip,omitempty"` // List of all IPs. + KernelVersion string `json:"kernel_version"` // Kernel version. + MACs []string `json:"mac"` // List of MAC addresses. + OS *OSInfo `json:"os"` // OS information. + Timezone string `json:"timezone"` // System timezone. + TimezoneOffsetSec int `json:"timezone_offset_sec"` // Timezone offset (seconds from UTC). + UniqueID string `json:"id,omitempty"` // Unique ID of the host (optional). } // Uptime returns the system uptime diff --git a/vendor/github.com/elastic/go-windows/.appveyor.yml b/vendor/github.com/elastic/go-windows/.appveyor.yml deleted file mode 100644 index ab06a51e..00000000 --- a/vendor/github.com/elastic/go-windows/.appveyor.yml +++ /dev/null @@ -1,61 +0,0 @@ -# Version format -version: "{build}" - -image: Visual Studio 2015 - -# Environment variables -environment: - GOPATH: c:\gopath - GO111MODULE: on - GVM_GO_VERSION: 1.12.4 - GVM_DL: https://github.com/andrewkroh/gvm/releases/download/v0.2.0/gvm-windows-amd64.exe - -# Custom clone folder (variables are not expanded here). -clone_folder: c:\gopath\src\github.com\elastic\go-windows - -# Cache mingw install until appveyor.yml is modified. -cache: -- C:\ProgramData\chocolatey\bin -> .appveyor.yml -- C:\ProgramData\chocolatey\lib -> .appveyor.yml -- C:\Users\appveyor\.gvm -> .appveyor.yml -- C:\Windows\System32\gvm.exe -> .appveyor.yml - -# Scripts that run after cloning repository -install: - - ps: >- - if(!(Test-Path "C:\Windows\System32\gvm.exe")) { - wget "$env:GVM_DL" -Outfile C:\Windows\System32\gvm.exe - } - - ps: gvm --format=powershell "$env:GVM_GO_VERSION" | Invoke-Expression - # AppVeyor has MinGW64. Make sure it's on the PATH. - - set PATH=C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1;%GOROOT%\bin;%PATH% - - set PATH=%GOPATH%\bin;%PATH% - - go version - - go env - - cmd /C "set ""GO111MODULE=off"" && go get github.com/elastic/go-licenser" - - python --version - -before_build: -- go mod verify -- go-licenser -d -- go run .ci/scripts/check_format.go -- go run .ci/scripts/check_lint.go - -build_script: - # Compile - - appveyor AddCompilationMessage "Starting Compile" - - cd c:\gopath\src\github.com\elastic\go-windows - - go build - - appveyor AddCompilationMessage "Compile Success" - -test_script: - # Unit tests - - ps: Add-AppveyorTest "Unit Tests" -Outcome Running - - go test -v ./... - - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed - -# To disable deployment -deploy: off - -# Notifications should only be setup using the AppVeyor UI so that -# forks can be created without inheriting the settings. 
diff --git a/vendor/github.com/elastic/go-windows/.gitignore b/vendor/github.com/elastic/go-windows/.gitignore index 3b38be37..29708a72 100644 --- a/vendor/github.com/elastic/go-windows/.gitignore +++ b/vendor/github.com/elastic/go-windows/.gitignore @@ -21,4 +21,3 @@ *.test *.prof *.pyc -*.swp diff --git a/vendor/github.com/elastic/go-windows/.travis.yml b/vendor/github.com/elastic/go-windows/.travis.yml deleted file mode 100644 index 2a96a1a6..00000000 --- a/vendor/github.com/elastic/go-windows/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -sudo: false - -language: go - -os: -- windows -- linux - -go: -- 1.12.x - -env: -- GO111MODULE=on - -go_import_path: github.com/elastic/go-windows - -before_install: -- GO111MODULE=off go get -u github.com/elastic/go-licenser - -script: -- go mod verify -- go-licenser -d -- go run .ci/scripts/check_format.go -- go run .ci/scripts/check_lint.go -- go test -v ./... diff --git a/vendor/github.com/elastic/go-windows/CHANGELOG.md b/vendor/github.com/elastic/go-windows/CHANGELOG.md index 68698e29..c7fb7d34 100644 --- a/vendor/github.com/elastic/go-windows/CHANGELOG.md +++ b/vendor/github.com/elastic/go-windows/CHANGELOG.md @@ -8,8 +8,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- Replace pkg/errors with Go 1.13 native errors #18. + ### Changed +- go.mod - Require Go 1.18 or newer. #27 + ### Deprecated ### Removed diff --git a/vendor/github.com/elastic/go-windows/NOTICE.txt b/vendor/github.com/elastic/go-windows/NOTICE.txt index 807d3ab9..86a6c31e 100644 --- a/vendor/github.com/elastic/go-windows/NOTICE.txt +++ b/vendor/github.com/elastic/go-windows/NOTICE.txt @@ -1,5 +1,5 @@ Elastic go-windows -Copyright 2017-2019 Elasticsearch B.V. +Copyright 2017-2024 Elasticsearch B.V. This product includes software developed at Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/vendor/github.com/elastic/go-windows/README.md b/vendor/github.com/elastic/go-windows/README.md index 1140052d..45353355 100644 --- a/vendor/github.com/elastic/go-windows/README.md +++ b/vendor/github.com/elastic/go-windows/README.md @@ -1,12 +1,9 @@ # go-windows -[![Build Status](http://img.shields.io/travis/elastic/go-windows.svg?style=flat-square)][travis] -[![Build status](https://ci.appveyor.com/api/projects/status/remqhuw0jjguygc3/branch/master?svg=true)][appveyor] +[![ci](https://github.com/elastic/go-windows/actions/workflows/ci.yml/badge.svg)](https://github.com/elastic/go-windows/actions/workflows/ci.yml) [![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] -[travis]: http://travis-ci.org/elastic/go-windows -[appveyor]: https://ci.appveyor.com/project/elastic-beats/go-windows/branch/master -[godocs]: http://godoc.org/github.com/elastic/go-windows +[godocs]: https://pkg.go.dev/github.com/elastic/go-windows?GOOS=windows go-windows is a library for Go (golang) that provides wrappers to various Windows APIs that are not covered by the stdlib or by diff --git a/vendor/github.com/elastic/go-windows/Vagrantfile b/vendor/github.com/elastic/go-windows/Vagrantfile deleted file mode 100644 index 5f529549..00000000 --- a/vendor/github.com/elastic/go-windows/Vagrantfile +++ /dev/null @@ -1,30 +0,0 @@ -# NOTE: This is not a public image. It's only available within the Elastic -# organization and requires a 'vagrant login'. - -GO_VERSION = "1.12.4" - -# Provisioning for Windows PowerShell. -$winPsProvision = <